def get_summary_data_freshair_remote(token, fullURL='http://localhost:32400'):
    """
    Return a one-paragraph summary string describing the NPR Fresh Air
    episodes on the Plex server. When a newsletter date is set and new
    episodes exist since that date, sentences about the additions are
    appended.

    BUGFIXES relative to the previous revision: ``none`` -> ``None``,
    ``sincedate`` -> ``sinceDate`` (the name actually referenced in
    ``strftime``), keyword arguments ``fullurl``/``sincedate`` renamed to
    ``fullURL``/``sinceDate`` to match the sibling summarizers (e.g. the
    This American Life one), and the library-name and message strings
    restored to their proper capitalization.

    :param str token: the Plex access token.
    :param str fullURL: optional, the Plex server URL.
    :returns: the summary sentences joined into a single string.
    :rtype: str
    """
    libraries_dict = plexcore.get_libraries(token, fullURL=fullURL)
    # the library key of the Fresh Air library (max() picks one if duplicated)
    keynum = max(key for key in libraries_dict
                 if libraries_dict[key] == 'NPR Fresh Air')
    sinceDate = plexcore.get_current_date_newsletter()
    key, num_songs, _, _, totdur, totsizebytes = \
        plexcore._get_library_stats_artist(keynum, token, fullURL=fullURL)
    mainstring = 'There are %d episodes of NPR Fresh Air.' % num_songs
    sizestring = 'The total size of Fresh Air media is %s.' % get_formatted_size(
        totsizebytes)
    durstring = 'The total duration of Fresh Air media is %s.' % get_formatted_duration(
        totdur)
    if sinceDate is not None:
        key, num_songs_since, _, _, totdur_since, totsizebytes_since = \
            plexcore._get_library_stats_artist(
                keynum, token, fullURL=fullURL, sinceDate=sinceDate)
        if num_songs_since > 0:
            mainstring_since = ' '.join([
                'Since %s, I have added %d new Fresh Air episodes.' %
                (sinceDate.strftime('%B %d, %Y'), num_songs_since),
                'The total size of Fresh Air media I have added is %s.' %
                get_formatted_size(totsizebytes_since),
                'The total duration of Fresh Air media I have added is %s.' %
                get_formatted_duration(totdur_since)
            ])
            return ' '.join(
                [mainstring, sizestring, durstring, mainstring_since])
    return ' '.join([mainstring, sizestring, durstring])
def data(self, index, role):
    """
    Qt table-model data accessor for one season's episode table.

    Columns: 0 = episode number, 1 = title, 2 = air date (DD/MM/YYYY),
    3 = duration, 4 = formatted file size. Episodes missing from the Plex
    library get a dark background (``Qt.BackgroundRole``) and
    ``'NOT IN LIB'`` in the duration and size columns.

    :param index: the :py:class:`QModelIndex` of the cell being queried.
    :param int role: the Qt item-data role.
    :returns: the cell datum for the given role, otherwise ``None``.
    """
    if not index.isValid(): return None
    row = index.row()
    col = index.column()
    episode = self.actualTVSeasonData[row].copy()
    #
    ## color background role
    if role == Qt.BackgroundRole:
        if not episode['have_episode']:
            return QBrush(QColor("#373949"))
    elif role == Qt.DisplayRole:
        if col == 0:  # episode number
            return episode['episode']
        elif col == 1:  # episode title
            return episode['title']
        elif col == 2:  # air date
            return episode['date aired'].strftime('%d/%m/%Y')
        elif col == 3:  # duration, H:MM:SS.mmm (hours omitted when zero)
            if 'duration' in episode:
                dur = episode['duration']
                hrdur, rem = divmod(dur, 3600)
                # BUGFIX: format the sub-hour remainder directly. The
                # previous code formatted datetime.fromtimestamp(dur) with
                # '%M:%S.%f', whose minute field depends on the LOCAL
                # timezone offset (wrong in any zone whose UTC offset is
                # not a whole number of hours).
                mindur, secdur = divmod(rem, 60)
                dur_string = '%02d:%06.3f' % (mindur, secdur)
                if hrdur != 0:
                    dur_string = '%d:%s' % (hrdur, dur_string)
            else:
                dur_string = 'NOT IN LIB'
            return dur_string
        elif col == 4:  # file size
            if 'size' in episode:
                return get_formatted_size(episode['size'])
            else:
                return 'NOT IN LIB'
    return None
def _print_summary(library_key, library_dict, token, fullURL):
    """
    Print a terminal-wrapped one-paragraph summary of a single Plex library
    (movie, TV, or music), padded with blank lines.

    :param library_key: the key of the library on the Plex server.
    :param dict library_dict: mapping of library keys to names
        (currently unused in the body; kept for interface compatibility).
    :param str token: the Plex access token.
    :param str fullURL: the Plex server URL.
    """
    data = plexcore.get_library_stats(library_key, token, fullURL=fullURL)
    mediatype = data['mediatype']
    title = data['title']
    # wrap to the terminal width, but never wider than 100 columns
    columns = min(100, get_terminal_size().columns)
    if mediatype == 'movie':
        mystr = ' '.join([
            '"%s" is a movie library.' % title,
            'There are %d movies here.' % data['num_movies'],
            'The total size of movie media is %s.' %
            get_formatted_size(data['totsize']),
            'The total duration of movie media is %s.' %
            get_formatted_duration(data['totdur'])
        ])
    elif mediatype == 'show':
        mystr = ' '.join([
            '"%s" is a TV library.' % title,
            'There are %d TV files in %d TV shows.' %
            (data['num_tveps'], data['num_tvshows']),
            'The total size of TV media is %s.' %
            get_formatted_size(data['totsize']),
            'The total duration of TV shows is %s.' %
            get_formatted_duration(data['totdur'])
        ])
    elif mediatype == 'artist':
        num_songs = data['num_songs']
        num_artists = data['num_artists']
        num_albums = data['num_albums']
        totsize = data['totsize']
        totdur = data['totdur']
        mystr = ' '.join([
            '"%s" is a music library.' % title,
            'There are %d songs made by %d artists in %d albums.' %
            (num_songs, num_artists, num_albums),
            'The total size of music media is %s.' %
            get_formatted_size(totsize),
            'The total duration of music media is %s.' %
            get_formatted_duration(totdur)
        ])
    else:
        # BUGFIX: previously mystr was left unbound here, so any other
        # mediatype (e.g. 'photo') raised NameError at the print below.
        mystr = '"%s" is a library of unhandled media type "%s".' % (
            title, mediatype)
    print('\n%s\n' % '\n'.join(textwrap.fill(mystr, width=columns).split('\n')))
def processEpisode(self, episode):
    """
    Render a single TV episode's summary into the episode summary pane
    (``self.episodeSummaryArea``) as HTML.

    Builds a small HTML document with the series name, season/episode
    number, title, air date and overview; appends paragraphs for duration
    and file size when those keys are present; and appends an inline
    base64-encoded episode picture when both ``picurl`` and ``plex_token``
    are present in the episode dict.

    :param dict episode: episode record. Required keys: ``seriesName``,
        ``season``, ``episode``, ``date aired`` (a date/datetime),
        ``overview``, ``title``. Optional keys: ``duration``, ``size``,
        ``picurl``, ``plex_token``.
    """
    seriesName = episode['seriesName']
    season = episode['season']
    epno = episode['episode']
    dateAired = episode['date aired']
    overview = episode['overview']
    title = episode['title'].strip()
    html = BeautifulSoup(
        """
        <html>
        <p>%s, season %02d, episode %02d, <b>%s</b>.</p>
        <p>aired on %s.</p>
        <p>%s.</p>
        </html>""" % (seriesName, season, epno, title,
                      dateAired.strftime('%d/%m/%Y'), overview), 'lxml')
    body_elem = html.find_all('body')[0]
    if 'duration' in episode:
        dur_tag = html.new_tag("p")
        dur_tag.string = "duration: %s." % (get_formatted_duration(
            episode['duration']))
        body_elem.append(dur_tag)
    if 'size' in episode:
        siz_tag = html.new_tag("p")
        siz_tag.string = "size: %s." % (get_formatted_size(
            episode['size']))
        body_elem.append(siz_tag)
    if len(set(['picurl', 'plex_token']) - set(episode)) == 0:
        # now add in the picture: both the picture URL and a Plex token
        # are available, so fetch the image and inline it as a data URI
        img_content = plexcore.get_pic_data(episode['picurl'],
                                            token=episode['plex_token'])
        img = PIL.Image.open(io.BytesIO(img_content))
        mimetype = PIL.Image.MIME[img.format]
        par_img_tag = html.new_tag('p')
        img_tag = html.new_tag('img')
        # scale the image to ~78% of the summary pane's width
        img_tag['width'] = 7.0 / 9 * self.episodeSummaryArea.width()
        img_tag['src'] = "data:%s;base64,%s" % (
            mimetype, base64.b64encode(img_content).decode('utf-8'))
        par_img_tag.append(img_tag)
        body_elem.append(par_img_tag)
    self.episodeSummaryArea.setHtml(html.prettify())
def processSeasonSummary(cls, season, episodes):
    """
    Build a prettified HTML summary for one season of a TV show:
    total episode count, how many of them exist on the Plex server,
    first/last air dates, and (when any episodes exist locally) the
    average episode duration and size.

    :param int season: the season number.
    :param dict episodes: mapping of episode number to episode record;
        each record has ``have_episode``, ``date aired`` and, for episodes
        on the server, ``duration`` and ``size``.
    :returns: the prettified HTML string, or ``""`` when *episodes* is empty.
    :rtype: str
    """
    if len(episodes) == 0: return ""
    # episode numbers that are actually present on the Plex server
    present = [epno for epno in episodes
               if episodes[epno]['have_episode'] == True]
    air_dates = [ep['date aired'] for ep in episodes.values()]
    earliest = min(air_dates)
    latest = max(air_dates)
    html = BeautifulSoup(
        """
        <html>
        <p>%02d episodes in season %02d.</p>
        <p>%02d episodes in season %02d on Plex Server.</p>
        <p>first episode aired on %s.</p>
        <p>last episode aired on %s.</p>
        </html>""" % (len(episodes), season, len(present), season,
                      earliest.strftime('%B %d, %Y'),
                      latest.strftime('%B %d, %Y')), "lxml")
    body_elem = html.find_all('body')[0]
    if len(present) != 0:
        # averages over the episodes that exist on the server
        avg_dur_secs = numpy.array(
            [episodes[epno]['duration'] for epno in present]).mean()
        avg_size_bytes = numpy.array(
            [episodes[epno]['size'] for epno in present]).mean()
        #
        dur_tag = html.new_tag("p")
        dur_tag.string = "average duration of season %02d episodes: %s." % (
            season, get_formatted_duration(avg_dur_secs))
        siz_tag = html.new_tag("p")
        siz_tag.string = "average size of season %02d episodes: %s." % (
            season, get_formatted_size(avg_size_bytes))
        body_elem.append(dur_tag)
        body_elem.append(siz_tag)
    return html.prettify()
def get_summary_data_thisamericanlife_remote(token, fullURL='http://localhost:32400'):
    """
    Summarize the This American Life episodes on the Plex server.

    :param str token: the Plex access token.
    :param str fullURL: optional, the Plex server URL.
    :returns: a list whose first element is the overall summary string
        (including the since-newsletter additions, when any) and whose
        second element is a dict mapping each category/album name to its
        own summary string.
    :rtype: list
    """
    libraries_dict = plexcore.get_libraries(token, fullURL=fullURL)
    # the library key of the This American Life library
    keynum = max([
        key for key in libraries_dict
        if libraries_dict[key] == 'This American Life'
    ])
    sinceDate = plexcore.get_current_date_newsletter()
    key, song_data = plexcore._get_library_data_artist(keynum,
                                                       token,
                                                       fullURL=fullURL)
    # accumulate overall counts; song_data is {artist: {album: [tracks]}},
    # each track being a (name, date, duration, size-in-bytes) tuple
    num_episodes = 0
    totdur = 0.0
    totsizebytes = 0.0
    for key in song_data:
        for key2 in song_data[key]:
            num_episodes += len(song_data[key][key2])
            for track in song_data[key][key2]:
                name, dt, dur, sizebytes = track
                totdur += dur
                totsizebytes += sizebytes
    mainstring = 'There are %d episodes in %d series in This American Life.' % (
        num_episodes, len(song_data))
    sizestring = 'The total size of This American Life media is %s.' % \
        get_formatted_size( totsizebytes )
    durstring = 'The total duration of This American Life media is %s.' % \
        get_formatted_duration( totdur )
    if sinceDate is None:
        pristrings = [
            ' '.join([mainstring, sizestring, durstring]),
        ]
    else:
        # same accumulation, restricted to media added since the newsletter
        key, song_data_since = plexcore._get_library_data_artist(
            keynum, token, fullURL=fullURL, sinceDate=sinceDate)
        num_episodes_since = 0
        totdur_since = 0.0
        totsizebytes_since = 0.0
        for key in song_data_since:
            for key2 in song_data_since[key]:
                num_episodes_since += len(song_data_since[key][key2])
                for track in song_data_since[key][key2]:
                    name, dt, dur, sizebytes = track
                    totdur_since += dur
                    totsizebytes_since += sizebytes
        if num_episodes_since > 0:
            mainstring_since = ' '.join([
                'Since %s, I have added %d new This American Life episodes.'
                % (sinceDate.strftime('%B %d, %Y'), num_episodes_since),
                'The total size of This American Life media I added is %s.'
                % get_formatted_size(totsizebytes_since),
                'The total duration of This American Life media I added is %s.'
                % get_formatted_duration(totdur_since)
            ])
            pristrings = [
                ' '.join([mainstring, sizestring, durstring,
                          mainstring_since]),
            ]
        else:
            pristrings = [
                ' '.join([mainstring, sizestring, durstring]),
            ]
    #
    # per-category summaries; the 'Ira Glass' artist is presented as the
    # 'This American Life' category
    catpristrings = {}
    for album in song_data:
        if album == 'Ira Glass': actalbum = 'This American Life'
        else: actalbum = album
        totdur = 0.0
        totsizebytes = 0.0
        num_episodes = 0
        for key2 in song_data[album]:
            num_episodes += len(song_data[album][key2])
            for track in song_data[album][key2]:
                name, dt, dur, sizebytes = track
                totdur += dur
                totsizebytes += sizebytes
        mainstring = 'There are %d episodes in this category.' % num_episodes
        sizestring = 'The total size of media here is %s.' % get_formatted_size(
            totsizebytes)
        durstring = 'The total duration of media here is %s.' % get_formatted_duration(
            totdur)
        if sinceDate is None:
            mystring = ' '.join([mainstring, sizestring, durstring])
        else:
            # song_data_since is only bound when sinceDate is not None,
            # which is exactly this branch
            if album not in song_data_since:
                mystring = ' '.join([mainstring, sizestring, durstring])
            else:
                totdur_since = 0.0
                totsizebytes_since = 0.0
                num_episodes_since = 0
                for key2 in song_data_since[album]:
                    num_episodes_since += len(song_data_since[album][key2])
                    for track in song_data_since[album][key2]:
                        name, dt, dur, sizebytes = track
                        totdur_since += dur
                        totsizebytes_since += sizebytes
                if num_episodes_since > 0:
                    mainstring_since = ' '.join([
                        'Since %s, I have added %d new episodes in this category.'
                        % (sinceDate.strftime('%B %d, %Y'),
                           num_episodes_since),
                        'The total size of media I added here is %s.' %
                        get_formatted_size(totsizebytes_since),
                        'The total duration of media I added here is %s.' %
                        get_formatted_duration(totdur_since)
                    ])
                    mystring = ' '.join(
                        [mainstring, sizestring, durstring, mainstring_since])
                else:
                    mystring = ' '.join([mainstring, sizestring, durstring])
        catpristrings[actalbum] = mystring
    pristrings.append(catpristrings)
    return pristrings
def get_summary_data_movies_remote(token, fullURL='http://localhost:32400'):
    """
    Summarize all movie libraries on the Plex server.

    :param str token: the Plex access token.
    :param str fullURL: optional, the Plex server URL.
    :returns: a two-element list -- a global summary string (overall counts,
        size, duration, optionally the since-newsletter additions, the last
        seven movies added as a LaTeX itemized list, and a lead-in sentence),
        followed by a dict mapping each genre to its own summary string.
        Returns ``None`` if there are no libraries or no movie libraries.
    :rtype: list
    """
    # BUGFIX: pass do_full=True so library entries are (name, mediatype)
    # tuples, matching the [1] == 'movie' test below and the TV and music
    # summarizers in this module.
    libraries_dict = plexcore.get_libraries(token, fullURL=fullURL,
                                            do_full=True)
    if libraries_dict is None: return None
    keynums = set(
        filter(lambda keynum: libraries_dict[keynum][1] == 'movie',
               libraries_dict))
    if len(keynums) == 0: return None
    #
    sinceDate = plexcore.get_current_date_newsletter()
    datas = list(
        map(
            lambda keynum: plexcore.get_library_stats(
                keynum, token, fullURL=fullURL), keynums))
    num_movies_since = -1
    sorted_by_genres = {}
    sorted_by_genres_since = {}
    # merge the per-library genre statistics into one mapping
    for data in datas:
        data_sorted_by_genre = data['genres']
        for genre in data_sorted_by_genre:
            if genre not in sorted_by_genres:
                sorted_by_genres[genre] = data_sorted_by_genre[genre].copy()
                continue
            # BUGFIX: accumulate into 'totnum' (was the nonexistent key
            # 'totum', which raised KeyError whenever a genre appeared in
            # more than one movie library).
            sorted_by_genres[genre]['totnum'] += data_sorted_by_genre[genre][
                'totnum']
            sorted_by_genres[genre]['totdur'] += data_sorted_by_genre[genre][
                'totdur']
            sorted_by_genres[genre]['totsize'] += data_sorted_by_genre[genre][
                'totsize']
    if sinceDate is not None:
        datas_since = list(
            filter(
                lambda data_since: data_since['num_movies'] > 0,
                map(
                    lambda keynum: plexcore.get_library_stats(
                        keynum, token, fullURL=fullURL, sinceDate=sinceDate),
                    keynums)))
        if len(datas_since) != 0:
            num_movies_since = sum(
                map(lambda data_since: data_since['num_movies'], datas_since))
            # BUGFIX: map() was missing its iterable argument (datas_since),
            # which raised TypeError at runtime.
            categories_since = set(
                chain.from_iterable(
                    map(lambda data_since: data_since['genres'].keys(),
                        datas_since)))
            totsize_since = sum(
                map(lambda data_since: data_since['totsize'], datas_since))
            totdur_since = sum(
                map(lambda data_since: data_since['totdur'], datas_since))
            mainstring_since = ' '.join([
                'Since %s, I have added %d movies in %d categories.' %
                (sinceDate.strftime('%B %d, %Y'), num_movies_since,
                 len(categories_since)),
                'The total size of movie media I added is %s.' %
                get_formatted_size(totsize_since),
                'The total duration of movie media I added is %s.' %
                get_formatted_duration(totdur_since)
            ])
            # merge the per-library since-newsletter genre statistics
            for data_since in datas_since:
                data_since_sorted_by_genre = data_since['genres']
                for genre in data_since_sorted_by_genre:
                    if genre not in sorted_by_genres_since:
                        sorted_by_genres_since[genre] = \
                            data_since_sorted_by_genre[genre].copy()
                        continue
                    # BUGFIX: 'totnum', not 'totum' (see above).
                    sorted_by_genres_since[genre]['totnum'] += \
                        data_since_sorted_by_genre[genre]['totnum']
                    sorted_by_genres_since[genre]['totdur'] += \
                        data_since_sorted_by_genre[genre]['totdur']
                    sorted_by_genres_since[genre]['totsize'] += \
                        data_since_sorted_by_genre[genre]['totsize']
    categories = set(sorted_by_genres.keys())
    num_movies = sum(map(lambda data: data['num_movies'], datas))
    totdur = sum(map(lambda data: data['totdur'], datas))
    totsize = sum(map(lambda data: data['totsize'], datas))
    mainstring = 'There are %d movies in %d categories.' % (num_movies,
                                                            len(categories))
    sizestring = 'The total size of movie media is %s.' % get_formatted_size(
        totsize)
    durstring = 'The total duration of movie media is %s.' % get_formatted_duration(
        totdur)
    #
    ## get last 7 movies that I have added, formatted as a LaTeX itemize list
    lastN_movies = plexcore.get_lastN_movies(7, token, fullURL=fullURL)
    lastNstrings = [
        '', '',
        'Here are the last %d movies I have added.' % len(lastN_movies),
        '\\begin{itemize}'
    ]
    for title, year, date, url in lastN_movies:
        if url is None:
            lastNstrings.append('\item %s (%d), added on %s.' %
                                (title, year, date.strftime('%d %B %Y')))
        else:
            lastNstrings.append('\item \href{%s}{%s (%d)}, added on %s.' %
                                (url, title, year, date.strftime('%d %B %Y')))
    lastNstrings.append('\end{itemize}')
    lastNstring = '\n'.join(lastNstrings)
    finalstring = 'Here is a summary by category.'
    if sinceDate is not None and num_movies_since > 0:
        movstring = ' '.join([
            mainstring, sizestring, durstring, mainstring_since, lastNstring,
            finalstring
        ])
    else:
        movstring = ' '.join(
            [mainstring, sizestring, durstring, lastNstring, finalstring])
    movstrings = [
        movstring,
    ]
    # per-genre summary strings
    catmovstrings = {}
    for cat in sorted(categories):
        num_movies = sorted_by_genres[cat]['totnum']
        totdur = sorted_by_genres[cat]['totdur']
        totsize = sorted_by_genres[cat]['totsize']
        mainstring = 'There are %d movies in this category.' % num_movies
        sizestring = 'The total size of movie media here is %s.' % get_formatted_size(
            totsize)
        durstring = 'The total duration of movie media here is %s.' % get_formatted_duration(
            totdur)
        if sinceDate is not None and cat in sorted_by_genres_since and num_movies > 0:
            num_movies_since = sorted_by_genres_since[cat]['totnum']
            totdur_since = sorted_by_genres_since[cat]['totdur']
            totsize_since = sorted_by_genres_since[cat]['totsize']
            mainstring_since = ' '.join([
                'Since %s, I have added %d movies in this category.' %
                (sinceDate.strftime('%B %d, %Y'), num_movies_since),
                'The total size of movie media I added here is %s.' %
                get_formatted_size(totsize_since),
                'The total duration of movie media I added here is %s.' %
                get_formatted_duration(totdur_since)
            ])
            movstring = ' '.join(
                [mainstring, sizestring, durstring, mainstring_since])
        else:
            movstring = ' '.join([mainstring, sizestring, durstring])
        catmovstrings[cat] = movstring
    movstrings.append(catmovstrings)
    return movstrings
def get_summary_data_television_remote(token, fullURL='http://localhost:32400'):
    """
    Return a summary string for all TV ("show") libraries on the Plex
    server: total episode and show counts, total size and duration, and,
    when a newsletter date is set and new media exists, the additions
    since that date.

    :param str token: the Plex access token.
    :param str fullURL: optional, the Plex server URL.
    :returns: the summary string, or ``None`` if there are no libraries
        or no TV libraries.
    :rtype: str
    """
    libraries_dict = plexcore.get_libraries(token,
                                            fullURL=fullURL,
                                            do_full=True)
    if libraries_dict is None: return None
    # keys of all libraries whose media type is 'show'
    keynums = set(
        filter(lambda keynum: libraries_dict[keynum][1] == 'show',
               libraries_dict))
    if len(keynums) == 0: return None
    #
    sinceDate = plexcore.get_current_date_newsletter()
    datas = list(
        map(
            lambda keynum: plexcore.get_library_stats(
                keynum, token, fullURL=fullURL), keynums))
    # totals aggregated across all TV libraries
    sizestring = 'The total size of TV media is %s.' % (get_formatted_size(
        sum(list(map(lambda data: data['totsize'], datas)))))
    durstring = 'The total duration of TV media is %s.' % (
        get_formatted_duration(
            sum(list(map(lambda data: data['totdur'], datas)))))
    mainstring = 'There are %d TV episodes in %d TV shows.' % (
        sum(list(map(lambda data: data['num_tveps'], datas))),
        sum(list(map(lambda data: data['num_tvshows'], datas))))
    if sinceDate is not None:
        # per-library stats restricted to media added since the newsletter,
        # keeping only libraries with at least one new episode
        datas_since = list(
            filter(
                lambda data_since: data_since['num_tveps'] > 0,
                map(
                    lambda keynum: plexcore.get_library_stats(
                        keynum, token, fullURL=fullURL, sinceDate=sinceDate),
                    keynums)))
        if len(datas_since) > 0:
            mainstring_since = ' '.join([
                'Since %s, I have added %d TV files in %d TV shows.' %
                (sinceDate.strftime('%B %d, %Y'),
                 sum(
                     list(
                         map(lambda data_since: data_since['num_tveps'],
                             datas_since))),
                 sum(
                     list(
                         map(lambda data_since: data_since['num_tvshows'],
                             datas_since)))),
                'The total size of TV media I added is %s.' %
                get_formatted_size(
                    sum(
                        list(
                            map(lambda data_since: data_since['totsize'],
                                datas_since)))),
                'The total duration of TV media I added is %s.' %
                get_formatted_duration(
                    sum(
                        list(
                            map(lambda data_since: data_since['totdur'],
                                datas_since))))
            ])
            tvstring = ' '.join(
                [mainstring, sizestring, durstring, mainstring_since])
            return tvstring
    tvstring = ' '.join([mainstring, sizestring, durstring])
    return tvstring
def get_summary_data_music_remote(token, fullURL='http://localhost:32400'):
    """
    Return a summary string for all music ("artist") libraries on the Plex
    server: total song, artist and album counts, total size and duration,
    and, when a newsletter date is set and new media exists, the additions
    since that date.

    :param str token: the Plex access token.
    :param str fullURL: optional, the Plex server URL.
    :returns: the summary string, or ``None`` if there are no libraries
        or no music libraries.
    :rtype: str
    """
    libraries_dict = plexcore.get_libraries(token,
                                            fullURL=fullURL,
                                            do_full=True)
    if libraries_dict is None: return None
    # keys of all libraries whose media type is 'artist' (music)
    keynums = set(
        filter(lambda keynum: libraries_dict[keynum][1] == 'artist',
               libraries_dict))
    if len(keynums) == 0: return None
    sinceDate = plexcore.get_current_date_newsletter()
    datas = list(
        map(
            lambda keynum: plexcore.get_library_stats(
                keynum, token, fullURL=fullURL), keynums))
    # totals aggregated across all music libraries
    mainstring = 'There are %d songs made by %d artists in %d albums.' % (
        sum(list(map(lambda data: data['num_songs'], datas))),
        sum(list(map(lambda data: data['num_artists'], datas))),
        sum(list(map(lambda data: data['num_albums'], datas))))
    sizestring = 'The total size of music media is %s.' % get_formatted_size(
        sum(list(map(lambda data: data['totsize'], datas))))
    durstring = 'The total duration of music media is %s.' % get_formatted_duration(
        sum(list(map(lambda data: data['totdur'], datas))))
    if sinceDate is not None:
        # per-library stats restricted to media added since the newsletter,
        # keeping only libraries with at least one new song
        datas_since = list(
            filter(
                lambda data_since: data_since['num_songs'] > 0,
                map(
                    lambda keynum: plexcore.get_library_stats(
                        keynum, token, fullURL=fullURL, sinceDate=sinceDate),
                    keynums)))
        if len(datas_since) != 0:
            num_songs_since = sum(
                list(
                    map(lambda data_since: data_since['num_songs'],
                        datas_since)))
            num_artists_since = sum(
                list(
                    map(lambda data_since: data_since['num_artists'],
                        datas_since)))
            num_albums_since = sum(
                list(
                    map(lambda data_since: data_since['num_albums'],
                        datas_since)))
            totsize_since = sum(
                list(map(lambda data_since: data_since['totsize'],
                         datas_since)))
            totdur_since = sum(
                list(map(lambda data_since: data_since['totdur'],
                         datas_since)))
            mainstring_since = ' '.join([
                'Since %s, I have added %d songs made by %d artists in %d albums.'
                % (sinceDate.strftime('%B %d, %Y'), num_songs_since,
                   num_artists_since, num_albums_since),
                'The total size of music media I added is %s.' %
                get_formatted_size(totsize_since),
                'The total duration of music media I added is %s.' %
                get_formatted_duration(totdur_since)
            ])
            musicstring = ' '.join(
                [mainstring, sizestring, durstring, mainstring_since])
            return musicstring
    musicstring = ' '.join([mainstring, sizestring, durstring])
    return musicstring
def get_title(elem):
    """
    Return the element's title followed by its formatted size in
    parentheses; when no ``size`` key is present the parentheses are empty,
    e.g. ``"Title ()"``.

    :param dict elem: element with a ``title`` key and an optional
        ``size`` key (in bytes).
    :rtype: str
    """
    size_part = ''
    if 'size' in elem:
        size_part = get_formatted_size(elem['size'])
    return '%s (%s)' % (elem['title'], size_part)
def get_movie_torrent_zooqle(name, maxnum=10, verify=True):
    """
    Returns a :py:class:`tuple` of candidate movie Magnet links found using
    the Zooqle_ torrent service and the string ``"SUCCESS"``, if successful.

    :param str name: the movie string on which to search.
    :param int maxnum: optional argument, the maximum number of magnet links
        to return. Default is 10. Must be :math:`\ge 5`.
    :param bool verify: optional argument, whether to verify SSL
        connections. Default is ``True``.

    :returns: if successful, then returns a two member :py:class:`tuple`
        the first member is a :py:class:`list` of elements that match the
        searched movie, ordered from *most* seeds and leechers to least.
        The second element is the string ``"SUCCESS"``. The keys in each
        element of the list are,

        * ``title`` is the name of the candidate movie to download, and in
          parentheses is the size of the download in MB or GB.
        * ``raw_title`` is *only* the name of the candidate movie to
          download.
        * ``seeders`` is the number of seeds for this Magnet link.
        * ``leechers`` is the number of leeches for this Magnet link.
        * ``link`` is the Magnet URI link.
        * ``torrent_size`` is the size of this torrent in MB.

        If this is unsuccessful, then returns an error :py:class:`tuple` of
        the form returned by :py:meth:`return_error_raw
        <plexcore.return_error_raw>`.
    :rtype: tuple

    .. _Zooqle: https://zooqle.com
    """
    assert (maxnum >= 5)
    # URL-encode a fixed list of public trackers to append to magnet URIs
    names_of_trackers = map(
        lambda tracker: tracker.replace(':', '%3A').replace('/', '%2F'), [
            'udp://tracker.opentrackr.org:1337/announce',
            'udp://open.demonii.com:1337',
            'udp://tracker.pomf.se:80/announce',
            'udp://torrent.gresille.org:80/announce',
            'udp://11.rarbg.com/announce', 'udp://11.rarbg.com:80/announce',
            'udp://open.demonii.com:1337/announce',
            'udp://tracker.openbittorrent.com:80',
            'http://tracker.ex.ua:80/announce', 'http://tracker.ex.ua/announce',
            'http://bt.careland.com.cn:6969/announce',
            'udp://glotorrents.pw:6969/announce'
        ])
    tracklist = ''.join(
        map(lambda tracker: '&tr=%s' % tracker, names_of_trackers))

    def _get_magnet_link(info_hash, title):
        # assemble a magnet URI from the torrent info hash, the
        # plus-separated title, and the tracker list above
        download_url = "magnet:?xt=urn:btih:" + info_hash + "&dn=" + '+'.join(
            title.split()) + tracklist
        return download_url

    # strip apostrophes from the search string before building the query
    candname = re.sub("'", '', name)
    url = 'https://zooqle.com/search'
    params = {
        'q': '+'.join(candname.split() + [
            'category%3AMovie',
        ]),
        'fmt': 'rss'
    }
    paramurl = '?' + '&'.join(
        map(lambda tok: '%s=%s' % (tok, params[tok]), params))
    fullurl = urljoin(url, paramurl)
    response = requests.get(fullurl, verify=verify)
    if response.status_code != 200:
        return None, 'ERROR, COULD NOT FIND ZOOQLE TORRENTS FOR %s' % candname
    myxml = BeautifulSoup(response.content, 'lxml')

    def is_valid_elem(elem):
        # an RSS item is usable only when it carries all four torrent:* tags
        names = set(map(lambda elm: elm.name, elem.find_all()))
        return len(names & set([
            'torrent:infohash', 'torrent:seeds', 'torrent:peers',
            'torrent:contentlength'
        ])) == 4

    # keep items with a title that fuzzy-matches the search (>= 80)
    cand_items = list(
        filter(
            lambda elem: len(elem.find_all('title')) >= 1 and is_valid_elem(
                elem) and get_maximum_matchval(
                    max(elem.find_all('title')).get_text(), candname) >= 80,
            myxml.find_all('item')))

    def get_num_forelem(elem, name):
        # pull the integer value of a torrent:<name> child tag, or None
        valid_elm = list(
            filter(lambda elm: elm.name == 'torrent:%s' % name, elem))
        if len(valid_elm) == 0: return None
        valid_elm = valid_elm[0]
        return int(valid_elm.get_text())

    def get_infohash(elem):
        # pull the lowercase info hash from the torrent:infohash child tag
        valid_elm = list(
            filter(lambda elm: elm.name == 'torrent:infohash', elem))
        if len(valid_elm) == 0: return None
        valid_elm = valid_elm[0]
        return valid_elm.get_text().lower()

    items_toshow = list(
        map(
            lambda elem: {
                'title':
                '%s (%s)' % (max(elem.find_all('title')).get_text(),
                             get_formatted_size(
                                 get_num_forelem(elem, 'contentlength'))),
                'raw_title':
                max(elem.find_all('title')).get_text(),
                'seeders':
                get_num_forelem(elem, 'seeds'),
                'leechers':
                get_num_forelem(elem, 'peers'),
                'link':
                _get_magnet_link(get_infohash(elem),
                                 max(elem.find_all('title')).get_text()),
                'torrent_size':
                get_num_forelem(elem, 'contentlength')
            }, cand_items))
    if len(items_toshow) == 0:
        return None, 'ERROR, COULD NOT FIND ZOOQLE TORRENTS FOR %s' % candname
    # most seeds + leeches first
    return sorted(items_toshow,
                  key=lambda item: -item['seeders'] - item['leechers']
                  )[:maxnum], 'SUCCESS'
def get_movie_torrent_eztv_io(name, maxnum=10, verify=True, tmdb_id=None):
    """
    Returns a :py:class:`tuple` of candidate movie Magnet links found using
    the `EZTV.IO`_ torrent service and the string ``"SUCCESS"``, if
    successful.

    :param str name: the movie on which to search.
    :param int maxnum: optional argument, the maximum number of magnet links
        to return. Default is 10. Must be :math:`\ge 5`.
    :param bool verify: optional argument, whether to verify SSL
        connections. Default is ``True``.
    :param str tmdb_id: optional argument. The TMDB_ ID of the movie.

    :returns: if successful, then returns a two member :py:class:`tuple`
        the first member is a :py:class:`list` of elements that match the
        searched movie, ordered from *most* seeds and leechers to least.
        The second element is the string ``"SUCCESS"``. The keys in each
        element of the list are,

        * ``title`` is the name of the candidate movie to download, and in
          parentheses is the size of the download in MB or GB.
        * ``raw_title`` is *only* the name of the candidate movie to
          download.
        * ``seeders`` is the number of seeds for this Magnet link.
        * ``leechers`` is the number of leeches for this Magnet link.
        * ``link`` is the Magnet URI link.
        * ``torrent_size`` is the size of this torrent in MB.

        If this is unsuccessful, then returns an error :py:class:`tuple` of
        the form returned by :py:meth:`return_error_raw
        <plexcore.return_error_raw>`.
    :rtype: tuple

    .. warning::

        cannot get it to work as of |date|. Cannot get it to work when
        giving it valid movie searches, such as ``"Star Trek Beyond"``. See
        :numref:`table_working_movietorrents`.

    .. _`EZTV.IO`: https://eztv.io
    .. _`Star Trek Beyond`: https://en.wikipedia.org/wiki/Star_Trek_Beyond
    """
    assert (maxnum >= 5)
    # resolve the TMDB id from the name when not supplied by the caller
    if tmdb_id is None:
        tmdb_id = plextmdb.get_movie_tmdbids(name, verify=verify)
    if tmdb_id is None:
        return return_error_raw('FAILURE, COULD NOT FIND IMDB ID FOR %s.' %
                                name)
    #
    ## check that the name matches
    movie_name = plextmdb.get_movie_info(
        tmdb_id, verify=verify)['title'].lower().strip()
    if movie_name != name.lower().strip():
        return return_error_raw('FAILURE, COULD NOT FIND IMDB ID FOR %s.' %
                                name)
    imdb_id = plextmdb.get_imdbid_from_id(tmdb_id, verify=verify)
    if imdb_id is None:
        return return_error_raw('FAILURE, COULD NOT FIND IMDB ID FOR %s.' %
                                name)
    # first page of results, searched by numeric IMDb id ('t' prefix stripped)
    response = requests.get('https://eztv.io/api/get-torrents',
                            params={
                                'imdb_id': int(imdb_id.replace('t', '')),
                                'limit': 100,
                                'page': 0
                            },
                            verify=verify)
    if response.status_code != 200:
        return return_error_raw(
            'ERROR, COULD NOT FIND ANY TORRENTS FOR %s IN EZTV.IO' % name)
    alldat = response.json()
    if alldat['torrents_count'] == 0:
        return return_error_raw(
            'ERROR, COULD NOT FIND ANY TORRENTS FOR %s IN EZTV.IO' % name)
    all_torrents = alldat['torrents']
    # page through remaining results (100 per page) while pages stay full;
    # any short page, error, or empty page ends the pagination
    for pageno in range(1, 101):
        if alldat['torrents_count'] < 100: break
        response = requests.get('https://eztv.io/api/get-torrents',
                                params={
                                    'imdb_id': int(imdb_id.replace('t', '')),
                                    'limit': 100,
                                    'page': pageno
                                },
                                verify=verify)
        if response.status_code != 200: break
        alldat = response.json()
        if alldat['torrents_count'] == 0: break
        all_torrents += alldat['torrents']
    all_torrents_mine = all_torrents[:maxnum]
    if len(all_torrents_mine) == 0:
        return return_error_raw('ERROR, COULD NOT FIND %s IN EZTV.IO' % name)
    return list(
        map(
            lambda tor: {
                'raw_title': tor['title'],
                'title': '%s (%s)' % (tor['title'],
                                      get_formatted_size(tor['size_bytes'])),
                'seeders': int(tor['seeds']),
                'leechers': int(tor['peers']),
                'link': tor['magnet_url'],
                'torrent_size': float(tor['size_bytes']) / 1024**2
            }, all_torrents_mine)), 'SUCCESS'
def get_movie_torrent_jackett(name,
                              maxnum=10,
                              verify=True,
                              doRaw=False,
                              tmdb_id=None):
    """
    Returns a :py:class:`tuple` of candidate movie Magnet links found using
    the main Jackett_ torrent searching service and the string
    ``"SUCCESS"``, if successful.

    :param str name: the movie string on which to search.
    :param int maxnum: optional argument, the maximum number of magnet
        links to return. Default is 10. Must be :math:`\ge 5`.
    :param bool verify: optional argument, whether to verify SSL
        connections. Default is ``True``.
    :param bool doRaw: optional argument. If ``True``, uses the IMDb_
        information to search for the movie. Otherwise, uses the full
        string in ``name`` to search for the movie.
    :param int tmdb_id: optional argument. If defined, use this TMDB_
        movie ID to search for magnet links.

    :returns: if successful, then returns a two member :py:class:`tuple`
        the first member is a :py:class:`list` of elements that match the
        searched movie, ordered from *most* seeds and leechers to least.
        The second element is the string ``"SUCCESS"``. The keys in each
        element of the list are,

        * ``title`` is the name of the candidate movie to download, and in
          parentheses the size of the candidate in MB or GB.
        * ``raw_title`` is *only* the name of the candidate movie to
          download.
        * ``seeders`` is the number of seeds for this Magnet link.
        * ``leechers`` is the number of leeches for this Magnet link.
        * ``link`` is the Magnet URI link.
        * ``torrent_size`` is the size of this torrent in GB.

        If this is unsuccessful, then returns an error :py:class:`tuple` of
        the form returned by :py:meth:`return_error_raw
        <plexcore.return_error_raw>`.
    :rtype: tuple

    .. _Jackett: https://github.com/Jackett/Jackett
    """
    time0 = time.time()
    data = get_jackett_credentials()
    if data is None:
        return return_error_raw(
            'failure, could not get jackett server credentials')
    url, apikey = data
    endpoint = 'api/v2.0/indexers/all/results/torznab/api'
    # popName: when a TMDB id was supplied, the free-text query is dropped
    # from the request parameters before the search
    popName = False
    if tmdb_id is not None: popName = True

    def _return_params(name, popName, tmdb_id):
        # build the torznab query parameters: prefer an IMDb-id search
        # (resolved via TMDB) and fall back to the free-text name
        params = {'apikey': apikey, 'cat': 2000}
        if tmdb_id is not None:
            imdb_id = plextmdb.get_imdbid_from_id(tmdb_id, verify=verify)
            params['imdbid'] = imdb_id
            return params
        elif doRaw:
            params['q'] = name
            return params
        tmdb_id = plextmdb.get_movie_tmdbids(name, verify=verify)
        #if tmdb_id is None or doRaw and not popName:
        #    params['q'] = name
        #    return params
        #
        ## check that the name matches
        movie_name = plextmdb.get_movie_info(
            tmdb_id, verify=verify)['title'].lower().strip()
        if movie_name != name.lower().strip():
            params['q'] = name
            return params
        imdb_id = plextmdb.get_imdbid_from_id(tmdb_id, verify=verify)
        if imdb_id is None:
            params['q'] = name
            return params
        params['imdbid'] = imdb_id
        return params

    params = _return_params(name, popName, tmdb_id)
    if popName and 'q' in params: params.pop('q')
    logging.debug('params: %s, mainURL = %s' %
                  (params, urljoin(url, endpoint)))
    response = requests.get(urljoin(url, endpoint),
                            verify=verify,
                            params=params)
    if response.status_code != 200:
        return return_error_raw(' '.join([
            'failure, problem with jackett server accessible at %s.' % url,
            'Error code = %d. Error data = %s.' %
            (response.status_code, response.content)
        ]))
    logging.debug('processed jackett torrents for %s in %0.3f seconds.' %
                  (name, time.time() - time0))
    html = BeautifulSoup(response.content, 'lxml')
    if len(html.find_all('item')) == 0:
        return return_error_raw(
            'failure, could not find movie %s with jackett.' % name)
    items = []

    def _get_magnet_url(item):
        # first try the torznab magneturl attribute on the item itself
        magnet_url = item.find('torznab:attr', {'name': 'magneturl'})
        if magnet_url is not None and 'magnet' in magnet_url['value']:
            return magnet_url['value']
        #
        ## not found it here, must go into URL
        url2 = item.find('guid')
        if url2 is None: return None
        url2 = url2.text
        if not validators.url(url2): return None
        resp2 = requests.get(url2, verify=verify)
        if resp2.status_code != 200: return None
        h2 = BeautifulSoup(resp2.content, 'lxml')
        # scrape any magnet: hrefs from the linked page
        valid_magnet_links = set(
            map(
                lambda elem: elem['href'],
                filter(
                    lambda elem: 'href' in elem.attrs and 'magnet' in elem[
                        'href'], h2.find_all('a'))))
        if len(valid_magnet_links) == 0: return None
        return max(valid_magnet_links)

    for item in html.find_all('item'):
        title = item.find('title')
        if title is None: continue
        title = title.text
        torrent_size = item.find('size')
        if torrent_size is not None:
            torrent_size = float(torrent_size.text) / 1024**2
        seeders = item.find('torznab:attr', {'name': 'seeders'})
        if seeders is None: seeders = -1
        else: seeders = int(seeders['value'])
        leechers = item.find('torznab:attr', {'name': 'peers'})
        if leechers is None: leechers = -1
        else: leechers = int(leechers['value'])
        #
        ## now do one of two things to get the magnet URL
        magnet_url = _get_magnet_url(item)
        if magnet_url is None: continue
        myitem = {
            'raw_title': title,
            'title': title,
            'seeders': seeders,
            'leechers': leechers,
            'link': magnet_url
        }
        if torrent_size is not None:
            myitem['title'] = '%s (%s)' % (
                title, get_formatted_size(torrent_size * 1024**2))
            myitem['torrent_size'] = torrent_size
        items.append(myitem)
    if len(items) == 0:
        return return_error_raw('FAILURE, JACKETT CANNOT FIND %s' % name)
    return items[:maxnum], 'SUCCESS'
def getShowSummary(cls, seriesName, tvdata_on_plex, missing_eps):
    """
    Build two prettified HTML fragments summarizing a TV show.

    The first fragment carries the headline facts: show status
    (ended/ongoing), total and missing episode counts (specials in
    season 0 are excluded from the totals), first/last air dates, and the
    average episode duration and size. The second fragment carries the
    show's overview text, when one exists.

    :param str seriesName: the name of the TV show.
    :param dict tvdata_on_plex: mapping of show name to its Plex data;
        this method reads ``seasons``, ``summary`` and ``didEnd`` for
        *seriesName*.
    :param dict missing_eps: mapping of show name to its list of missing
        episodes; shows absent from this mapping have no missing episodes.
    :returns: a two member :py:class:`tuple` of prettified HTML strings,
        ``(facts_html, overview_html)``.
    :rtype: tuple
    """
    seasons_info = tvdata_on_plex[seriesName]['seasons']
    overview = tvdata_on_plex[seriesName]['summary']
    didend = tvdata_on_plex[seriesName]['didEnd']
    # total episode count over all seasons except season 0 (specials)
    num_total = sum(
        list(
            map(lambda seasno: len(seasons_info[seasno]['episodes']),
                set(seasons_info) - set([0]))))
    if seriesName not in missing_eps: num_missing = 0
    else: num_missing = len(missing_eps[seriesName])
    if didend: show_status = "Show has ended"
    else: show_status = "Show is still ongoing"
    # earliest and latest air dates over all non-special episodes
    minDate = min(
        map(
            lambda seasno: min(
                map(
                    lambda epno: seasons_info[seasno]['episodes'][epno][
                        'date aired'], seasons_info[seasno]['episodes'])),
            set(seasons_info) - set([0])))
    maxDate = max(
        map(
            lambda seasno: max(
                map(
                    lambda epno: seasons_info[seasno]['episodes'][epno][
                        'date aired'], seasons_info[seasno]['episodes'])),
            set(seasons_info) - set([0])))
    html = BeautifulSoup(
        """
        <html>
        <p>Summary for %s.</p>
        <p>%s.</p>
        <p>%02d episodes, %02d missing.</p>
        <p>First episode aired on %s.</p>
        <p>Last episode aired on %s.</p>
        <p>
        </html>""" % (seriesName, show_status, num_total, num_missing,
                      minDate.strftime('%B %d, %Y'),
                      maxDate.strftime('%B %d, %Y')), 'lxml')
    body_elem = html.find_all('body')[0]
    # second document: the show overview only
    html2 = BeautifulSoup(
        """
        <html>
        <body>
        </body>
        </html>""", 'lxml')
    body2_elem = html2.find_all('body')[0]
    if len(overview) != 0:
        summary_tag = html2.new_tag("p")
        summary_tag.string = overview
        body2_elem.append(summary_tag)
    # averages over all non-special episodes
    average_duration_in_secs = numpy.array(
        list(
            chain.from_iterable(
                map(
                    lambda seasno: list(
                        map(
                            lambda epno: seasons_info[seasno]['episodes'][
                                epno]['duration'], seasons_info[seasno][
                                    'episodes'])),
                    set(seasons_info) - set([0]))))).mean()
    average_size_in_bytes = numpy.array(
        list(
            chain.from_iterable(
                map(
                    lambda seasno: list(
                        map(
                            lambda epno: seasons_info[seasno]['episodes']
                            [epno]['size'], seasons_info[seasno]['episodes'
                                                                 ])),
                    set(seasons_info) - set([0]))))).mean()
    dur_tag = html.new_tag("p")
    dur_tag.string = "average duration of %02d episodes: %s." % (
        num_total, get_formatted_duration(average_duration_in_secs))
    siz_tag = html.new_tag("p")
    siz_tag.string = "average size of %02d episodes: %s." % (
        num_total, get_formatted_size(average_size_in_bytes))
    body_elem.append(dur_tag)
    body_elem.append(siz_tag)
    return html.prettify(), html2.prettify()