Example #1
def get_summary_data_freshair_remote(token, fullURL='http://localhost:32400'):
    libraries_dict = core.get_libraries(token, fullURL=fullURL)
    keynum = max([
        key for key in libraries_dict if libraries_dict[key] == 'NPR Fresh Air'
    ])
    sinceDate = core.get_current_date_newsletter()
    key, num_songs, _, _, totdur, totsizebytes = core._get_library_stats_artist(
        keynum, token, fullURL=fullURL)
    mainstring = 'There are %d episodes of NPR Fresh Air.' % num_songs
    sizestring = 'The total size of NPR Fresh Air media is %s.' % get_formatted_size(
        totsizebytes)
    durstring = 'The total duration of NPR Fresh Air media is %s.' % get_formatted_duration(
        totdur)
    if sinceDate is not None:
        key, num_songs_since, _, _, \
            totdur_since, totsizebytes_since = core._get_library_stats_artist(
                keynum, token, fullURL=fullURL, sinceDate=sinceDate)
        if num_songs_since > 0:
            mainstring_since = ' '.join([
                'Since %s, I have added %d new Fresh Air episodes.' %
                (sinceDate.strftime('%B %d, %Y'), num_songs_since),
                'The total size of Fresh Air media I have added is %s.' %
                get_formatted_size(totsizebytes_since),
                'The total duration of Fresh Air media I have added is %s.' %
                get_formatted_duration(totdur_since)
            ])
            return ' '.join(
                [mainstring, sizestring, durstring, mainstring_since])
    return ' '.join([mainstring, sizestring, durstring])
Example #2
def get_summary_data_music_remote(
    token, fullURL = 'http://localhost:32400',
    sinceDate = datetime.datetime.strptime('January 1, 2020', '%B %d, %Y' ).date( ) ):
    """
    This returns summary information on songs from all music libraries on the Plex_ server, for use as part of the Plex_ newsletter sent out to one's Plex_ server friends. The email first summarizes ALL the music data, and then summarizes the music data uploaded and processed since a previous date. For example,
    
       As of December 29, 2020, there are 17,853 songs made by 889 artists in 1,764 albums. The total size of music media is 306.979 GB. The total duration of music media is 7 months, 18 days, 15 hours, 8 minutes, and 15.785 seconds.

       Since January 01, 2020, I have added 7,117 songs made by 700 artists in 1,180 albums. The total size of music media that I have added is 48.167 GB. The total duration of music media that I have added is 28 days, 15 hours, 25 minutes, and 37.580 seconds.
    
    :param str token: the Plex_ access token.
    :param str fullURL: the Plex_ server URL.
    :param date sinceDate: the :py:class:`datetime <datetime.date>` from which we have added songs. Default is :py:class:`date <datetime.date>` corresponding to ``January 1, 2020``.
    
    :returns: a :py:class:`string <str>` description of music media in all music libraries on the Plex_ server. If there is no Plex_ server or music library, returns ``None``.
    :rtype: str

    .. seealso:: :py:meth:`get_summary_body <howdy.email.email.get_summary_body>`.
    """
    libraries_dict = core.get_libraries( token, fullURL = fullURL, do_full = True )
    if libraries_dict is None: return None
    keynums = set(filter(lambda keynum: libraries_dict[ keynum ][ 1 ] == 'artist', libraries_dict ) )
    if len( keynums ) == 0: return None
    # sinceDate = core.get_current_date_newsletter( )
    datas = list(map(lambda keynum: core.get_library_stats( keynum, token, fullURL = fullURL ), keynums))
    music_summ = {
        'current_date_string' : datetime.datetime.now( ).date( ).strftime( '%B %d, %Y' ),
        'num_songs' : f'{sum(list(map(lambda data: data[ "num_songs" ], datas))):,}',
        'num_artists' : f'{sum(list(map(lambda data: data[ "num_artists" ], datas))):,}',
        'num_albums' : f'{sum(list(map(lambda data: data[ "num_albums" ], datas))):,}',
        'formatted_size' : get_formatted_size(sum(list(map(lambda data: data[ 'totsize' ], datas)))),
        'formatted_duration' : get_formatted_duration(sum(list(map(lambda data: data[ 'totdur' ], datas)))) }
    #
    ## now since sinceDate
    datas_since = list(filter(
        lambda data_since: data_since[ 'num_songs' ] > 0,
        map(lambda keynum: core.get_library_stats(
            keynum, token, fullURL = fullURL, sinceDate = sinceDate ), keynums ) ) )
    music_summ[ 'len_datas_since' ] = len( datas_since )
    if len( datas_since ) > 0:
        music_summ[ 'since_date_string' ] = sinceDate.strftime( '%B %d, %Y' )
        music_summ[ 'num_songs_since' ] = f'{sum(list(map(lambda data_since: data_since[ "num_songs" ], datas_since))):,}'
        music_summ[ 'num_artists_since' ] = f'{sum(list(map(lambda data_since: data_since[ "num_artists" ], datas_since))):,}'
        music_summ[ 'num_albums_since' ] = f'{sum(list(map(lambda data_since: data_since[ "num_albums" ], datas_since))):,}'
        music_summ[ 'formatted_size_since' ] = get_formatted_size( sum(list(map(lambda data_since: data_since[ 'totsize'], datas_since))))
        music_summ[ 'formatted_duration_since' ] = get_formatted_duration( sum(list(map(lambda data_since: data_since[ 'totdur' ], datas_since))) )
    #
    env = Environment( loader = FileSystemLoader( resourceDir ) )
    template = env.get_template( 'summary_data_music_template.rst' )
    musicstring = template.render( music_summ = music_summ )
    return musicstring
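A minimal usage sketch follows (hedged: the token is a placeholder, not a real Plex access token, and it assumes this function and its imports are available as in the listing above):

import datetime

token = 'PLACEHOLDER-PLEX-TOKEN'           # placeholder, not a real Plex access token
sinceDate = datetime.date( 2021, 1, 1 )    # summarize music added on or after this date
musicstring = get_summary_data_music_remote(
    token, fullURL = 'http://localhost:32400', sinceDate = sinceDate )
if musicstring is not None:
    print( musicstring )                   # rendered reStructuredText summary for the newsletter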
Example #3
def get_summary_data_television_remote(
    token, fullURL = 'http://localhost:32400',
    sinceDate = datetime.datetime.strptime('January 1, 2020', '%B %d, %Y' ).date( ) ):
    """
    This returns summary information on TV media from all television libraries on the Plex_ server, for use as part of the Plex_ newsletter sent out to one's Plex_ server friends. The email first summarizes ALL the TV data, and then summarizes the TV data uploaded and processed since a previous date. For example,

       As of December 29, 2020, there are 25,195 TV episodes in 298 TV shows. The total size of TV media is 6.690 TB. The total duration of TV media is 1 year, 5 months, 19 days, 9 hours, 29 minutes, and 13.919 seconds.

       Since January 01, 2020, I have added 5,005 TV episodes in 298 TV shows. The total size of TV media that I have added is 1.571 TB. The total duration of TV media that I have added is 3 months, 16 days, 4 hours, 52 minutes, and 15.406 seconds.

    :param str token: the Plex_ access token.
    :param str fullURL: the Plex_ server URL.
    :param date sinceDate: the :py:class:`datetime <datetime.date>` from which we have added TV episodes. Default is :py:class:`date <datetime.date>` corresponding to ``January 1, 2020``.
    
    :returns: a :py:class:`string <str>` description of TV media in all TV libraries on the Plex_ server. If there is no Plex_ server or TV library, returns ``None``.
    :rtype: str

    .. seealso:: :py:meth:`get_summary_body <howdy.email.email.get_summary_body>`.
    """
    libraries_dict = core.get_libraries( token, fullURL = fullURL, do_full = True )
    if libraries_dict is None: return None
    keynums = set(filter(lambda keynum: libraries_dict[ keynum ][ 1 ] == 'show', libraries_dict ) )
    if len( keynums ) == 0: return None
    #
    # sinceDate = core.get_current_date_newsletter( )
    datas = list(map(lambda keynum: core.get_library_stats( keynum, token, fullURL = fullURL ), keynums))
    tv_summ = {
        'current_date_string' : datetime.datetime.now( ).date( ).strftime( '%B %d, %Y' ),
        'num_episodes' : f'{sum(list(map(lambda data: data[ "num_tveps" ], datas))):,}',
        'num_shows' : f'{sum(list(map(lambda data: data[ "num_tvshows" ], datas))):,}',
        'formatted_size' : get_formatted_size(sum(list(map(lambda data: data[ 'totsize' ], datas)))),
        'formatted_duration' : get_formatted_duration(sum(list(map(lambda data: data[ 'totdur' ], datas)))) }
    datas_since = list(filter(
        lambda data_since: data_since[ 'num_tveps' ] > 0,
        map(lambda keynum: core.get_library_stats(
            keynum, token, fullURL = fullURL, sinceDate = sinceDate ), keynums) ) )
    tv_summ[ 'len_datas_since' ] = len( datas_since )
    if len( datas_since ) > 0:
        tv_summ[ 'since_date_string' ] = sinceDate.strftime( '%B %d, %Y' )
        tv_summ[ 'num_episodes_since' ] = f'{sum(list(map(lambda data_since: data_since[ "num_tveps" ], datas_since))):,}'
        tv_summ[ 'num_shows_since' ] = f'{sum(list(map(lambda data_since: data_since[ "num_tvshows" ], datas_since))):,}'
        tv_summ[ 'formatted_size_since' ] = get_formatted_size( sum(list(map(lambda data_since: data_since[ 'totsize'], datas_since))))
        tv_summ[ 'formatted_duration_since' ] = get_formatted_duration( sum(list(map(lambda data_since: data_since[ 'totdur' ], datas_since))) )
    env = Environment( loader = FileSystemLoader( resourceDir ) )
    template = env.get_template( 'summary_data_tv_template.rst' )
    tvstring = template.render( tv_summ = tv_summ )
    return tvstring
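A similar hedged sketch for the TV summary (all values are placeholders); the since-date can be built the same way the default argument is:

import datetime

token = 'PLACEHOLDER-PLEX-TOKEN'           # placeholder Plex access token
sinceDate = datetime.datetime.strptime( 'July 4, 2020', '%B %d, %Y' ).date( )
tvstring = get_summary_data_television_remote( token, sinceDate = sinceDate )
print( tvstring if tvstring is not None else 'no Plex server or TV library found' )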
Example #4
def _get_category_entry( cat ):
    mainstring = template_mainstring.render(
        current_date_string = current_date_string,
        num_movies = f'{sorted_by_genres[ cat ][ "totnum" ]:,}',
        totsize = get_formatted_size( sorted_by_genres[ cat ][ 'totsize' ] ),
        totdur = get_formatted_duration( sorted_by_genres[ cat ][ 'totdur' ] ) )
    if cat in sorted_by_genres_since and sorted_by_genres_since[ cat ][ 'totnum' ] > 0:
        num_movies_since = f'{sorted_by_genres_since[ cat ][ "totnum" ]:,}'
        totsize_since    = get_formatted_size( sorted_by_genres_since[ cat ][ 'totsize' ] )
        totdur_since     = get_formatted_duration( sorted_by_genres_since[ cat ][ 'totdur' ] )
        mainstring_since = template_sincestring.render(
            since_date_string = sinceDate.strftime( '%B %d, %Y' ),
            num_movies_since = num_movies_since,
            totsize_since = totsize_since,
            totdur_since = totdur_since )
        description = ' '.join([ mainstring, mainstring_since ])
        return { 'category' : cat, 'description' : description }
    return { 'category' : cat, 'description' : mainstring }
Example #5
def _print_summary(library_key, library_dict, token, fullURL):
    data = core.get_library_stats(library_key, token, fullURL=fullURL)
    mediatype = data['mediatype']
    title = data['title']
    columns = min(100, get_terminal_size().columns)
    if mediatype == 'movie':
        mystr = ' '.join([
            '"%s" is a movie library.' % title,
            'There are %d movies here.' % data['num_movies'],
            'The total size of movie media is %s.' %
            get_formatted_size(data['totsize']),
            'The total duration of movie media is %s.' %
            get_formatted_duration(data['totdur'])
        ])
    elif mediatype == 'show':
        mystr = ' '.join([
            '"%s" is a TV library.' % title,
            'There are %d TV files in %d TV shows.' %
            (data['num_tveps'], data['num_tvshows']),
            'The total size of TV media is %s.' %
            get_formatted_size(data['totsize']),
            'The total duration of TV shows is %s.' %
            get_formatted_duration(data['totdur'])
        ])
    elif mediatype == 'artist':
        num_songs = data['num_songs']
        num_artists = data['num_artists']
        num_albums = data['num_albums']
        totsize = data['totsize']
        totdur = data['totdur']
        mystr = ' '.join([
            '"%s" is a music library.' % title,
            'There are %d songs made by %d artists in %d albums.' %
            (num_songs, num_artists, num_albums),
            'The total size of music media is %s.' %
            get_formatted_size(totsize),
            'The total duration of music media is %s.' %
            get_formatted_duration(totdur)
        ])
    else:
        mystr = '"%s" is a library of unknown media type "%s".' % (title, mediatype)
    print('\n%s\n' %
          '\n'.join(textwrap.fill(mystr, width=columns).split('\n')))
Example #6
def get_title(elem):
    if 'size' in elem:
        return '%s (%s)' % (elem['title'], get_formatted_size(
            elem['size']))
    return '%s ()' % elem['title']
Example #7
def get_movie_torrent_zooqle(name, maxnum=10, verify=True):
    """
    Returns a :py:class:`tuple` of candidate movie Magnet links found using the Zooqle_ torrent service and the string ``"SUCCESS"``, if successful.

    :param str name: the movie string on which to search.
    :param int maxnum: optional argument, the maximum number of magnet links to return. Default is 10. Must be :math:`\ge 5`.
    :param bool verify:  optional argument, whether to verify SSL connections. Default is ``True``.
    
    :returns: if successful, then returns a two-member :py:class:`tuple`: the first member is a :py:class:`list` of elements that match the searched movie, ordered from *most* seeds and leechers to least. The second element is the string ``"SUCCESS"``. The keys in each element of the list are,

      * ``title`` is the name of the candidate movie to download, and in parentheses is the size of the download in MB or GB.
      * ``raw_title`` is *only* the name of the candidate movie to download.
      * ``seeders`` is the number of seeds for this Magnet link.
      * ``leechers`` is the number of leeches for this Magnet link.
      * ``link`` is the Magnet URI link.
      * ``torrent_size`` is the size of this torrent in MB.
    
    If this is unsuccessful, then returns an error :py:class:`tuple` of the form returned by :py:meth:`return_error_raw <howdy.core.return_error_raw>`.
    
    :rtype: tuple
    
    .. _Zooqle: https://zooqle.com
    """
    assert (maxnum >= 5)
    names_of_trackers = map(
        lambda tracker: tracker.replace(':', '%3A').replace('/', '%2F'), [
            'udp://tracker.opentrackr.org:1337/announce',
            'udp://open.demonii.com:1337', 'udp://tracker.pomf.se:80/announce',
            'udp://torrent.gresille.org:80/announce',
            'udp://11.rarbg.com/announce', 'udp://11.rarbg.com:80/announce',
            'udp://open.demonii.com:1337/announce',
            'udp://tracker.openbittorrent.com:80',
            'http://tracker.ex.ua:80/announce',
            'http://tracker.ex.ua/announce',
            'http://bt.careland.com.cn:6969/announce',
            'udp://glotorrents.pw:6969/announce'
        ])
    tracklist = ''.join(
        map(lambda tracker: '&tr=%s' % tracker, names_of_trackers))

    def _get_magnet_link(info_hash, title):
        download_url = "magnet:?xt=urn:btih:" + info_hash + "&dn=" + '+'.join(
            title.split()) + tracklist
        return download_url

    candname = re.sub("'", '', name)
    url = 'https://zooqle.com/search'
    params = {
        'q': '+'.join(candname.split() + [
            'category%3AMovie',
        ]),
        'fmt': 'rss'
    }
    paramurl = '?' + '&'.join(
        map(lambda tok: '%s=%s' % (tok, params[tok]), params))
    fullurl = urljoin(url, paramurl)
    response = requests.get(fullurl, verify=verify)
    if response.status_code != 200:
        return return_error_raw(
            'ERROR, COULD NOT FIND ZOOQLE TORRENTS FOR %s' % candname)
    myxml = BeautifulSoup(response.content, 'lxml')

    def is_valid_elem(elem):
        names = set(map(lambda elm: elm.name, elem.find_all()))
        return len(names & set([
            'torrent:infohash', 'torrent:seeds', 'torrent:peers',
            'torrent:contentlength'
        ])) == 4

    cand_items = list(
        filter(
            lambda elem: len(elem.find_all('title')) >= 1 and is_valid_elem(
                elem) and get_maximum_matchval(
                    max(elem.find_all('title')).get_text(), candname) >= 80,
            myxml.find_all('item')))

    def get_num_forelem(elem, name):
        valid_elm = list(
            filter(lambda elm: elm.name == 'torrent:%s' % name, elem))
        if len(valid_elm) == 0: return None
        valid_elm = valid_elm[0]
        return int(valid_elm.get_text())

    def get_infohash(elem):
        valid_elm = list(
            filter(lambda elm: elm.name == 'torrent:infohash', elem))
        if len(valid_elm) == 0: return None
        valid_elm = valid_elm[0]
        return valid_elm.get_text().lower()

    items_toshow = list(
        map(
            lambda elem: {
                'title':
                '%s (%s)' %
                (max(elem.find_all('title')).get_text(),
                 get_formatted_size(get_num_forelem(elem, 'contentlength'))),
                'raw_title':
                max(elem.find_all('title')).get_text(),
                'seeders':
                get_num_forelem(elem, 'seeds'),
                'leechers':
                get_num_forelem(elem, 'peers'),
                'link':
                _get_magnet_link(get_infohash(elem),
                                 max(elem.find_all('title')).get_text()),
                'torrent_size':
                float(get_num_forelem(elem, 'contentlength') * 1.0 / 1024**2)
            }, cand_items))
    if len(items_toshow) == 0:
        return return_error_raw(
            'ERROR, COULD NOT FIND ZOOQLE TORRENTS FOR %s' % candname)
    return sorted(items_toshow,
                  key=lambda item: -item['seeders'] - item['leechers']
                  )[:maxnum], 'SUCCESS'
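A hedged usage sketch for the Zooqle search (the movie title is illustrative only); on success the first tuple member is the list of candidate dictionaries, already sorted by seeders plus leechers:

# hedged usage sketch; the search string is illustrative
data, status = get_movie_torrent_zooqle( 'Star Trek Beyond', maxnum = 10 )
if status != 'SUCCESS':
    print( status )    # error message produced by return_error_raw
else:
    for item in data:
        print( '%s: %d seeders, %d leechers, %.1f MB' % (
            item[ 'raw_title' ], item[ 'seeders' ], item[ 'leechers' ],
            item[ 'torrent_size' ] ) )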
Example #8
def get_movie_torrent_eztv_io(name, maxnum=10, verify=True, tmdb_id=None):
    """
    Returns a :py:class:`tuple` of candidate movie Magnet links found using the `EZTV.IO`_ torrent service and the string ``"SUCCESS"``, if successful.

    :param str name: the movie on which to search.
    :param int maxnum: optional argument, the maximum number of magnet links to return. Default is 10. Must be :math:`\ge 5`.
    :param bool verify: optional argument, whether to verify SSL connections. Default is ``True``.
    :param str tmdb_id: optional argument. The TMDB_ ID of the movie.
    
    :returns: if successful, then returns a two-member :py:class:`tuple`: the first member is a :py:class:`list` of elements that match the searched movie, ordered from *most* seeds and leechers to least. The second element is the string ``"SUCCESS"``. The keys in each element of the list are,
       
      * ``title`` is the name of the candidate movie to download, and in parentheses is the size of the download in MB or GB.
      * ``raw_title`` is *only* the name of the candidate movie to download.
      * ``seeders`` is the number of seeds for this Magnet link.
      * ``leechers`` is the number of leeches for this Magnet link.
      * ``link`` is the Magnet URI link.
      * ``torrent_size`` is the size of this torrent in MB.
    
    If this is unsuccessful, then returns an error :py:class:`tuple` of the form returned by :py:meth:`return_error_raw <howdy.core.return_error_raw>`.
    
    :rtype: tuple

    .. warning:: As of |date|, cannot get it to work when giving it valid movie searches, such as ``"Star Trek Beyond"``. See :numref:`table_working_movietorrents`.
    
    .. _`EZTV.IO`: https://eztv.io
    .. _`Star Trek Beyond`: https://en.wikipedia.org/wiki/Star_Trek_Beyond
    """
    assert (maxnum >= 5)
    if tmdb_id is None:
        tmdb_id = movie.get_movie_tmdbids(name, verify=verify)
    if tmdb_id is None:
        return return_error_raw('FAILURE, COULD NOT FIND TMDB ID FOR %s.' %
                                name)
    #
    ## check that the name matches
    movie_name = movie.get_movie_info(tmdb_id,
                                      verify=verify)['title'].lower().strip()
    if movie_name != name.lower().strip():
        return return_error_raw('FAILURE, COULD NOT FIND IMDB ID FOR %s.' %
                                name)
    imdb_id = movie.get_imdbid_from_id(tmdb_id, verify=verify)
    if imdb_id is None:
        return return_error_raw('FAILURE, COULD NOT FIND IMDB ID FOR %s.' %
                                name)
    response = requests.get('https://eztv.io/api/get-torrents',
                            params={
                                'imdb_id': int(imdb_id.replace('t', '')),
                                'limit': 100,
                                'page': 0
                            },
                            verify=verify)
    if response.status_code != 200:
        return return_error_raw(
            'ERROR, COULD NOT FIND ANY TORRENTS FOR %s IN EZTV.IO' % name)
    alldat = response.json()
    if alldat['torrents_count'] == 0:
        return return_error_raw(
            'ERROR, COULD NOT FIND ANY TORRENTS FOR %s IN EZTV.IO' % name)
    all_torrents = alldat['torrents']
    for pageno in range(1, 101):
        if alldat['torrents_count'] < 100: break
        response = requests.get('https://eztv.io/api/get-torrents',
                                params={
                                    'imdb_id': int(imdb_id.replace('t', '')),
                                    'limit': 100,
                                    'page': pageno
                                },
                                verify=verify)
        if response.status_code != 200: break
        alldat = response.json()
        if alldat['torrents_count'] == 0: break
        all_torrents += alldat['torrents']
    all_torrents_mine = all_torrents[:maxnum]
    if len(all_torrents_mine) == 0:
        return return_error_raw('ERROR, COULD NOT FIND %s IN EZTV.IO' % name)
    return list(
        map(
            lambda tor: {
                'raw_title':
                tor['title'],
                'title':
                '%s (%s)' %
                (tor['title'], get_formatted_size(tor['size_bytes'])),
                'seeders':
                int(tor['seeds']),
                'leechers':
                int(tor['peers']),
                'link':
                tor['magnet_url'],
                'torrent_size':
                float(tor['size_bytes']) / 1024**2
            }, all_torrents_mine)), 'SUCCESS'
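A hedged sketch of calling the EZTV.IO search with an explicit TMDB ID (the ID below is illustrative and should be checked against TMDB); passing tmdb_id skips the internal name-to-ID lookup, although the title check still runs:

# hedged usage sketch; the TMDB ID is illustrative, verify it before relying on it
data, status = get_movie_torrent_eztv_io(
    'Star Trek Beyond', maxnum = 10, tmdb_id = 188927 )
if status == 'SUCCESS':
    best = data[ 0 ]    # entries come back in API order, truncated to maxnum
    print( best[ 'title' ], best[ 'link' ] )
else:
    print( 'EZTV.IO search failed:', status )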
Example #9
def get_movie_torrent_jackett(name,
                              maxnum=10,
                              verify=True,
                              doRaw=False,
                              tmdb_id=None):
    """
    Returns a :py:class:`tuple` of candidate movie Magnet links found using the main Jackett_ torrent searching service and the string ``"SUCCESS"``, if successful.

    :param str name: the movie string on which to search.
    :param int maxnum: optional argument, the maximum number of magnet links to return. Default is 10. Must be :math:`\ge 5`.
    :param bool verify: optional argument, whether to verify SSL connections. Default is ``True``.
    :param bool doRaw: optional argument. If ``True``, uses the IMDb_ information to search for the movie. Otherwise, uses the full string in ``name`` to search for the movie.
    :param int tmdb_id: optional argument. If defined, use this TMDB_ movie ID to search for magnet links.
    
    :returns: if successful, then returns a two-member :py:class:`tuple`: the first member is a :py:class:`list` of elements that match the searched movie, ordered from *most* seeds and leechers to least. The second element is the string ``"SUCCESS"``. The keys in each element of the list are,
       
      * ``title`` is the name of the candidate movie to download, and in parentheses the size of the candidate in MB or GB.
      * ``raw_title`` is *only* the name of the candidate movie to download.
      * ``seeders`` is the number of seeds for this Magnet link.
      * ``leechers`` is the number of leeches for this Magnet link.
      * ``link`` is the Magnet URI link.
      * ``torrent_size`` is the size of this torrent in MB.
    
    If this is unsuccessful, then returns an error :py:class:`tuple` of the form returned by :py:meth:`return_error_raw <howdy.core.return_error_raw>`.
    
    :rtype: tuple
    
    .. _Jackett: https://github.com/Jackett/Jackett
    """
    time0 = time.time()
    data = core.get_jackett_credentials()
    if data is None:
        return return_error_raw(
            'failure, could not get jackett server credentials')
    url, apikey = data
    endpoint = 'api/v2.0/indexers/all/results/torznab/api'
    popName = False
    if tmdb_id is not None: popName = True

    def _return_params(name, popName, tmdb_id):
        params = {'apikey': apikey, 'cat': 2000}
        if tmdb_id is not None:
            imdb_id = movie.get_imdbid_from_id(tmdb_id, verify=verify)
            params['imdbid'] = imdb_id
            return params
        elif doRaw:
            params['q'] = name
            return params

        tmdb_id = movie.get_movie_tmdbids(name, verify=verify)
        #if tmdb_id is None or doRaw and not popName:
        #    params['q'] = name
        #    return params
        #
        ## check that the name matches
        movie_name = movie.get_movie_info(
            tmdb_id, verify=verify)['title'].lower().strip()
        if movie_name != name.lower().strip():
            params['q'] = name
            return params
        imdb_id = movie.get_imdbid_from_id(tmdb_id, verify=verify)
        if imdb_id is None:
            params['q'] = name
            return params
        params['imdbid'] = imdb_id
        return params

    params = _return_params(name, popName, tmdb_id)
    if popName and 'q' in params: params.pop('q')
    logging.info('params: %s, mainURL = %s' % (params, urljoin(url, endpoint)))
    response = requests.get(urljoin(url, endpoint),
                            verify=verify,
                            params=params)
    if response.status_code != 200:
        return return_error_raw(' '.join([
            'failure, problem with jackett server accessible at %s.' % url,
            'Error code = %d. Error data = %s.' %
            (response.status_code, response.content)
        ]))
    logging.info('processed jackett torrents for %s in %0.3f seconds.' %
                 (name, time.time() - time0))
    html = BeautifulSoup(response.content, 'lxml')
    if len(html.find_all('item')) == 0:
        return return_error_raw(
            'failure, could not find movie %s with jackett.' % name)
    items = []

    def _get_magnet_url(item):
        magnet_url = item.find('torznab:attr', {'name': 'magneturl'})
        if magnet_url is not None and 'magnet' in magnet_url['value']:
            return magnet_url['value']
        #
        ## not found it here, must go into URL
        url2 = item.find('guid')
        if url2 is None: return None
        url2 = url2.text
        if not validators.url(url2): return None
        resp2 = requests.get(url2, verify=verify)
        if resp2.status_code != 200: return None
        h2 = BeautifulSoup(resp2.content, 'lxml')
        valid_magnet_links = set(
            map(
                lambda elem: elem['href'],
                filter(
                    lambda elem: 'href' in elem.attrs and 'magnet' in elem[
                        'href'], h2.find_all('a'))))
        if len(valid_magnet_links) == 0: return None
        return max(valid_magnet_links)

    for item in html.find_all('item'):
        title = item.find('title')
        if title is None: continue
        title = title.text
        torrent_size = item.find('size')
        if torrent_size is not None:
            torrent_size = float(torrent_size.text) / 1024**2
        seeders = item.find('torznab:attr', {'name': 'seeders'})
        if seeders is None: seeders = -1
        else: seeders = int(seeders['value'])
        leechers = item.find('torznab:attr', {'name': 'peers'})
        if leechers is None: leechers = -1
        else: leechers = int(leechers['value'])
        #
        ## now do one of two things to get the magnet URL
        magnet_url = _get_magnet_url(item)
        if magnet_url is None: continue
        myitem = {
            'raw_title': title,
            'title': title,
            'seeders': seeders,
            'leechers': leechers,
            'link': magnet_url
        }
        if torrent_size is not None:
            myitem['title'] = '%s (%s)' % (
                title, get_formatted_size(torrent_size * 1024**2))
            myitem['torrent_size'] = torrent_size
        items.append(myitem)
    if len(items) == 0:
        return return_error_raw('FAILURE, JACKETT CANNOT FIND %s' % name)
    return items[:maxnum], 'SUCCESS'
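A hedged usage sketch, assuming Jackett server credentials have already been stored where core.get_jackett_credentials can find them; doRaw = True forces a plain-text query instead of an IMDb-ID query:

# hedged usage sketch; assumes a configured Jackett server
items, status = get_movie_torrent_jackett( 'Star Trek Beyond', maxnum = 10, doRaw = True )
if status == 'SUCCESS':
    for item in items:
        print( '%s (seeders = %d, leechers = %d)' % (
            item[ 'title' ], item[ 'seeders' ], item[ 'leechers' ] ) )
else:
    print( status )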
Example #10
def get_summary_data_movies_remote(
    token, fullURL = 'http://localhost:32400',
    sinceDate = datetime.datetime.strptime('January 1, 2020', '%B %d, %Y' ).date( ) ):
    """
    This returns summary information on movie media from all movie libraries on the Plex_ server, for use as part of the Plex_ newsletter sent out to one's Plex_ server friends. The email first summarizes ALL the movie data, and then summarizes the movie data uploaded and processed since the last newsletter's date. Unlike :py:meth:`get_summary_data_music_remote <howdy.email.email.get_summary_data_music_remote>` and :py:meth:`get_summary_data_television_remote <howdy.email.email.get_summary_data_television_remote>`, this returns a :py:class:`list` of strings rather than a string.

    :param str token: the Plex_ access token.
    :param str fullURL: the Plex_ server URL.
    :param date sinceDate: the :py:class:`date <datetime.date>` from which we have added movies. Default is the :py:class:`date <datetime.date>` corresponding to ``January 1, 2020``.
    
    :returns: a :py:class:`string <str>` description of movie media in all movie libraries on the Plex_ server. If there is no Plex_ server or movie library, returns ``None``.
    :rtype: list

    .. seealso:: :py:meth:`get_summary_body <howdy.email.email.get_summary_body>`.
    """
    libraries_dict = core.get_libraries( token, fullURL = fullURL, do_full = True )
    if libraries_dict is None:
        return None
    keynums = set(filter(lambda keynum: libraries_dict[ keynum ][ 1 ] == 'movie', libraries_dict ) )
    if len( keynums ) == 0:
        return None
    #
    # sinceDate = core.get_current_date_newsletter( )
    #
    ## hard coding (for now) how to join by genres
    join_genres = { 'action' : [ 'thriller', 'western' ], 'comedy' : [ 'family', ], 'drama' : [ 'drame', ] }
    def _join_by_genre( sort_by_genre, join_genres ):
        alljoins = list(chain.from_iterable(map(lambda genre: join_genres[ genre ], join_genres ) ) )
        assert( len( alljoins ) == len( set( alljoins ) ) )
        assert( len( set( alljoins ) & set( join_genres ) ) == 0 )
        for genre in join_genres:
            g2s = set( join_genres[ genre ] ) & set( sort_by_genre )
            if len( g2s ) == 0: continue
            if genre not in sort_by_genre:
                sort_by_genre[ genre ] = {
                    'totnum' : 0, 'totdur' : 0.0, 'totsize' : 0.0 }
            for g2 in g2s:
                sort_by_genre[ genre ][ 'totnum' ] += sort_by_genre[ g2 ][ 'totnum' ]
                sort_by_genre[ genre ][ 'totdur' ] += sort_by_genre[ g2 ][ 'totdur' ]
                sort_by_genre[ genre ][ 'totsize' ] += sort_by_genre[ g2 ][ 'totsize' ]
            for g2 in g2s:
                sort_by_genre.pop( g2 )
    #
    current_date_string = datetime.datetime.now( ).date( ).strftime( '%B %d, %Y' )
    datas = list(map(lambda keynum: core.get_library_stats( keynum, token, fullURL = fullURL ), keynums ) )
    num_movies_since = -1
    sorted_by_genres = { }
    sorted_by_genres_since = { }
    for data in datas:
        data_sorted_by_genre = data[ 'genres' ]
        for genre in data_sorted_by_genre:
            if genre not in sorted_by_genres:
                sorted_by_genres[ genre ] = data_sorted_by_genre[ genre ].copy( )
                continue
            sorted_by_genres[ genre ][ 'totnum' ] += data_sorted_by_genre[ genre ][ 'totnum'  ]
            sorted_by_genres[ genre ][ 'totdur' ] += data_sorted_by_genre[ genre ][ 'totdur'  ]
            sorted_by_genres[ genre ][ 'totsize'] += data_sorted_by_genre[ genre ][ 'totsize' ]
    _join_by_genre( sorted_by_genres, join_genres )
    categories = set( sorted_by_genres )
    num_movies = f'{sum(list(map(lambda data: data[ "num_movies" ], datas ) ) ):,}'
    totdur = get_formatted_duration( sum(list(map(lambda data: data[ 'totdur' ], datas ) ) ) )
    totsize = get_formatted_size( sum(list(map(lambda data: data[ 'totsize' ], datas ) ) ) )
    movie_summ = {
        'current_date_string' : current_date_string,
        'num_movies' : num_movies,
        'num_categories' : len( categories ),
        'formatted_size' : totsize,
        'formatted_duration' : totdur }
    #
    datas_since = list(filter(
        lambda data_since: data_since[ 'num_movies' ] > 0,
        map(lambda keynum: core.get_library_stats(
            keynum, token, fullURL = fullURL, sinceDate = sinceDate ), keynums ) ) )
    movie_summ[ 'len_datas_since' ] = len( datas_since )
    if len( datas_since ) != 0:
        for data_since in datas_since:
            data_since_sorted_by_genre = data_since[ 'genres' ]
            for genre in data_since_sorted_by_genre:
                if genre not in sorted_by_genres_since:
                    sorted_by_genres_since[ genre ] = data_since_sorted_by_genre[ genre ].copy( )
                    continue
                sorted_by_genres_since[ genre ][ 'totnum' ] += data_since_sorted_by_genre[ genre ][ 'totnum'  ]
                sorted_by_genres_since[ genre ][ 'totdur' ] += data_since_sorted_by_genre[ genre ][ 'totdur'  ]
                sorted_by_genres_since[ genre ][ 'totsize'] += data_since_sorted_by_genre[ genre ][ 'totsize' ]
        _join_by_genre( sorted_by_genres_since, join_genres )
        num_movies_since = f'{sum(list(map(lambda data_since: data_since[ "num_movies" ], datas_since ) ) ):,}'
        categories_since = set( sorted_by_genres_since )
        totsize_since = get_formatted_size( sum(list(map(lambda data_since: data_since[ 'totsize' ], datas_since ) ) ) )
        totdur_since = get_formatted_duration( sum(list(map(lambda data_since: data_since[ 'totdur' ], datas_since ) ) ) )
        movie_summ[ 'since_date_string' ] = sinceDate.strftime( '%B %d, %Y' )
        movie_summ[ 'num_movies_since' ] = num_movies_since
        movie_summ[ 'num_categories_since' ] = len( categories_since )
        movie_summ[ 'formatted_size_since' ] = totsize_since
        movie_summ[ 'formatted_duration_since' ] =  totdur_since
        
    #
    ## get last 7 movies that I have added, to pass to JINJA template
    lastN_movies = core.get_lastN_movies( 7, token, fullURL = fullURL, useLastNewsletterDate = False )
    last_N_movies = [ ]
    def _get_nth_movie( lastN_entry ):
        title, year, date, url = lastN_entry
        if url is None:
            return {
                'hasURL' : False,
                'name' : title,
                'year' : year,
                'added_date_string' : date.strftime( '%B %d, %Y' ),
                'url' : '' }
        return {
            'hasURL' : True,
            'name' : title,
            'year' : year,
            'added_date_string' : date.strftime( '%B %d, %Y' ),
            'url' : url }
    last_N_movies = list(map(_get_nth_movie, lastN_movies ) )
    #
    ## catmovstrings list to pass to JINJA template
    template_mainstring = Template(' '.join([
        'As of ``{{ current_date_string }}``, there are {{ num_movies }} movies in this category.',
        'The total size of movie media here is {{ totsize }}.',
        'The total duration of movie media here is {{ totdur }}.' ]) )
    template_sincestring = Template(' '.join([
        'Since ``{{ since_date_string }}``, I have added {{ num_movies_since }} movies in this category.',
        'The total size of movie media I added here is {{ totsize_since }}.',
        'The total duration of movie media I added here is {{ totdur_since }}.' ] ) )
    def _get_category_entry( cat ):
        mainstring = template_mainstring.render(
            current_date_string = current_date_string,
            num_movies = f'{sorted_by_genres[ cat ][ "totnum" ]:,}',
            totsize = get_formatted_size( sorted_by_genres[ cat ][ 'totsize' ] ),
            totdur = get_formatted_duration( sorted_by_genres[ cat ][ 'totdur' ] ) )
        if cat in sorted_by_genres_since and sorted_by_genres_since[ cat ][ 'totnum' ] > 0:
            num_movies_since = f'{sorted_by_genres_since[ cat ][ "totnum" ]:,}'
            totsize_since    = get_formatted_size( sorted_by_genres_since[ cat ][ 'totsize' ] )
            totdur_since     = get_formatted_duration( sorted_by_genres_since[ cat ][ 'totdur'  ] )
            mainstring_since = template_sincestring.render(
                since_date_string = sinceDate.strftime( '%B %d, %Y' ),
                num_movies_since = num_movies_since,
                totsize_since = totsize_since,
                totdur_since = totdur_since )
            description = ' '.join([ mainstring, mainstring_since ])
            return { 'category' : cat, 'description' : description }
        return { 'category' : cat, 'description' : mainstring }
    catmovs = list(map(_get_category_entry, sorted( sorted_by_genres ) ) )
    env = Environment( loader = FileSystemLoader( resourceDir ) )
    template = env.get_template( 'summary_data_movie_template.rst' )
    movstring = template.render( movie_summ = movie_summ, last_N_movies = last_N_movies, catmovs = catmovs )
    return movstring
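To make the genre-joining step concrete, here is a small self-contained illustration of what _join_by_genre does (the counts are invented): the 'thriller' and 'western' totals fold into 'action', and the merged sub-genre keys are removed.

# self-contained illustration of the genre join; all numbers are invented
join_genres = { 'action' : [ 'thriller', 'western' ] }
sort_by_genre = {
    'action'   : { 'totnum' : 10, 'totdur' : 1.0e5, 'totsize' : 2.0e10 },
    'thriller' : { 'totnum' :  4, 'totdur' : 4.0e4, 'totsize' : 8.0e9  },
    'western'  : { 'totnum' :  2, 'totdur' : 2.0e4, 'totsize' : 4.0e9  } }
for genre in join_genres:
    for g2 in set( join_genres[ genre ] ) & set( sort_by_genre ):
        if genre not in sort_by_genre:
            sort_by_genre[ genre ] = { 'totnum' : 0, 'totdur' : 0.0, 'totsize' : 0.0 }
        for key in ( 'totnum', 'totdur', 'totsize' ):
            sort_by_genre[ genre ][ key ] += sort_by_genre[ g2 ][ key ]
        sort_by_genre.pop( g2 )
print( sort_by_genre )
# {'action': {'totnum': 16, 'totdur': 160000.0, 'totsize': 32000000000.0}}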
Example #11
def get_summary_data_thisamericanlife_remote(token,
                                             fullURL='http://localhost:32400'):
    libraries_dict = core.get_libraries(token, fullURL=fullURL)
    keynum = max([
        key for key in libraries_dict
        if libraries_dict[key] == 'This American Life'
    ])
    sinceDate = core.get_current_date_newsletter()
    key, song_data = core._get_library_data_artist(keynum,
                                                   token,
                                                   fullURL=fullURL)
    num_episodes = 0
    totdur = 0.0
    totsizebytes = 0.0
    for key in song_data:
        for key2 in song_data[key]:
            num_episodes += len(song_data[key][key2])
            for track in song_data[key][key2]:
                name, dt, dur, sizebytes = track
                totdur += dur
                totsizebytes += sizebytes
    mainstring = 'There are %d episodes in %d series in This American Life.' % (
        num_episodes, len(song_data))
    sizestring = 'The total size of This American Life media is %s.' % \
        get_formatted_size( totsizebytes )
    durstring = 'The total duration of This American Life media is %s.' % \
        get_formatted_duration( totdur )
    if sinceDate is None:
        pristrings = [
            ' '.join([mainstring, sizestring, durstring]),
        ]
    else:
        key, song_data_since = core._get_library_data_artist(
            keynum, token, fullURL=fullURL, sinceDate=sinceDate)
        num_episodes_since = 0
        totdur_since = 0.0
        totsizebytes_since = 0.0
        for key in song_data_since:
            for key2 in song_data_since[key]:
                num_episodes_since += len(song_data_since[key][key2])
                for track in song_data_since[key][key2]:
                    name, dt, dur, sizebytes = track
                    totdur_since += dur
                    totsizebytes_since += sizebytes
        if num_episodes_since > 0:
            mainstring_since = ' '.join([
                'Since %s, I have added %d new This American Life episodes.' %
                (sinceDate.strftime('%B %d, %Y'), num_episodes_since),
                'The total size of This American Life media I added is %s.' %
                get_formatted_size(totsizebytes_since),
                'The total duration of This American Life media I added is %s.'
                % get_formatted_duration(totdur_since)
            ])
            pristrings = [
                ' '.join([mainstring, sizestring, durstring,
                          mainstring_since]),
            ]
        else:
            pristrings = [
                ' '.join([mainstring, sizestring, durstring]),
            ]
    #
    catpristrings = {}
    for album in song_data:
        if album == 'Ira Glass': actalbum = 'This American Life'
        else: actalbum = album
        totdur = 0.0
        totsizebytes = 0.0
        num_episodes = 0
        for key2 in song_data[album]:
            num_episodes += len(song_data[album][key2])
            for track in song_data[album][key2]:
                name, dt, dur, sizebytes = track
                totdur += dur
                totsizebytes += sizebytes
        mainstring = 'There are %d episodes in this category.' % num_episodes
        sizestring = 'The total size of media here is %s.' % get_formatted_size(
            totsizebytes)
        durstring = 'The total duration of media here is %s.' % get_formatted_duration(
            totdur)
        if sinceDate is None:
            mystring = ' '.join([mainstring, sizestring, durstring])
        else:
            if album not in song_data_since:
                mystring = ' '.join([mainstring, sizestring, durstring])
            else:
                totdur_since = 0.0
                totsizebytes_since = 0.0
                num_episodes_since = 0
                for key2 in song_data_since[album]:
                    num_episodes_since += len(song_data_since[album][key2])
                    for track in song_data_since[album][key2]:
                        name, dt, dur, sizebytes = track
                        totdur_since += dur
                        totsizebytes_since += sizebytes
                if num_episodes_since > 0:
                    mainstring_since = ' '.join([
                        'Since %s, I have added %d new episodes in this category.'
                        %
                        (sinceDate.strftime('%B %d, %Y'), num_episodes_since),
                        'The total size of media I added here is %s.' %
                        get_formatted_size(totsizebytes_since),
                        'The total duration of media I added here is %s.' %
                        get_formatted_duration(totdur_since)
                    ])
                    mystring = ' '.join(
                        [mainstring, sizestring, durstring, mainstring_since])
                else:
                    mystring = ' '.join([mainstring, sizestring, durstring])
        catpristrings[actalbum] = mystring
    pristrings.append(catpristrings)
    return pristrings
Example #12
    def getShowSummary(cls, seriesName, tvdata_on_plex, missing_eps):
        seasons_info = tvdata_on_plex[seriesName]['seasons']
        overview = tvdata_on_plex[seriesName]['summary']
        didend = tvdata_on_plex[seriesName]['didEnd']
        num_total = sum(
            list(
                map(lambda seasno: len(seasons_info[seasno]['episodes']),
                    set(seasons_info) - set([0]))))
        if seriesName not in missing_eps: num_missing = 0
        else: num_missing = len(missing_eps[seriesName])
        if didend: show_status = "Show has ended"
        else: show_status = "Show is still ongoing"
        minDate = min(
            map(
                lambda seasno: min(
                    map(
                        lambda epno: seasons_info[seasno]['episodes'][epno][
                            'date aired'], seasons_info[seasno]['episodes'])),
                set(seasons_info) - set([0])))
        maxDate = max(
            map(
                lambda seasno: max(
                    map(
                        lambda epno: seasons_info[seasno]['episodes'][epno][
                            'date aired'], seasons_info[seasno]['episodes'])),
                set(seasons_info) - set([0])))

        html = BeautifulSoup(
            """
        <html>
        <p>Summary for %s.</p>
        <p>%s.</p>
        <p>%02d episodes, %02d missing.</p>
        <p>First episode aired on %s.</p>
        <p>Last episode aired on %s.</p>
        </html>""" %
            (seriesName, show_status, num_total, num_missing,
             minDate.strftime('%B %d, %Y'), maxDate.strftime('%B %d, %Y')),
            'lxml')
        body_elem = html.find_all('body')[0]
        html2 = BeautifulSoup(
            """
        <html>
        <body>
        </body>
        </html>""", 'lxml')
        body2_elem = html2.find_all('body')[0]
        if len(overview) != 0:
            summary_tag = html2.new_tag("p")
            summary_tag.string = overview
            body2_elem.append(summary_tag)
        average_duration_in_secs = numpy.array(
            list(
                chain.from_iterable(
                    map(
                        lambda seasno: list(
                            map(
                                lambda epno: seasons_info[seasno]['episodes'][
                                    epno]['duration'], seasons_info[seasno][
                                        'episodes'])),
                        set(seasons_info) - set([0]))))).mean()
        average_size_in_bytes = numpy.array(
            list(
                chain.from_iterable(
                    map(
                        lambda seasno: list(
                            map(
                                lambda epno: seasons_info[seasno]['episodes']
                                [epno]['size'], seasons_info[seasno]['episodes'
                                                                     ])),
                        set(seasons_info) - set([0]))))).mean()
        dur_tag = html.new_tag("p")
        dur_tag.string = "average duration of %02d episodes: %s." % (
            num_total, get_formatted_duration(average_duration_in_secs))
        siz_tag = html.new_tag("p")
        siz_tag.string = "average size of %02d episodes: %s." % (
            num_total, get_formatted_size(average_size_in_bytes))
        body_elem.append(dur_tag)
        body_elem.append(siz_tag)
        return html.prettify(), html2.prettify()