Example 1
def _fuzzy_title(a, b):
    ''' Determines how much of a is in b
    a (str): String to match against b
    b (str): String to match a against

    Order of a and b matters.

    a is broken into words which are compared against b's words.

    e.g.:
    _fuzzy_title('This is string a', 'This is string b and has extra words.')
    Returns 75 since 75% of a's words are in b.

    Returns int
    '''

    a = a.replace('&', 'and')
    b = b.replace('&', 'and')

    a_words = Url.normalize(a).split(' ')
    b_words = Url.normalize(b).split(' ')

    m = 0
    a_len = len(a_words)

    for i in a_words:
        if i in b_words:
            b_words.remove(i)
            m += 1

    return int((m / a_len) * 100)
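
The helper above leans on the surrounding project's Url.normalize for cleanup. As a rough standalone sketch of the same word-overlap idea, the following uses a hypothetical _normalize helper (lowercase plus punctuation stripping) in its place; it is an illustration, not the project's code.

import re

def _normalize(s):
    # hypothetical stand-in for Url.normalize: lowercase, strip punctuation
    return re.sub(r'[^a-z0-9 ]', '', s.lower())

def fuzzy_title(a, b):
    ''' Percentage of a's words that also appear in b '''
    a_words = _normalize(a.replace('&', 'and')).split()
    b_words = _normalize(b.replace('&', 'and')).split()
    matched = 0
    for word in a_words:
        if word in b_words:
            b_words.remove(word)  # consume the match so repeated words need repeated hits
            matched += 1
    return int((matched / len(a_words)) * 100) if a_words else 0

print(fuzzy_title('This is string a', 'This is string b and has extra words.'))  # 75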
Example 2
    def fuzzy_title(self, titles):
        ''' Score and remove results based on title match
        titles (list): titles to match against

        If titles is an empty list every result is treated as a perfect match

        Iterates through self.results and removes any entry that does not
            fuzzy match 'title' > 70.
        Adds fuzzy_score / 5 points (up to 20) to ['score']

        Does not return
        '''

        logging.info('Checking title match.')

        lst = []
        if titles == []:
            for result in self.results:
                result['score'] += 20
                lst.append(result)
        else:
            for result in self.results:
                if result['type'] == 'import' and result not in lst:
                    result['score'] += 20
                    lst.append(result)
                    continue
                test = Url.normalize(result['title'])
                matches = [fuzz.partial_ratio(Url.normalize(title), test) for title in titles]
                if any([match > 70 for match in matches]):
                    result['score'] += int(max(matches) / 5)
                    lst.append(result)
                else:
                    logging.debug('{} best title match was {}%, removing search result.'.format(test, max(matches)))
        self.results = lst
        logging.info('Keeping {} results.'.format(len(self.results)))
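
Here fuzz.partial_ratio presumably comes from fuzzywuzzy (or its maintained fork, thefuzz). A minimal sketch of the same keep/score rule, with plain lowercasing standing in for Url.normalize and made-up sample results:

from fuzzywuzzy import fuzz  # assumption: the project's `fuzz` is fuzzywuzzy/thefuzz

results = [  # illustrative search results
    {'title': 'Movie.Title.2016.1080p.BluRay', 'score': 0, 'type': 'nzb'},
    {'title': 'Completely.Different.Release.720p', 'score': 0, 'type': 'nzb'},
]
titles = ['Movie Title 2016']

kept = []
for result in results:
    test = result['title'].replace('.', ' ').lower()  # crude stand-in for Url.normalize
    matches = [fuzz.partial_ratio(title.lower(), test) for title in titles]
    if any(match > 70 for match in matches):
        result['score'] += int(max(matches) / 5)  # a perfect match adds 20 points
        kept.append(result)

print([r['title'] for r in kept])  # only the matching release is kept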
Example 3
    def get_imdbid(self, tmdbid=None, title=None, year=''):
        ''' Gets imdbid from tmdbid or title and year
        tmdbid: str TMDB movie id #
        title: str movie title
        year: str year of movie release

        MUST supply either tmdbid or title. Year is optional with title, but results
            are more reliable with it.

        Returns str imdbid or None on failure
        '''

        if not tmdbid and not title:
            logging.warning(
                'Neither tmdbid nor title supplied. Unable to find imdbid.')
            return None

        if not tmdbid:
            title = Url.normalize(title)
            year = Url.normalize(year)

            url = 'https://api.themoviedb.org/3/search/movie?api_key={}&language=en-US&query={}&year={}&page=1&include_adult=false'.format(
                _k(b'tmdb'), title, year)

            while self.get_tokens() < 3:
                sleep(0.3)
            self.use_token()

            try:
                results = json.loads(Url.open(url).text)
                results = results['results']
                if results:
                    tmdbid = results[0]['id']
                else:
                    return None
            except (SystemExit, KeyboardInterrupt):
                raise
            except Exception as e:  # noqa
                logging.error('Error attempting to get TMDBID from TMDB.',
                              exc_info=True)
                return None

        url = 'https://api.themoviedb.org/3/movie/{}?api_key={}'.format(
            tmdbid, _k(b'tmdb'))

        while self.get_tokens() < 3:
            sleep(0.3)
        self.use_token()

        try:
            results = json.loads(Url.open(url).text)
            return results.get('imdb_id')
        except Exception as e:  # noqa
            logging.error('Error attempting to get IMDBID from TMDB.',
                          exc_info=True)
            return None
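
Url.open, _k(b'tmdb') and the token counters (get_tokens/use_token) are project helpers for HTTP access, key retrieval and rate limiting. Stripped of those, the same two-step lookup can be sketched with requests and a placeholder API key:

import requests

API_KEY = 'YOUR_TMDB_API_KEY'  # placeholder; the original resolves its key via _k(b'tmdb')

def get_imdbid(title, year=''):
    # Step 1: search TMDB by title (and optional year) to find a tmdbid
    search = requests.get('https://api.themoviedb.org/3/search/movie',
                          params={'api_key': API_KEY, 'language': 'en-US', 'query': title,
                                  'year': year, 'page': 1, 'include_adult': 'false'},
                          timeout=10).json()
    if not search.get('results'):
        return None
    tmdbid = search['results'][0]['id']

    # Step 2: the movie record itself carries the imdb_id field
    movie = requests.get('https://api.themoviedb.org/3/movie/{}'.format(tmdbid),
                         params={'api_key': API_KEY}, timeout=10).json()
    return movie.get('imdb_id')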
Example 4
    def get_imdbid(tmdbid=None, title=None, year=''):
        ''' Gets imdbid from tmdbid or title and year
        tmdbid (str): themoviedatabase id #
        title (str): movie title
        year (str/int): year of movie release

        MUST supply either tmdbid or title. Year is optional with title, but results
            are more reliable with it.

        Returns str imdbid, or an empty string on failure
        '''

        if not tmdbid and not title:
            logging.warning(
                'Neither tmdbid nor title supplied. Unable to find imdbid.')
            return ''

        if not tmdbid:
            title = Url.normalize(title)
            year = Url.normalize(year)

            url = 'https://api.themoviedb.org/3/search/movie?api_key={}&language=en-US&query={}&year={}&page=1&include_adult={}'.format(
                _k(b'tmdb'), title, year,
                'true' if core.CONFIG['Search']['allowadult'] else 'false')

            TheMovieDatabase._use_token()

            try:
                results = json.loads(Url.open(url).text)
                results = results['results']
                if results:
                    tmdbid = results[0]['id']
                else:
                    return ''
            except (SystemExit, KeyboardInterrupt):
                raise
            except Exception as e:
                logging.error('Error attempting to get TMDBID from TMDB.',
                              exc_info=True)
                return ''

        url = 'https://api.themoviedb.org/3/movie/{}?api_key={}'.format(
            tmdbid, _k(b'tmdb'))

        TheMovieDatabase._use_token()

        try:
            results = json.loads(Url.open(url).text)
            return results.get('imdb_id')
        except Exception as e:
            logging.error('Error attempting to get IMDBID from TMDB.',
                          exc_info=True)
            return ''
Example 5
    def fuzzy_title(self, titles):
        ''' Score and remove results based on title match
        titles (list): titles to match against

        If titles is an empty list every result is treated as a perfect match

        Iterates through self.results and removes any entry that does not
            fuzzy match 'title' > 70.
        Adds fuzzy_score / 5 points (up to 20) to ['score']

        Does not return
        '''

        logging.info('Checking title match.')

        lst = []
        if titles == []:
            logging.debug(
                'No titles available to compare, scoring all as perfect match.'
            )
            for result in self.results:
                result['score'] += 20
                lst.append(result)
        else:
            for result in self.results:
                if result['type'] == 'import' and result not in lst:
                    logging.debug(
                        '{} is an Import, scoring as a perfect match.'.format(
                            result['title']))
                    result['score'] += 20
                    lst.append(result)
                    continue
                release = Url.normalize(result['title'])

                logging.debug('Comparing release {} with titles {}.'.format(
                    result['title'], titles))
                matches = [
                    lm.score(release, Url.normalize(title)) * 100
                    for title in titles
                ]
                if any(match > 70 for match in matches):
                    result['score'] += int(max(matches) / 5)
                    lst.append(result)
                else:
                    logging.debug(
                        '{} best title match was {}%, removing search result.'.
                        format(release, max(matches)))
        self.results = lst
        logging.info('Keeping {} results.'.format(len(self.results)))
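
This variant swaps fuzz.partial_ratio for lm.score, an external matcher that apparently returns a 0-1 value (hence the * 100). As a rough stand-in, difflib.SequenceMatcher from the standard library produces a comparable 0-1 ratio:

from difflib import SequenceMatcher

def score(a, b):
    # stand-in for lm.score: 0.0-1.0 similarity between two strings
    return SequenceMatcher(None, a, b).ratio()

release = 'movie title 2016 1080p bluray'
matches = [score(release, title.lower()) * 100 for title in ['Movie Title 2016']]
print(any(match > 70 for match in matches), int(max(matches) / 5))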
Example 6
def trailer(title_date):
    ''' Gets trailer embed ID from Youtube.
    title_date (str): movie title and date ('Movie Title 2016')

    Attempts to connect 3 times in case Youtube is down or not responding
    Can fail if no response is received.

    Returns str
    '''

    logging.info('Getting trailer url from YouTube for {}'.format(title_date))

    search_term = Url.normalize((title_date + '+trailer'))

    url = 'https://www.googleapis.com/youtube/v3/search?part=snippet&q={}&maxResults=1&key={}'.format(search_term, _k(b'youtube'))

    tries = 0
    while tries < 3:
        try:
            results = json.loads(Url.open(url).text)
            return results['items'][0]['id']['videoId']
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            if tries == 2:
                logging.error('Unable to get trailer from Youtube.', exc_info=True)
            tries += 1
    return ''
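
A bare sketch of the same three-attempt loop against the YouTube Data API v3 search endpoint, using requests and a placeholder key (the original pulls its key through _k(b'youtube') and fetches with Url.open):

import logging
import requests

API_KEY = 'YOUR_YOUTUBE_API_KEY'  # placeholder

def trailer_video_id(title_date):
    params = {'part': 'snippet', 'q': title_date + ' trailer', 'maxResults': 1, 'key': API_KEY}
    for attempt in range(3):
        try:
            results = requests.get('https://www.googleapis.com/youtube/v3/search',
                                   params=params, timeout=10).json()
            return results['items'][0]['id']['videoId']
        except Exception:
            if attempt == 2:
                logging.error('Unable to get trailer from Youtube.', exc_info=True)
    return ''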
Example 7
    def get_trailer(self, title_date):
        ''' Gets trailer embed url from Youtube.
        :param title_date: str movie title and date ("Movie Title 2016")

        Attempts to connect 3 times in case Youtube is down or not responding
        Can fail if no response is received.

        Returns str or None
        '''

        search_term = Url.normalize((title_date + '+trailer'))

        url = u"https://www.googleapis.com/youtube/v3/search?part=snippet&q={}&maxResults=1&key={}".format(
            search_term, _k(b'youtube'))

        tries = 0
        while tries < 3:
            try:
                results = json.loads(Url.open(url).text)
                return results['items'][0]['id']['videoId']
            except (SystemExit, KeyboardInterrupt):
                raise
            except Exception as e:  # noqa
                if tries == 2:
                    logging.error('Unable to get trailer from Youtube.',
                                  exc_info=True)
                tries += 1
        return None
Example 8
    def _search_title(self, title):
        ''' Search TMDB for title
        title (str): movie title

        Title can include the year, e.g. 'Movie Title 2017'

        Returns list of results
        '''

        title = Url.normalize(title)

        url = 'https://api.themoviedb.org/3/search/movie?page=1&include_adult=false&'
        if title[-4:].isdigit():
            query = 'query={}&year={}'.format(title[:-5], title[-4:])
        else:
            query = 'query={}'.format(title)

        url = url + query
        logging.info('Searching TMDB {}'.format(url))
        url = url + '&api_key={}'.format(_k(b'tmdb'))

        self.use_token()

        try:
            results = json.loads(Url.open(url).text)
            if results.get('success') is False:  # TMDB error payloads carry a JSON boolean success flag
                return []
            else:
                return results['results'][:6]
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            logging.error('Error searching for title on TMDB.', exc_info=True)
            return []
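
The only subtle part is splitting an optional trailing year off the query string. That slice logic in isolation (hypothetical helper name):

def split_title_year(title):
    # 'Movie Title 2017' -> ('Movie Title', '2017'); no trailing year -> (title, None)
    if len(title) > 5 and title[-4:].isdigit():
        return title[:-5], title[-4:]
    return title, None

print(split_title_year('Movie Title 2017'))  # ('Movie Title', '2017')
print(split_title_year('Movie Title'))       # ('Movie Title', None)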
Example 9
def _search_db(title_year):
    ''' Helper for backlog_search
    title_year (str): movie title and year 'Black Swan 2010'

    Returns list of found predb entries
    '''

    title_year = Url.normalize(title_year, ascii_only=True)

    categories = 'movies'
    if core.CONFIG['Search'].get('predb_unknown'):
        categories += ',unknown'
    url = 'http://predb.me/?cats={}&search={}&rss=1'.format(
        categories, title_year)

    try:
        response = Url.open(url).text
        results_xml = response.replace('&', '%26')
        items = _parse_predb_xml(results_xml)
        return items
    except (SystemExit, KeyboardInterrupt):
        raise
    except Exception as e:
        logging.error('Predb.me search failed.', exc_info=True)
        return []
Example 10
    def add_nzb(data):
        ''' Adds nzb file to sab to download
        :param data: dict of nzb information

        Returns dict {'response': True, 'downloadid': 'id'}
                     {'response': False, 'error': 'exception'}

        '''

        conf = core.CONFIG['Downloader']['Usenet']['Sabnzbd']

        host = conf['host']
        port = conf['port']
        api = conf['api']

        base_url = 'http://{}:{}/sabnzbd/api?apikey={}'.format(host, port, api)

        mode = 'addurl'
        name = urllib.parse.quote(data['guid'])
        nzbname = Url.normalize(data['title'])
        cat = conf['category']
        priority_keys = {
            'Paused': '-2',
            'Low': '-1',
            'Normal': '0',
            'High': '1',
            'Forced': '2'
        }
        priority = priority_keys[conf['priority']]

        command_url = '&mode={}&name={}&nzbname={}&cat={}&priority={}&output=json'.format(
            mode, name, nzbname, cat, priority)

        url = base_url + command_url

        try:
            response = json.loads(Url.open(url).text)

            if response['status'] is True and len(response['nzo_ids']) > 0:
                downloadid = response['nzo_ids'][0]
                logging.info(
                    'NZB sent to SABNzbd - downloadid {}.'.format(downloadid))
                return {'response': True, 'downloadid': downloadid}
            else:
                logging.error(
                    'Unable to send NZB to Sabnzbd. {}'.format(response))
                return {'response': False, 'error': 'Unable to add NZB.'}

        except Exception as e:
            logging.error('Unable to send NZB to Sabnzbd.', exc_info=True)
            return {'response': False, 'error': str(e)}
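
The same addurl call against SABnzbd's API, sketched with requests and placeholder connection details (host, port, key and category below are illustrative, not the project's configuration):

import requests

def add_nzb(guid, title):
    base = 'http://localhost:8080/sabnzbd/api'  # placeholder host/port
    params = {'apikey': 'YOUR_SAB_API_KEY', 'mode': 'addurl', 'name': guid,
              'nzbname': title, 'cat': 'movies', 'priority': '0', 'output': 'json'}
    try:
        response = requests.get(base, params=params, timeout=10).json()
        if response.get('status') is True and response.get('nzo_ids'):
            return {'response': True, 'downloadid': response['nzo_ids'][0]}
        return {'response': False, 'error': 'Unable to add NZB.'}
    except Exception as e:
        return {'response': False, 'error': str(e)}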
Example 11
    def _search_db(self, title_year):
        ''' Helper for backlog_search
        title_year (str): movie title and year 'Black Swan 2010'

        Returns list of found predb entries
        '''

        title_year = Url.normalize(title_year)

        url = 'http://predb.me/?cats=movies&search={}&rss=1'.format(title_year)

        try:
            response = Url.open(url).text
            results_xml = response.replace('&', '%26')
            items = self._parse_predb_xml(results_xml)
            return items
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            logging.error('Predb.me search failed.', exc_info=True)
            return []
Example 12
    def _fuzzy_match(self, predb_titles, title, year):
        ''' Fuzzy matches title with predb titles
        predb_titles (list): titles in predb response
        title (str): title to match to rss titles
        year (str): year of movie release

        Checks for any fuzzy match over 60%

        Returns bool
        '''

        movie = Url.normalize('{}.{}'.format(title, year), ascii_only=True).replace(' ', '.')
        for pdb in predb_titles:
            if year not in pdb:
                continue
            pdb = pdb.split(year)[0] + year
            match = lm.score(pdb.replace(' ', '.'), movie) * 100
            if match > 60:
                logging.debug('{} matches {} at {}%'.format(pdb, movie, int(match)))
                return True
        return False
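
The same idea in standalone form: the predb title is cut right after the year so release tags (resolution, group) do not drag the score down, and dots are used as the common word separator. difflib.SequenceMatcher stands in for the project's lm.score:

from difflib import SequenceMatcher

def fuzzy_match(predb_titles, title, year):
    movie = '{}.{}'.format(title, year).lower().replace(' ', '.')
    for pdb in predb_titles:
        if year not in pdb:
            continue
        pdb = pdb.split(year)[0] + year  # drop everything after the year
        ratio = SequenceMatcher(None, pdb.lower().replace(' ', '.'), movie).ratio() * 100
        if ratio > 60:
            return True
    return False

print(fuzzy_match(['Black.Swan.2010.1080p.BluRay.x264-GROUP'], 'Black Swan', '2010'))  # True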
Example 13
    def search_rss(self, title_year):
        ''' Searches predb rss for title_year
        :param title_year: str movie title and year 'Black Swan 2010'

        Returns list of found rss entries, or None if the search fails.
        '''

        title_year = Url.normalize(title_year)

        url = 'http://predb.me/?cats=movies&search={}&rss=1'.format(title_year)

        try:
            response = Url.open(url).text
            results_xml = response.replace('&', '%26')
            items = self.parse_predb_xml(results_xml)
            return items
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:  # noqa
            logging.error('Predb.me search failed.', exc_info=True)
            return None
Example 14
    def search_all(self, imdbid, title, year):
        ''' Performs backlog search for all indexers.
        imdbid (str): imdb id #
        title (str): movie title
        year (str/int): year of movie release

        Returns list of dicts with sorted release information.
        '''

        torz_indexers = core.CONFIG['Indexers']['TorzNab'].values()

        self.imdbid = imdbid

        results = []

        term = Url.normalize('{} {}'.format(title, year))

        for indexer in torz_indexers:
            if indexer[2] is False:
                continue
            url_base = indexer[0]
            logging.info('Searching TorzNab indexer {}'.format(url_base))
            if url_base[-1] != '/':
                url_base = url_base + '/'
            apikey = indexer[1]

            caps = core.sql.torznab_caps(url_base)
            if not caps:
                caps = self._get_caps(url_base, apikey)
                if caps is None:
                    logging.error('Unable to get caps for {}'.format(url_base))
                    continue

            if 'imdbid' in caps:
                logging.info('{} supports imdbid search.'.format(url_base))
                r = self.search_newznab(url_base,
                                        apikey,
                                        t='movie',
                                        cat=2000,
                                        imdbid=imdbid)
            else:
                logging.info(
                    '{} does not support imdbid search, using q={}'.format(
                        url_base, term))
                r = self.search_newznab(url_base,
                                        apikey,
                                        t='search',
                                        cat=2000,
                                        q=term)
            for i in r:
                results.append(i)

        torrent_indexers = core.CONFIG['Indexers']['Torrent']

        title = Url.normalize(title)
        year = Url.normalize(str(year))

        if torrent_indexers['rarbg']:
            rarbg_results = Rarbg.search(imdbid)
            for i in rarbg_results:
                if i not in results:
                    results.append(i)
        if torrent_indexers['limetorrents']:
            lime_results = LimeTorrents.search(imdbid, term)
            for i in lime_results:
                if i not in results:
                    results.append(i)
        if torrent_indexers['skytorrents']:
            sky_results = SkyTorrents.search(imdbid, term)
            for i in sky_results:
                if i not in results:
                    results.append(i)
        if torrent_indexers['torrentz2']:
            torrentz_results = Torrentz2.search(imdbid, term)
            for i in torrentz_results:
                if i not in results:
                    results.append(i)
        if torrent_indexers['thepiratebay']:
            tpb_results = ThePirateBay.search(imdbid)
            for i in tpb_results:
                if i not in results:
                    results.append(i)
        if torrent_indexers['yts']:
            yts_results = YTS.search(imdbid, term)
            for i in yts_results:
                if i not in results:
                    results.append(i)
        if torrent_indexers['zooqle']:
            zooqle_results = Zooqle.search(imdbid, term)
            for i in zooqle_results:
                if i not in results:
                    results.append(i)

        self.imdbid = None
        return results
Example 15
    def search_all(self, imdbid, title, year):
        ''' Performs backlog search for all indexers.
        imdbid (str): imdb id #
        title (str): movie title
        year (str/int): year of movie release

        Returns list of dicts with sorted release information.
        '''

        torz_indexers = core.CONFIG['Indexers']['TorzNab'].values()

        results = []

        term = Url.normalize('{} {}'.format(title, year))

        for indexer in torz_indexers:
            if indexer[2] is False:
                continue
            url_base = indexer[0]
            logging.info('Searching TorzNab indexer {}'.format(url_base))
            if url_base[-1] != '/':
                url_base = url_base + '/'
            apikey = indexer[1]

            caps = core.sql.torznab_caps(url_base)
            if not caps:
                caps = self._get_caps(url_base, apikey)
                if caps is None:
                    logging.error('Unable to get caps for {}'.format(url_base))
                    continue

            if 'imdbid' in caps:
                logging.info('{} supports imdbid search.'.format(url_base))
                r = self.search_newznab(url_base,
                                        apikey,
                                        'movie',
                                        imdbid=imdbid)
            else:
                logging.info(
                    '{} does not support imdbid search, using q={}'.format(
                        url_base, term))
                r = self.search_newznab(url_base,
                                        apikey,
                                        'search',
                                        q=term,
                                        imdbid=imdbid)
            for i in r:
                results.append(i)

        for indexer, enabled in core.CONFIG['Indexers']['Torrent'].items():
            if enabled:
                if not hasattr(torrent_modules, indexer):
                    logging.warning(
                        'Torrent indexer {} enabled but not found in torrent_modules.'
                        .format(indexer))
                    continue
                else:
                    for i in getattr(torrent_modules,
                                     indexer).search(imdbid, term):
                        if i not in results:
                            results.append(i)

        return results
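
The torrent half of this variant dispatches on attribute lookup: each enabled config key is expected to name a provider module inside torrent_modules that exposes search(imdbid, term). A small self-contained sketch of that pattern with a dummy namespace (provider names, results and config below are illustrative):

import logging
from types import SimpleNamespace

# dummy stand-in for the torrent_modules package
torrent_modules = SimpleNamespace(
    rarbg=SimpleNamespace(search=lambda imdbid, term: [{'indexer': 'rarbg', 'title': term}]),
)

config = {'rarbg': True, 'notinstalled': True}  # illustrative Indexers config

results = []
for indexer, enabled in config.items():
    if not enabled:
        continue
    if not hasattr(torrent_modules, indexer):
        logging.warning('Torrent indexer {} enabled but not found in torrent_modules.'.format(indexer))
        continue
    for i in getattr(torrent_modules, indexer).search('tt0947798', 'Black Swan 2010'):
        if i not in results:
            results.append(i)

print(results)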
Example 16
    def search_all(self, imdbid, title, year):
        ''' Performs backlog search for all indexers.
        imdbid (str): imdb id #
        title (str): movie title
        year (str/int): year of movie release

        Returns list of dicts with sorted release information.
        '''

        torz_indexers = core.CONFIG['Indexers']['TorzNab'].values()

        self.imdbid = imdbid

        results = []

        term = Url.normalize('{} {}'.format(title, year))

        for indexer in torz_indexers:
            if indexer[2] is False:
                continue
            url_base = indexer[0]
            logging.info('Searching TorzNab indexer {}'.format(url_base))
            if url_base[-1] != '/':
                url_base = url_base + '/'
            apikey = indexer[1]

            caps = core.sql.torznab_caps(url_base)
            if not caps:
                caps = self._get_caps(url_base, apikey)
                if caps is None:
                    logging.error('Unable to get caps for {}'.format(url_base))
                    continue

            if 'imdbid' in caps:
                logging.info('{} supports imdbid search.'.format(url_base))
                r = self.search_newznab(url_base, apikey, t='movie', cat=2000, imdbid=imdbid)
            else:
                logging.info('{} does not support imdbid search, using q={}'.format(url_base, term))
                r = self.search_newznab(url_base, apikey, t='search', cat=2000, q=term)
            for i in r:
                results.append(i)

        torrent_indexers = core.CONFIG['Indexers']['Torrent']

        title = Url.normalize(title)
        year = Url.normalize(str(year))

        logging.info('Starting search')

        for provider in TorrentProvider.__subclasses__():
            if torrent_indexers[provider.id]:
                logging.info("Searching {}".format(provider.name))

                indexer_result = provider.search(imdbid, term)

                for i in indexer_result:
                    if i not in results:
                        results.append(i)

        self.imdbid = None
        return results
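
This last variant drops the per-indexer if chain in favour of a registry built from TorrentProvider.__subclasses__(): a provider only has to subclass the base and declare id, name and search to be picked up when its config flag is on. A minimal sketch of the pattern (classes and config are illustrative):

class TorrentProvider:
    ''' Base class; subclassing is enough to register a provider '''
    id = None
    name = None

    @staticmethod
    def search(imdbid, term):
        raise NotImplementedError


class Rarbg(TorrentProvider):
    id = 'rarbg'
    name = 'Rarbg'

    @staticmethod
    def search(imdbid, term):
        return [{'indexer': 'rarbg', 'title': term}]  # illustrative result


torrent_indexers = {'rarbg': True}  # illustrative config

results = []
for provider in TorrentProvider.__subclasses__():
    if torrent_indexers.get(provider.id):
        for i in provider.search('tt0947798', 'Black Swan 2010'):
            if i not in results:
                results.append(i)

print(results)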