Example #1
    def test_search(self):
        self.url = 'http://kickass.unblocked.li'
        searchURL = '{}/usearch/American%20Dad%20S08/'.format(self.url)

        data = getURL(searchURL, session=requests.Session())
        if not data:
            return

        with bs4_parser(data) as html:
            torrent_table = html.find('table', attrs={'class': 'data'})

        # Continue only if one Release is found
        torrent_rows = torrent_table.find_all('tr') if torrent_table else []
        if len(torrent_rows) < 2:
            print("The data returned does not contain any torrents")
            return

        for tr in torrent_rows[1:]:
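            # each row holds the torrent name/links, a magnet link, verification badges, and seeders/leechers in the last two cells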
            try:
                link = urlparse.urljoin(self.url, (tr.find('div', {'class': 'torrentname'}).find_all('a')[1])['href'])
                id = tr.get('id')[-7:]
                title = (tr.find('div', {'class': 'torrentname'}).find_all('a')[1]).text \
                        or (tr.find('div', {'class': 'torrentname'}).find_all('a')[2]).text
                url = tr.find('a', 'imagnet')['href']
                verified = bool(tr.find('a', 'iverify'))
                trusted = bool(tr.find('img', {'alt': 'verified'}))
                seeders = int(tr.find_all('td')[-2].text)
                leechers = int(tr.find_all('td')[-1].text)
            except (AttributeError, TypeError):
                continue

            print(title)
Example #2
def _xem_exceptions_fetcher():
    if shouldRefresh('xem'):
        for indexer in sickrage.INDEXER_API().indexers:
            sickrage.LOGGER.info("Checking for XEM scene exception updates for " + sickrage.INDEXER_API(indexer).name)

            url = "http://thexem.de/map/allNames?origin=%s&seasonNumbers=1" % sickrage.INDEXER_API(indexer).config[
                'xem_origin']

            parsedJSON = getURL(url, session=xem_session, timeout=90, json=True)
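            # getURL(json=True) returns the decoded JSON payload, or a falsy value on failure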
            if not parsedJSON:
                sickrage.LOGGER.debug("Check scene exceptions update failed for " + sickrage.INDEXER_API(
                        indexer).name + ", Unable to get URL: " + url)
                continue

            if parsedJSON[b'result'] == 'failure':
                continue

            for indexerid, names in parsedJSON[b'data'].iteritems():
                try:
                    xem_exception_dict[int(indexerid)] = names
                except Exception as e:
                    sickrage.LOGGER.warning("XEM: Rejected entry: indexerid:{0}; names:{1}".format(indexerid, names))
                    sickrage.LOGGER.debug("XEM: Rejected entry error message:{0}".format(str(e)))

        setLastRefresh('xem')

    return xem_exception_dict
Example #3
    def test_search(self):
        self.url = 'http://kickass.to/'
        searchURL = 'http://kickass.to/usearch/American%20Dad%21%20S08%20-S08E%20category%3Atv/?field=seeders&sorder=desc'

        data = getURL(searchURL, session=requests.Session())
        if not data:
            return

        with BS4Parser(data, markup_type="HTML", features=["html5lib", "permissive"]) as html:
            torrent_table = html.find('table', attrs={'class': 'data'})

        # Continue only if one Release is found
        torrent_rows = torrent_table.find_all('tr') if torrent_table else []
        if len(torrent_rows) < 2:
            print("The data returned does not contain any torrents")
            return

        for tr in torrent_rows[1:]:
            try:
                link = urlparse.urljoin(self.url, (tr.find('div', {'class': 'torrentname'}).find_all('a')[1])['href'])
                id = tr.get('id')[-7:]
                title = (tr.find('div', {'class': 'torrentname'}).find_all('a')[1]).text \
                        or (tr.find('div', {'class': 'torrentname'}).find_all('a')[2]).text
                url = tr.find('a', 'imagnet')['href']
                verified = bool(tr.find('a', 'iverify'))
                trusted = bool(tr.find('img', {'alt': 'verified'}))
                seeders = int(tr.find_all('td')[-2].text)
                leechers = int(tr.find_all('td')[-1].text)
            except (AttributeError, TypeError):
                continue

            print(title)
Example #4
    def fetch_popular_shows(self):
        """Get popular show information from IMDB"""

        popular_shows = []

        data = getURL(self.url, session=self.session, params=self.params, headers={"Referer": "http://akas.imdb.com/"})
        if not data:
            return None

        with bs4_parser(data) as soup:
            results = soup.find("table", {"class": "results"})
            rows = results.find_all("tr")

        for row in rows:
            show = {}
            image_td = row.find("td", {"class": "image"})

            if image_td:
                image = image_td.find("img")
                show[b"image_url_large"] = self.change_size(image[b"src"], 3)
                show[b"image_path"] = os.path.join("images", "imdb_popular", os.path.basename(show[b"image_url_large"]))

                self.cache_image(show[b"image_url_large"])

            td = row.find("td", {"class": "title"})

            if td:
                show[b"name"] = td.find("a").contents[0]
                show[b"imdb_url"] = "http://www.imdb.com" + td.find("a")["href"]
                show[b"imdb_tt"] = show[b"imdb_url"][-10:][0:9]
                show[b"year"] = td.find("span", {"class": "year_type"}).contents[0].split(" ")[0][1:]

                rating_all = td.find("div", {"class": "user_rating"})
                if rating_all:
                    rating_string = rating_all.find("div", {"class": "rating rating-list"})
                    if rating_string:
                        rating_string = rating_string[b"title"]

                        match = re.search(r".* (.*)\/10.*\((.*)\).*", rating_string)
                        if match:
                            matches = match.groups()
                            show[b"rating"] = matches[0]
                            show[b"votes"] = matches[1]
                        else:
                            show[b"rating"] = None
                            show[b"votes"] = None
                else:
                    show[b"rating"] = None
                    show[b"votes"] = None

                outline = td.find("span", {"class": "outline"})
                if outline:
                    show[b"outline"] = outline.contents[0]
                else:
                    show[b"outline"] = ""

                popular_shows.append(show)

        return popular_shows
Example #5
def update_network_dict():
    """Update timezone information from SR repositories"""

    url = 'http://sickragetv.github.io/sb_network_timezones/network_timezones.txt'
    url_data = getURL(url, session=requests.Session())
    if not url_data:
        sickrage.LOGGER.warning(
            'Updating network timezones failed, this can happen from time to time. URL: %s'
            % url)
        load_network_dict()
        return

    d = {}
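    # each line of the remote file has the form "<network name>:<timezone>"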
    try:
        for line in url_data.splitlines():
            (key, val) = line.strip().rsplit(':', 1)
            if key is None or val is None:
                continue
            d[key] = val
    except (IOError, OSError, ValueError):  # rsplit raises ValueError on lines without a ':'
        pass

    network_list = dict(
        cache_db.CacheDB().select('SELECT * FROM network_timezones;'))

    queries = []
    for network, timezone in d.iteritems():
        existing = network in network_list
        if not existing:
            queries.append([
                'INSERT OR IGNORE INTO network_timezones VALUES (?,?);',
                [network, timezone]
            ])
        elif network_list[network] != timezone:
            queries.append([
                'UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;',
                [timezone, network]
            ])

        if existing:
            del network_list[network]

    if network_list:
        purged = [x for x in network_list]
        queries.append([
            'DELETE FROM network_timezones WHERE network_name IN (%s);' %
            ','.join(['?'] * len(purged)), purged
        ])

    if queries:
        cache_db.CacheDB().mass_action(queries)
        load_network_dict()
Example #6
def getShowImage(url, imgNum=None):
    if url is None:
        return None

    # if they provided a fanart number try to use it instead
    if imgNum is not None:
        tempURL = url.split('-')[0] + "-" + str(imgNum) + ".jpg"
    else:
        tempURL = url

    sickrage.LOGGER.debug("Fetching image from " + tempURL)

    image_data = getURL(tempURL, session=meta_session, needBytes=True)
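    # getURL returns None when the download fails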
    if image_data is None:
        sickrage.LOGGER.warning("There was an error trying to retrieve the image, aborting")
        return

    return image_data
Example #7
def getShowImage(url, imgNum=None):
    if url is None:
        return None

    # if they provided a fanart number try to use it instead
    if imgNum is not None:
        tempURL = url.split('-')[0] + "-" + str(imgNum) + ".jpg"
    else:
        tempURL = url

    sickrage.LOGGER.debug("Fetching image from " + tempURL)

    image_data = getURL(tempURL, session=meta_session, needBytes=True)
    if image_data is None:
        sickrage.LOGGER.warning(
            "There was an error trying to retrieve the image, aborting")
        return

    return image_data
Example #8
    def check_for_new_news(self, force=False):
        """
        Checks GitHub for the latest news.

        returns: unicode, a copy of the news

        force: ignored
        """

        news = ''

        # Grab a copy of the news
        sickrage.LOGGER.debug('check_for_new_news: Checking GitHub for latest news.')
        try:
            news = getURL(sickrage.NEWS_URL, session=self.session)
        except Exception:
            sickrage.LOGGER.warning('check_for_new_news: Could not load news from repo.')

        if news:
            dates = list(re.finditer(r'^####(\d{4}-\d{2}-\d{2})####$', news, re.M))
            if not dates:
                return news or ''

            try:
                last_read = datetime.datetime.strptime(sickrage.NEWS_LAST_READ, '%Y-%m-%d')
            except Exception:
                last_read = datetime.datetime.min  # keeps the date comparison below valid

            sickrage.NEWS_UNREAD = 0
            gotLatest = False
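            # the first date header in the file is treated as the latest entry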
            for match in dates:
                if not gotLatest:
                    gotLatest = True
                    sickrage.NEWS_LATEST = match.group(1)

                try:
                    if datetime.datetime.strptime(match.group(1), '%Y-%m-%d') > last_read:
                        sickrage.NEWS_UNREAD += 1
                except Exception:
                    pass

        return news
Example #9
def update_network_dict():
    """Update timezone information from SR repositories"""

    url = 'http://sickragetv.github.io/sb_network_timezones/network_timezones.txt'
    url_data = getURL(url, session=requests.Session())
    if not url_data:
        sickrage.LOGGER.warning('Updating network timezones failed, this can happen from time to time. URL: %s' % url)
        load_network_dict()
        return

    d = {}
    try:
        for line in url_data.splitlines():
            (key, val) = line.strip().rsplit(':', 1)
            if key is None or val is None:
                continue
            d[key] = val
    except (IOError, OSError, ValueError):  # rsplit raises ValueError on lines without a ':'
        pass

    network_list = dict(cache_db.CacheDB().select('SELECT * FROM network_timezones;'))

    queries = []
    for network, timezone in d.iteritems():
        existing = network in network_list
        if not existing:
            queries.append(['INSERT OR IGNORE INTO network_timezones VALUES (?,?);', [network, timezone]])
        elif network_list[network] != timezone:
            queries.append(['UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;',
                            [timezone, network]])

        if existing:
            del network_list[network]

    if network_list:
        purged = [x for x in network_list]
        queries.append(
                ['DELETE FROM network_timezones WHERE network_name IN (%s);' % ','.join(['?'] * len(purged)), purged])

    if queries:
        cache_db.CacheDB().mass_action(queries)
        load_network_dict()
Example #10
    def getDBcompare(self):
        try:
            self.updater.need_update()
            cur_hash = str(self.updater.get_newest_commit_hash)
            assert len(cur_hash) == 40, "Commit hash wrong length: %s hash: %s" % (len(cur_hash), cur_hash)

            check_url = "http://cdn.rawgit.com/%s/%s/%s/sickrage/databases/main_db.py" % (
                sickrage.GIT_ORG, sickrage.GIT_REPO, cur_hash)
            response = getURL(check_url, session=self.session)
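            # an empty response likely means the commit hash or CDN URL could not be resolved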
            assert response, "Empty response from %s" % check_url

            match = re.search(r"MAX_DB_VERSION\s=\s(?P<version>\d{2,3})", response)
            assert match, "Could not find MAX_DB_VERSION in %s" % check_url
            branchDestDBversion = int(match.group('version'))
            branchCurrDBversion = main_db.MainDB().checkDBVersion()
            if branchDestDBversion > branchCurrDBversion:
                return 'upgrade'
            elif branchDestDBversion == branchCurrDBversion:
                return 'equal'
            else:
                return 'downgrade'
        except:
            raise
Example #11
def _xem_exceptions_fetcher():
    if shouldRefresh('xem'):
        for indexer in sickrage.INDEXER_API().indexers:
            sickrage.LOGGER.info(
                "Checking for XEM scene exception updates for " +
                sickrage.INDEXER_API(indexer).name)

            url = "http://thexem.de/map/allNames?origin=%s&seasonNumbers=1" % sickrage.INDEXER_API(
                indexer).config['xem_origin']

            parsedJSON = getURL(url,
                                session=xem_session,
                                timeout=90,
                                json=True)
            if not parsedJSON:
                sickrage.LOGGER.debug(
                    "Check scene exceptions update failed for " +
                    sickrage.INDEXER_API(indexer).name +
                    ", Unable to get URL: " + url)
                continue

            if parsedJSON[b'result'] == 'failure':
                continue

            for indexerid, names in parsedJSON[b'data'].iteritems():
                try:
                    xem_exception_dict[int(indexerid)] = names
                except Exception as e:
                    sickrage.LOGGER.warning(
                        "XEM: Rejected entry: indexerid:{0}; names:{1}".format(
                            indexerid, names))
                    sickrage.LOGGER.debug(
                        "XEM: Rejected entry error message:{0}".format(str(e)))

        setLastRefresh('xem')

    return xem_exception_dict
Example #12
    def test_search(self):
        self.url = 'http://kickass.unblocked.li'
        searchURL = '{}/usearch/American%20Dad%20S08/'.format(self.url)

        data = getURL(searchURL, session=requests.Session())
        if not data:
            return

        with bs4_parser(data) as html:
            torrent_table = html.find('table', attrs={'class': 'data'})

        # Continue only if one Release is found
        torrent_rows = torrent_table.find_all('tr') if torrent_table else []
        if len(torrent_rows) < 2:
            print("The data returned does not contain any torrents")
            return

        for tr in torrent_rows[1:]:
            try:
                link = urlparse.urljoin(self.url,
                                        (tr.find('div', {
                                            'class': 'torrentname'
                                        }).find_all('a')[1])['href'])
                id = tr.get('id')[-7:]
                title = (tr.find('div', {'class': 'torrentname'}).find_all('a')[1]).text \
                        or (tr.find('div', {'class': 'torrentname'}).find_all('a')[2]).text
                url = tr.find('a', 'imagnet')['href']
                verified = bool(tr.find('a', 'iverify'))
                trusted = bool(tr.find('img', {'alt': 'verified'}))
                seeders = int(tr.find_all('td')[-2].text)
                leechers = int(tr.find_all('td')[-1].text)
            except (AttributeError, TypeError):
                continue

            print(title)
Example #13
    def fetch_popular_shows(self):
        """Get popular show information from IMDB"""

        popular_shows = []

        data = getURL(self.url, session=self.session, params=self.params,
                      headers={'Referer': 'http://akas.imdb.com/'})
        if not data:
            return None

        soup = BeautifulSoup(data, 'html.parser')
        results = soup.find("table", {"class": "results"})
        rows = results.find_all("tr")

        for row in rows:
            show = {}
            image_td = row.find("td", {"class": "image"})

            if image_td:
                image = image_td.find("img")
                show[b'image_url_large'] = self.change_size(image[b'src'], 3)
                show[b'image_path'] = os.path.join('images', 'imdb_popular',
                                                   os.path.basename(show[b'image_url_large']))

                self.cache_image(show[b'image_url_large'])

            td = row.find("td", {"class": "title"})

            if td:
                show[b'name'] = td.find("a").contents[0]
                show[b'imdb_url'] = "http://www.imdb.com" + td.find("a")["href"]
                show[b'imdb_tt'] = show[b'imdb_url'][-10:][0:9]
                show[b'year'] = td.find("span", {"class": "year_type"}).contents[0].split(" ")[0][1:]

                rating_all = td.find("div", {"class": "user_rating"})
                if rating_all:
                    rating_string = rating_all.find("div", {"class": "rating rating-list"})
                    if rating_string:
                        rating_string = rating_string[b'title']

                        match = re.search(r".* (.*)\/10.*\((.*)\).*", rating_string)
                        if match:
                            matches = match.groups()
                            show[b'rating'] = matches[0]
                            show[b'votes'] = matches[1]
                        else:
                            show[b'rating'] = None
                            show[b'votes'] = None
                else:
                    show[b'rating'] = None
                    show[b'votes'] = None

                outline = td.find("span", {"class": "outline"})
                if outline:
                    show[b'outline'] = outline.contents[0]
                else:
                    show[b'outline'] = ''

                popular_shows.append(show)

        return popular_shows
Example #14
def retrieve_exceptions():
    """
    Looks up the exceptions on github, parses them into a dict, and inserts them into the
    scene_exceptions table in cache.db. Also clears the scene name cache.
    """

    for indexer in sickrage.INDEXER_API().indexers:
        if shouldRefresh(sickrage.INDEXER_API(indexer).name):
            sickrage.LOGGER.info("Checking for scene exception updates for " +
                                 sickrage.INDEXER_API(indexer).name + "")

            loc = sickrage.INDEXER_API(indexer).config[b'scene_loc']
            try:
                data = getURL(loc,
                              session=sickrage.INDEXER_API(indexer).session)
            except Exception:
                continue

            if data is None:
                # When data is None, trouble connecting to github, or reading file failed
                sickrage.LOGGER.debug(
                    "Check scene exceptions update failed. Unable to update from: "
                    + loc)
                continue

            setLastRefresh(sickrage.INDEXER_API(indexer).name)

            # each exception is on one line with the format indexer_id: 'show name 1', 'show name 2', etc
            for cur_line in data.splitlines():
                indexer_id, _, aliases = cur_line.partition(
                    ':')  # @UnusedVariable

                if not aliases:
                    continue

                indexer_id = int(indexer_id)

                # regex out the list of shows, taking \' into account
                # alias_list = [re.sub(r'\\(.)', r'\1', x) for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
                alias_list = [{
                    re.sub(r'\\(.)', r'\1', x): -1
                } for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
                exception_dict[indexer_id] = alias_list
                del alias_list

            # cleanup
            del data

    # XEM scene exceptions
    _xem_exceptions_fetcher()
    for xem_ex in xem_exception_dict:
        if xem_ex in exception_dict:
            exception_dict[
                xem_ex] = exception_dict[xem_ex] + xem_exception_dict[xem_ex]
        else:
            exception_dict[xem_ex] = xem_exception_dict[xem_ex]

    # AniDB scene exceptions
    _anidb_exceptions_fetcher()
    for anidb_ex in anidb_exception_dict:
        if anidb_ex in exception_dict:
            exception_dict[anidb_ex] = exception_dict[
                anidb_ex] + anidb_exception_dict[anidb_ex]
        else:
            exception_dict[anidb_ex] = anidb_exception_dict[anidb_ex]

    queries = []
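    # insert only the exception names not already recorded for this indexer id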
    for cur_indexer_id in exception_dict:
        sql_ex = cache_db.CacheDB().select(
            "SELECT * FROM scene_exceptions WHERE indexer_id = ?;",
            [cur_indexer_id])
        existing_exceptions = [x[b"show_name"] for x in sql_ex]
        if cur_indexer_id not in exception_dict:
            continue

        for cur_exception_dict in exception_dict[cur_indexer_id]:
            for ex in cur_exception_dict.iteritems():
                cur_exception, curSeason = ex
                if cur_exception not in existing_exceptions:
                    queries.append([
                        "INSERT OR IGNORE INTO scene_exceptions (indexer_id, show_name, season) VALUES (?,?,?);",
                        [cur_indexer_id, cur_exception, curSeason]
                    ])
    if queries:
        cache_db.CacheDB().mass_action(queries)
        sickrage.LOGGER.debug("Updated scene exceptions")
    else:
        sickrage.LOGGER.debug("No scene exceptions update needed")

    # cleanup
    exception_dict.clear()
    anidb_exception_dict.clear()
    xem_exception_dict.clear()
Example #15
def xem_refresh(indexer_id, indexer, force=False):
    """
    Refresh data from xem for a tv show

    :param indexer_id: int
    """
    if not indexer_id or indexer_id < 1:
        return

    indexer_id = int(indexer_id)
    indexer = int(indexer)

    MAX_REFRESH_AGE_SECS = 86400  # 1 day

    rows = main_db.MainDB().select("SELECT last_refreshed FROM xem_refresh WHERE indexer = ? AND indexer_id = ?",
                       [indexer, indexer_id])
    if rows:
        lastRefresh = int(rows[0][b'last_refreshed'])
        refresh = int(time.mktime(datetime.datetime.today().timetuple())) > lastRefresh + MAX_REFRESH_AGE_SECS
    else:
        refresh = True

    if refresh or force:
        sickrage.LOGGER.debug(
                'Looking up XEM scene mapping for show %s on %s' % (indexer_id, sickrage.INDEXER_API(indexer).name))

        # mark refreshed
        main_db.MainDB().upsert("xem_refresh",
                                {'indexer': indexer,
                     'last_refreshed': int(time.mktime(datetime.datetime.today().timetuple()))},
                                {'indexer_id': indexer_id})

        try:
            from scene_exceptions import xem_session

            # XEM MAP URL
            url = "http://thexem.de/map/havemap?origin=%s" % sickrage.INDEXER_API(indexer).config[b'xem_origin']
            parsedJSON = getURL(url, session=xem_session, json=True)
            if not parsedJSON or 'result' not in parsedJSON or 'success' not in parsedJSON[b'result'] \
                    or 'data' not in parsedJSON or str(indexer_id) not in parsedJSON[b'data']:
                return

            # XEM API URL
            url = "http://thexem.de/map/all?id={}&origin={}&destination=scene".format(
                    indexer_id, sickrage.INDEXER_API(indexer).config[b'xem_origin'])

            parsedJSON = getURL(url, session=xem_session, json=True)
            if not ((parsedJSON and 'result' in parsedJSON) and 'success' in parsedJSON[b'result']):
                sickrage.LOGGER.info('No XEM data for show "%s on %s"' % (indexer_id, sickrage.INDEXER_API(indexer).name,))
                return

            cl = []
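            # queue one UPDATE per mapped episode; "scene_2" entries cover double episodes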
            for entry in parsedJSON[b'data']:
                if 'scene' in entry:
                    cl.append([
                        "UPDATE tv_episodes SET scene_season = ?, scene_episode = ?, scene_absolute_number = ? WHERE showid = ? AND season = ? AND episode = ?",
                        [entry[b'scene'][b'season'],
                         entry[b'scene'][b'episode'],
                         entry[b'scene'][b'absolute'],
                         indexer_id,
                         entry[sickrage.INDEXER_API(indexer).config[b'xem_origin']][b'season'],
                         entry[sickrage.INDEXER_API(indexer).config[b'xem_origin']][b'episode']
                         ]])
                if 'scene_2' in entry:  # for doubles
                    cl.append([
                        "UPDATE tv_episodes SET scene_season = ?, scene_episode = ?, scene_absolute_number = ? WHERE showid = ? AND season = ? AND episode = ?",
                        [entry[b'scene_2'][b'season'],
                         entry[b'scene_2'][b'episode'],
                         entry[b'scene_2'][b'absolute'],
                         indexer_id,
                         entry[sickrage.INDEXER_API(indexer).config[b'xem_origin']][b'season'],
                         entry[sickrage.INDEXER_API(indexer).config[b'xem_origin']][b'episode']
                         ]])

            if cl:
                main_db.MainDB().mass_action(cl)

        except Exception as e:
            sickrage.LOGGER.warning(
                    "Exception while refreshing XEM data for show " + str(indexer_id) + " on " + sickrage.INDEXER_API(
                            indexer).name + ": {}".format(e))
            sickrage.LOGGER.debug(traceback.format_exc())
Example #16
    def fetch_popular_shows(self):
        """Get popular show information from IMDB"""

        popular_shows = []

        data = getURL(self.url,
                      session=self.session,
                      params=self.params,
                      headers={'Referer': 'http://akas.imdb.com/'})
        if not data:
            return None

        with bs4_parser(data) as soup:
            results = soup.find("table", {"class": "results"})
            rows = results.find_all("tr")

        for row in rows:
            show = {}
            image_td = row.find("td", {"class": "image"})

            if image_td:
                image = image_td.find("img")
                show[b'image_url_large'] = self.change_size(image[b'src'], 3)
                show[b'image_path'] = os.path.join(
                    'images', 'imdb_popular',
                    os.path.basename(show[b'image_url_large']))

                self.cache_image(show[b'image_url_large'])

            td = row.find("td", {"class": "title"})

            if td:
                show[b'name'] = td.find("a").contents[0]
                show[b'imdb_url'] = "http://www.imdb.com" + td.find(
                    "a")["href"]
                show[b'imdb_tt'] = show[b'imdb_url'][-10:][0:9]
                show[b'year'] = td.find("span", {
                    "class": "year_type"
                }).contents[0].split(" ")[0][1:]

                rating_all = td.find("div", {"class": "user_rating"})
                if rating_all:
                    rating_string = rating_all.find(
                        "div", {"class": "rating rating-list"})
                    if rating_string:
                        rating_string = rating_string[b'title']

                        match = re.search(r".* (.*)\/10.*\((.*)\).*",
                                          rating_string)
                        if match:
                            matches = match.groups()
                            show[b'rating'] = matches[0]
                            show[b'votes'] = matches[1]
                        else:
                            show[b'rating'] = None
                            show[b'votes'] = None
                else:
                    show[b'rating'] = None
                    show[b'votes'] = None

                outline = td.find("span", {"class": "outline"})
                if outline:
                    show[b'outline'] = outline.contents[0]
                else:
                    show[b'outline'] = ''

                popular_shows.append(show)

        return popular_shows
Example #17
def splitNZBResult(result):
    """
    Split result into separate episodes

    :param result: search result object
    :return: False upon failure, a list of episode objects otherwise
    """
    urlData = getURL(result.url, session=requests.Session(), needBytes=True)
    if urlData is None:
        sickrage.LOGGER.error("Unable to load url " + result.url +
                              ", can't download season NZB")
        return False

    # parse the season ep name
    try:
        np = NameParser(False, showObj=result.show)
        parse_result = np.parse(result.name)
    except InvalidNameException:
        sickrage.LOGGER.debug("Unable to parse the filename " + result.name +
                              " into a valid episode")
        return False
    except InvalidShowException:
        sickrage.LOGGER.debug("Unable to parse the filename " + result.name +
                              " into a valid show")
        return False

    # bust it up
    season = parse_result.season_number if parse_result.season_number is not None else 1

    separateNZBs, xmlns = getSeasonNZBs(result.name, urlData, season)
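    # separateNZBs maps each split-out NZB name to its file entries; xmlns is reused when rebuilding the XML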

    resultList = []

    for newNZB in separateNZBs:

        sickrage.LOGGER.debug("Split out " + newNZB + " from " + result.name)

        # parse the name
        try:
            np = NameParser(False, showObj=result.show)
            parse_result = np.parse(newNZB)
        except InvalidNameException:
            sickrage.LOGGER.debug("Unable to parse the filename " + newNZB +
                                  " into a valid episode")
            return False
        except InvalidShowException:
            sickrage.LOGGER.debug("Unable to parse the filename " + newNZB +
                                  " into a valid show")
            return False

        # make sure the result is sane
        if (parse_result.season_number is not None
                and parse_result.season_number != season) or (
                    parse_result.season_number is None and season != 1):
            sickrage.LOGGER.warning(
                "Found " + newNZB + " inside " + result.name +
                " but it doesn't seem to belong to the same season, ignoring it"
            )
            continue
        elif len(parse_result.episode_numbers) == 0:
            sickrage.LOGGER.warning(
                "Found " + newNZB + " inside " + result.name +
                " but it doesn't seem to be a valid episode NZB, ignoring it")
            continue

        wantEp = True
        for epNo in parse_result.episode_numbers:
            if not result.extraInfo[0].wantEpisode(season, epNo,
                                                   result.quality):
                sickrage.LOGGER.info(
                    "Ignoring result " + newNZB +
                    " because we don't want an episode that is " +
                    Quality.qualityStrings[result.quality])
                wantEp = False
                break
        if not wantEp:
            continue

        # get all the associated episode objects
        epObjList = []
        for curEp in parse_result.episode_numbers:
            epObjList.append(result.extraInfo[0].getEpisode(season, curEp))

        # make a result
        curResult = classes.NZBDataSearchResult(epObjList)
        curResult.name = newNZB
        curResult.provider = result.provider
        curResult.quality = result.quality
        curResult.extraInfo = [createNZBString(separateNZBs[newNZB], xmlns)]

        resultList.append(curResult)

    return resultList
Example #18
def splitNZBResult(result):
    """
    Split result into separate episodes

    :param result: search result object
    :return: False upon failure, a list of episode objects otherwise
    """
    urlData = getURL(result.url, session=requests.Session(), needBytes=True)
    if urlData is None:
        sickrage.LOGGER.error("Unable to load url " + result.url + ", can't download season NZB")
        return False

    # parse the season ep name
    try:
        np = NameParser(False, showObj=result.show)
        parse_result = np.parse(result.name)
    except InvalidNameException:
        sickrage.LOGGER.debug("Unable to parse the filename " + result.name + " into a valid episode")
        return False
    except InvalidShowException:
        sickrage.LOGGER.debug("Unable to parse the filename " + result.name + " into a valid show")
        return False

    # bust it up
    season = parse_result.season_number if parse_result.season_number is not None else 1

    separateNZBs, xmlns = getSeasonNZBs(result.name, urlData, season)

    resultList = []

    for newNZB in separateNZBs:

        sickrage.LOGGER.debug("Split out " + newNZB + " from " + result.name)

        # parse the name
        try:
            np = NameParser(False, showObj=result.show)
            parse_result = np.parse(newNZB)
        except InvalidNameException:
            sickrage.LOGGER.debug("Unable to parse the filename " + newNZB + " into a valid episode")
            return False
        except InvalidShowException:
            sickrage.LOGGER.debug("Unable to parse the filename " + newNZB + " into a valid show")
            return False

        # make sure the result is sane
        if (parse_result.season_number is not None and parse_result.season_number != season) or (
                        parse_result.season_number is None and season != 1):
            sickrage.LOGGER.warning(
                    "Found " + newNZB + " inside " + result.name + " but it doesn't seem to belong to the same season, ignoring it")
            continue
        elif len(parse_result.episode_numbers) == 0:
            sickrage.LOGGER.warning(
                    "Found " + newNZB + " inside " + result.name + " but it doesn't seem to be a valid episode NZB, ignoring it")
            continue

        wantEp = True
        for epNo in parse_result.episode_numbers:
            if not result.extraInfo[0].wantEpisode(season, epNo, result.quality):
                sickrage.LOGGER.info("Ignoring result " + newNZB + " because we don't want an episode that is " +
                             Quality.qualityStrings[result.quality])
                wantEp = False
                break
        if not wantEp:
            continue

        # get all the associated episode objects
        epObjList = []
        for curEp in parse_result.episode_numbers:
            epObjList.append(result.extraInfo[0].getEpisode(season, curEp))

        # make a result
        curResult = classes.NZBDataSearchResult(epObjList)
        curResult.name = newNZB
        curResult.provider = result.provider
        curResult.quality = result.quality
        curResult.extraInfo = [createNZBString(separateNZBs[newNZB], xmlns)]

        resultList.append(curResult)

    return resultList
Example #19
def xem_refresh(indexer_id, indexer, force=False):
    """
    Refresh data from xem for a tv show

    :param indexer_id: int
    """
    if not indexer_id or indexer_id < 1:
        return

    indexer_id = int(indexer_id)
    indexer = int(indexer)

    MAX_REFRESH_AGE_SECS = 86400  # 1 day

    rows = main_db.MainDB().select(
        "SELECT last_refreshed FROM xem_refresh WHERE indexer = ? AND indexer_id = ?",
        [indexer, indexer_id])
    if rows:
        lastRefresh = int(rows[0][b'last_refreshed'])
        refresh = int(time.mktime(datetime.datetime.today().timetuple())
                      ) > lastRefresh + MAX_REFRESH_AGE_SECS
    else:
        refresh = True

    if refresh or force:
        sickrage.LOGGER.debug(
            'Looking up XEM scene mapping for show %s on %s' %
            (indexer_id, sickrage.INDEXER_API(indexer).name))

        # mark refreshed
        main_db.MainDB().upsert(
            "xem_refresh", {
                'indexer':
                indexer,
                'last_refreshed':
                int(time.mktime(datetime.datetime.today().timetuple()))
            }, {'indexer_id': indexer_id})

        try:
            from scene_exceptions import xem_session

            # XEM MAP URL
            url = "http://thexem.de/map/havemap?origin=%s" % sickrage.INDEXER_API(
                indexer).config[b'xem_origin']
            parsedJSON = getURL(url, session=xem_session, json=True)
            if not parsedJSON or 'result' not in parsedJSON or 'success' not in parsedJSON[
                    b'result'] or 'data' not in parsedJSON or str(
                        indexer_id) not in parsedJSON[b'data']:
                return

            # XEM API URL
            url = "http://thexem.de/map/all?id={}&origin={}&destination=scene".format(
                indexer_id,
                sickrage.INDEXER_API(indexer).config[b'xem_origin'])

            parsedJSON = getURL(url, session=xem_session, json=True)
            if not ((parsedJSON and 'result' in parsedJSON)
                    and 'success' in parsedJSON[b'result']):
                sickrage.LOGGER.info('No XEM data for show "%s on %s"' % (
                    indexer_id,
                    sickrage.INDEXER_API(indexer).name,
                ))
                return

            cl = []
            for entry in parsedJSON[b'data']:
                if 'scene' in entry:
                    cl.append([
                        "UPDATE tv_episodes SET scene_season = ?, scene_episode = ?, scene_absolute_number = ? WHERE showid = ? AND season = ? AND episode = ?",
                        [
                            entry[b'scene'][b'season'],
                            entry[b'scene'][b'episode'],
                            entry[b'scene'][b'absolute'], indexer_id,
                            entry[sickrage.INDEXER_API(
                                indexer).config[b'xem_origin']][b'season'],
                            entry[sickrage.INDEXER_API(
                                indexer).config[b'xem_origin']][b'episode']
                        ]
                    ])
                if 'scene_2' in entry:  # for doubles
                    cl.append([
                        "UPDATE tv_episodes SET scene_season = ?, scene_episode = ?, scene_absolute_number = ? WHERE showid = ? AND season = ? AND episode = ?",
                        [
                            entry[b'scene_2'][b'season'],
                            entry[b'scene_2'][b'episode'],
                            entry[b'scene_2'][b'absolute'], indexer_id,
                            entry[sickrage.INDEXER_API(
                                indexer).config[b'xem_origin']][b'season'],
                            entry[sickrage.INDEXER_API(
                                indexer).config[b'xem_origin']][b'episode']
                        ]
                    ])

            if cl:
                main_db.MainDB().mass_action(cl)

        except Exception as e:
            sickrage.LOGGER.warning(
                "Exception while refreshing XEM data for show " +
                str(indexer_id) + " on " + sickrage.INDEXER_API(indexer).name +
                ": {}".format(e))
            sickrage.LOGGER.debug(traceback.format_exc())
Example #20
def retrieve_exceptions():
    """
    Looks up the exceptions on github, parses them into a dict, and inserts them into the
    scene_exceptions table in cache.db. Also clears the scene name cache.
    """

    for indexer in sickrage.INDEXER_API().indexers:
        if shouldRefresh(sickrage.INDEXER_API(indexer).name):
            sickrage.LOGGER.info("Checking for scene exception updates for " + sickrage.INDEXER_API(indexer).name + "")

            loc = sickrage.INDEXER_API(indexer).config[b'scene_loc']
            try:
                data = getURL(loc, session=sickrage.INDEXER_API(indexer).session)
            except Exception:
                continue

            if data is None:
                # When data is None, trouble connecting to github, or reading file failed
                sickrage.LOGGER.debug("Check scene exceptions update failed. Unable to update from: " + loc)
                continue

            setLastRefresh(sickrage.INDEXER_API(indexer).name)

            # each exception is on one line with the format indexer_id: 'show name 1', 'show name 2', etc
            for cur_line in data.splitlines():
                indexer_id, _, aliases = cur_line.partition(':')  # @UnusedVariable

                if not aliases:
                    continue

                indexer_id = int(indexer_id)

                # regex out the list of shows, taking \' into account
                # alias_list = [re.sub(r'\\(.)', r'\1', x) for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
                alias_list = [{re.sub(r'\\(.)', r'\1', x): -1} for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
                exception_dict[indexer_id] = alias_list
                del alias_list

            # cleanup
            del data

    # XEM scene exceptions
    _xem_exceptions_fetcher()
    for xem_ex in xem_exception_dict:
        if xem_ex in exception_dict:
            exception_dict[xem_ex] = exception_dict[xem_ex] + xem_exception_dict[xem_ex]
        else:
            exception_dict[xem_ex] = xem_exception_dict[xem_ex]

    # AniDB scene exceptions
    _anidb_exceptions_fetcher()
    for anidb_ex in anidb_exception_dict:
        if anidb_ex in exception_dict:
            exception_dict[anidb_ex] = exception_dict[anidb_ex] + anidb_exception_dict[anidb_ex]
        else:
            exception_dict[anidb_ex] = anidb_exception_dict[anidb_ex]

    queries = []
    for cur_indexer_id in exception_dict:
        sql_ex = cache_db.CacheDB().select("SELECT * FROM scene_exceptions WHERE indexer_id = ?;", [cur_indexer_id])
        existing_exceptions = [x[b"show_name"] for x in sql_ex]
        if cur_indexer_id not in exception_dict:
            continue

        for cur_exception_dict in exception_dict[cur_indexer_id]:
            for ex in cur_exception_dict.iteritems():
                cur_exception, curSeason = ex
                if cur_exception not in existing_exceptions:
                    queries.append(
                            ["INSERT OR IGNORE INTO scene_exceptions (indexer_id, show_name, season) VALUES (?,?,?);",
                             [cur_indexer_id, cur_exception, curSeason]])
    if queries:
        cache_db.CacheDB().mass_action(queries)
        sickrage.LOGGER.debug("Updated scene exceptions")
    else:
        sickrage.LOGGER.debug("No scene exceptions update needed")

    # cleanup
    exception_dict.clear()
    anidb_exception_dict.clear()
    xem_exception_dict.clear()