Example #1
    def test_search(self):
        self.url = "http://kickass.unblocked.li"
        searchURL = "{}/usearch/American%20Dad%20S08/".format(self.url)

        data = getURL(searchURL, session=requests.Session())
        if not data:
            return

        with bs4_parser(data) as html:
            torrent_table = html.find("table", attrs={"class": "data"})

        # Continue only if one Release is found
        torrent_rows = torrent_table.find_all("tr") if torrent_table else []
        if len(torrent_rows) < 2:
            print("The data returned does not contain any torrents")
            return

        for tr in torrent_rows[1:]:
            try:
                link = urlparse.urljoin(self.url, (tr.find("div", {"class": "torrentname"}).find_all("a")[1])["href"])
                id = tr.get("id")[-7:]
                title = (tr.find("div", {"class": "torrentname"}).find_all("a")[1]).text or (
                    tr.find("div", {"class": "torrentname"}).find_all("a")[2]
                ).text
                url = tr.find("a", "imagnet")["href"]
                verified = bool(tr.find("a", "iverify"))
                trusted = bool(tr.find("img", {"alt": "verified"}))
                seeders = int(tr.find_all("td")[-2].text)
                leechers = int(tr.find_all("td")[-1].text)
            except (AttributeError, TypeError):
                continue

            print(title)
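Every snippet on this page calls a module-level getURL helper from the SickRage codebase. Its real implementation is not shown here; the sketch below is a minimal stand-in, assuming a requests-based fetch that supports the session, json, and needBytes keyword arguments seen in the examples and returns None on any failure.

import requests


def getURL(url, post_data=None, params=None, headers=None, timeout=30,
           session=None, json=False, needBytes=False):
    """Minimal stand-in for SickRage's getURL helper (assumed semantics:
    text, bytes, or parsed JSON on success, None on any failure)."""
    session = session or requests.Session()
    try:
        if post_data:
            resp = session.post(url, data=post_data, params=params,
                                headers=headers, timeout=timeout)
        else:
            resp = session.get(url, params=params, headers=headers,
                               timeout=timeout)
        resp.raise_for_status()
    except requests.RequestException:
        return None

    if json:
        return resp.json()
    if needBytes:
        return resp.content
    return resp.text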
Example #2
def _xem_exceptions_fetcher():
    if shouldRefresh('xem'):
        for indexer in sickrage.srCore.INDEXER_API().indexers:
            sickrage.srLogger.info("Checking for XEM scene exception updates for " + sickrage.srCore.INDEXER_API(indexer).name)

            url = "http://thexem.de/map/allNames?origin=%s&seasonNumbers=1" % sickrage.srCore.INDEXER_API(indexer).config[
                'xem_origin']

            parsedJSON = getURL(url, timeout=90, json=True)
            if not parsedJSON:
                sickrage.srLogger.debug("Check scene exceptions update failed for " + sickrage.srCore.INDEXER_API(
                        indexer).name + ", Unable to get URL: " + url)
                continue

            if parsedJSON[b'result'] == 'failure':
                continue

            for indexerid, names in parsedJSON[b'data'].iteritems():
                try:
                    xem_exception_dict[int(indexerid)] = names
                except Exception as e:
                    sickrage.srLogger.warning("XEM: Rejected entry: indexerid:{0}; names:{1}".format(indexerid, names))
                    sickrage.srLogger.debug("XEM: Rejected entry error message:{0}".format(str(e)))

        setLastRefresh('xem')

    for xem_ex in xem_exception_dict:
        if xem_ex in exception_dict:
            exception_dict[xem_ex] = exception_dict[xem_ex] + xem_exception_dict[xem_ex]
        else:
            exception_dict[xem_ex] = xem_exception_dict[xem_ex]

    return xem_exception_dict
Example #3
    def getURL(self, url, post_data=None, params=None, timeout=30, json=False, needBytes=False):
        """
        By default this is just a simple urlopen call but this method should be overridden
        for providers with special URL requirements (like cookies)
        """

        return getURL(url, post_data=post_data, params=params, headers=self.headers, timeout=timeout,
                      session=self.session, json=json, needBytes=needBytes)
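As the docstring says, providers with special URL requirements would override this method. A hypothetical sketch of such an override, assuming a tracker that wants login cookies on every request; the cookie names and the self.uid / self.password_hash attributes are illustrative, not from any real provider.

    def getURL(self, url, post_data=None, params=None, timeout=30, json=False, needBytes=False):
        # inject the (hypothetical) login cookies before delegating to the module-level helper
        self.session.cookies.set('uid', self.uid)
        self.session.cookies.set('pass', self.password_hash)
        return getURL(url, post_data=post_data, params=params, headers=self.headers,
                      timeout=timeout, session=self.session, json=json, needBytes=needBytes)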
Example #4
def update_network_dict():
    """Update timezone information from SR repositories"""

    url = 'http://sickragetv.github.io/network_timezones/network_timezones.txt'
    url_data = getURL(url)
    if not url_data:
        sickrage.srLogger.warning(
            'Updating network timezones failed, this can happen from time to time. URL: %s'
            % url)
        load_network_dict()
        return

    d = {}
    for line in url_data.splitlines():
        try:
            (key, val) = line.strip().rsplit(':', 1)
        except ValueError:
            # a line without a colon separator is malformed, skip it
            continue
        if not key or not val:
            continue
        d[key] = val

    network_timezones = dict(
        cache_db.CacheDB().select('SELECT * FROM network_timezones;'))

    queries = []
    for network, timezone in d.iteritems():
        existing = network in network_timezones
        if not existing:
            queries.append([
                'INSERT OR IGNORE INTO network_timezones VALUES (?,?);',
                [network, timezone]
            ])
        elif network_timezones[network] != timezone:
            queries.append([
                'UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;',
                [timezone, network]
            ])

        if existing:
            del network_timezones[network]

    if network_timezones:
        purged = [x for x in network_timezones]
        queries.append([
            'DELETE FROM network_timezones WHERE network_name IN (%s);' %
            ','.join(['?'] * len(purged)), purged
        ])

    if queries:
        cache_db.CacheDB().mass_action(queries)
        del queries  # cleanup
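The parser above assumes each line of network_timezones.txt looks like Network Name:Olson/Timezone; rsplit(':', 1) splits on the last colon so colons inside the network name survive. A quick illustration with made-up lines:

sample = "NBC:America/New_York\nChannel 4:Europe/London\nmalformed line"
d = {}
for line in sample.splitlines():
    try:
        (key, val) = line.strip().rsplit(':', 1)
    except ValueError:
        continue  # no colon at all: skip the line
    if key and val:
        d[key] = val
# d == {'NBC': 'America/New_York', 'Channel 4': 'Europe/London'}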
Example #5
    def check_for_new_news(self, force=False):
        """
        Checks GitHub for the latest news.

        returns: unicode, a copy of the news

        force: ignored
        """

        news = ''

        # Grab a copy of the news
        sickrage.srLogger.debug(
            'check_for_new_news: Checking GitHub for latest news.')
        try:
            news = getURL(sickrage.srConfig.NEWS_URL, session=self.session)
        except Exception:
            sickrage.srLogger.warning(
                'check_for_new_news: Could not load news from repo.')

        if news:
            dates = list(re.finditer(r'^####(\d{4}-\d{2}-\d{2})####$', news, re.M))
            if not dates:
                return news

            try:
                last_read = datetime.strptime(sickrage.srConfig.NEWS_LAST_READ,
                                              '%Y-%m-%d')
            except Exception:
                last_read = datetime.min  # unparsable NEWS_LAST_READ: count everything as unread

            sickrage.srConfig.NEWS_UNREAD = 0
            got_latest = False
            for match in dates:
                if not got_latest:
                    got_latest = True
                    sickrage.srConfig.NEWS_LATEST = match.group(1)

                try:
                    if datetime.strptime(match.group(1),
                                         '%Y-%m-%d') > last_read:
                        sickrage.srConfig.NEWS_UNREAD += 1
                except Exception:
                    pass

        return news
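The news document is assumed to be a changelog with ####YYYY-MM-DD#### markers on their own lines; the regex extracts those dates so entries newer than NEWS_LAST_READ can be counted. A self-contained check of that logic:

import re
from datetime import datetime

news = "####2016-01-15####\nFixed things\n####2016-01-02####\nOlder news\n"
dates = list(re.finditer(r'^####(\d{4}-\d{2}-\d{2})####$', news, re.M))
last_read = datetime.strptime('2016-01-10', '%Y-%m-%d')
unread = sum(1 for m in dates
             if datetime.strptime(m.group(1), '%Y-%m-%d') > last_read)
# dates[0].group(1) == '2016-01-15' and unread == 1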
Example #6
def getShowImage(url, imgNum=None):
    if url is None:
        return None

    # if they provided a fanart number try to use it instead
    tempURL = url
    if imgNum:
        tempURL = url.split('-')[0] + "-" + str(imgNum) + ".jpg"

    sickrage.srLogger.debug("Fetching image from " + tempURL)

    image_data = getURL(tempURL, needBytes=True)
    if image_data is None:
        sickrage.srLogger.warning("There was an error trying to retrieve the image, aborting")
        return

    return image_data
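A hedged usage sketch: since getShowImage returns raw bytes (needBytes=True) or None, a caller should guard the result before writing it to disk. The URL and destination path below are illustrative.

image_data = getShowImage("http://example.com/fanart/12345-1.jpg", imgNum=3)
if image_data:
    with open("/tmp/12345-3.jpg", "wb") as f:
        f.write(image_data)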
Example #7
    def fetch_popular_shows(self):
        """Get popular show information from IMDB"""

        popular_shows = []

        data = getURL(self.url, session=self.session, params=self.params,
                      headers={'Referer': 'http://akas.imdb.com/'})
        if not data:
            return None

        with bs4_parser(data) as soup:
            results = soup.find("table", {"class": "results"})
            rows = results.find_all("tr") if results else []

        for row in rows:
            show = {}
            image_td = row.find("td", {"class": "image"})

            if image_td:
                image = image_td.find("img")
                show[b'image_url_large'] = self.change_size(image[b'src'], 3)
                show[b'image_path'] = os.path.join('images', 'imdb_popular',
                                                   os.path.basename(show[b'image_url_large']))

                self.cache_image(show[b'image_url_large'])

            td = row.find("td", {"class": "title"})

            if td:
                show[b'name'] = td.find("a").contents[0]
                show[b'imdb_url'] = "http://www.imdb.com" + td.find("a")["href"]
                show[b'imdb_tt'] = show[b'imdb_url'][-10:][0:9]
                show[b'year'] = td.find("span", {"class": "year_type"}).contents[0].split(" ")[0][1:]

                rating_all = td.find("div", {"class": "user_rating"})
                if rating_all:
                    rating_string = rating_all.find("div", {"class": "rating rating-list"})
                    if rating_string:
                        rating_string = rating_string[b'title']

                        match = re.search(r".* (.*)\/10.*\((.*)\).*", rating_string)
                        if match:
                            matches = match.groups()
                            show[b'rating'] = matches[0]
                            show[b'votes'] = matches[1]
                        else:
                            show[b'rating'] = None
                            show[b'votes'] = None
                else:
                    show[b'rating'] = None
                    show[b'votes'] = None

                outline = td.find("span", {"class": "outline"})
                if outline:
                    show[b'outline'] = outline.contents[0]
                else:
                    show[b'outline'] = ''

                popular_shows.append(show)

        return popular_shows
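The rating regex above expects the title attribute to read roughly like "Users rated this 8.5/10 (12,345 votes) ..." (the exact IMDB wording is an assumption); a standalone check:

import re

rating_string = "Users rated this 8.5/10 (12,345 votes) - click stars to rate"
match = re.search(r".* (.*)\/10.*\((.*)\).*", rating_string)
if match:
    rating, votes = match.groups()
# rating == '8.5' and votes == '12,345 votes'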
Example #8
def splitNZBResult(result):
    """
    Split result into separate episodes

    :param result: search result object
    :return: False upon failure, a list of episode objects otherwise
    """
    urlData = getURL(result.url, session=requests.Session(), needBytes=True)
    if urlData is None:
        sickrage.srLogger.error("Unable to load url " + result.url +
                                ", can't download season NZB")
        return False

    # parse the season ep name
    try:
        np = NameParser(False, showObj=result.show)
        parse_result = np.parse(result.name)
    except InvalidNameException:
        sickrage.srLogger.debug("Unable to parse the filename " + result.name +
                                " into a valid episode")
        return False
    except InvalidShowException:
        sickrage.srLogger.debug("Unable to parse the filename " + result.name +
                                " into a valid show")
        return False

    # bust it up
    season = parse_result.season_number if parse_result.season_number is not None else 1

    separateNZBs, xmlns = getSeasonNZBs(result.name, urlData, season)

    resultList = []

    for newNZB in separateNZBs:

        sickrage.srLogger.debug("Split out " + newNZB + " from " + result.name)

        # parse the name
        try:
            np = NameParser(False, showObj=result.show)
            parse_result = np.parse(newNZB)
        except InvalidNameException:
            sickrage.srLogger.debug("Unable to parse the filename " + newNZB +
                                    " into a valid episode")
            return False
        except InvalidShowException:
            sickrage.srLogger.debug("Unable to parse the filename " + newNZB +
                                    " into a valid show")
            return False

        # make sure the result is sane
        if (parse_result.season_number is not None
                and parse_result.season_number != season) or (
                    parse_result.season_number is None and season != 1):
            sickrage.srLogger.warning(
                "Found " + newNZB + " inside " + result.name +
                " but it doesn't seem to belong to the same season, ignoring it"
            )
            continue
        elif len(parse_result.episode_numbers) == 0:
            sickrage.srLogger.warning(
                "Found " + newNZB + " inside " + result.name +
                " but it doesn't seem to be a valid episode NZB, ignoring it")
            continue

        wantEp = True
        for epNo in parse_result.episode_numbers:
            if not result.extraInfo[0].wantEpisode(season, epNo,
                                                   result.quality):
                sickrage.srLogger.info(
                    "Ignoring result " + newNZB +
                    " because we don't want an episode that is " +
                    Quality.qualityStrings[result.quality])
                wantEp = False
                break
        if not wantEp:
            continue

        # get all the associated episode objects
        epObjList = []
        for curEp in parse_result.episode_numbers:
            epObjList.append(result.extraInfo[0].getEpisode(season, curEp))

        # make a result
        curResult = classes.NZBDataSearchResult(epObjList)
        curResult.name = newNZB
        curResult.provider = result.provider
        curResult.quality = result.quality
        curResult.extraInfo = [createNZBString(separateNZBs[newNZB], xmlns)]

        resultList.append(curResult)

    return resultList
Example #9
    def _check_for_new_version(self):
        git_version_url = "https://raw.githubusercontent.com/{}/{}/master/sickrage/version.txt".format(
            sickrage.srConfig.GIT_ORG, sickrage.srConfig.GIT_REPO)
        git_version = getURL(git_version_url) or self._find_installed_version()
        return git_version
Example #10
def retrieve_exceptions(get_xem=True, get_anidb=True):
    """
    Looks up the exceptions on github, parses them into a dict, and inserts them into the
    scene_exceptions table in cache.db. Also clears the scene name cache.
    """

    for indexer in sickrage.srCore.INDEXER_API().indexers:
        indexer_name = sickrage.srCore.INDEXER_API(indexer).name
        if shouldRefresh(indexer_name):
            sickrage.srLogger.info(
                "Checking for SiCKRAGE scene exception updates for {}".format(
                    indexer_name))
            loc = sickrage.srCore.INDEXER_API(indexer).config[b'scene_loc']

            try:
                # each exception is on one line with the format indexer_id: 'show name 1', 'show name 2', etc
                cur_line = None
                for cur_line in getURL(loc).splitlines():
                    indexer_id, _, aliases = cur_line.partition(':')  # @UnusedVariable
                    if not aliases:
                        continue

                    # regex out the list of shows, taking \' into account
                    exception_dict[int(indexer_id)] = [{re.sub(r'\\(.)', r'\1', x): -1}
                                                       for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
                if cur_line is None:
                    sickrage.srLogger.debug(
                        "Check scene exceptions update failed. Unable to update from: {}"
                        .format(loc))
                    continue

                # refreshed successfully
                setLastRefresh(indexer_name)
            except Exception:
                continue

    # XEM scene exceptions
    if get_xem:
        _xem_exceptions_fetcher()

    # AniDB scene exceptions
    if get_anidb:
        _anidb_exceptions_fetcher()

    sql_l = []
    for cur_indexer_id in exception_dict:
        sql_ex = cache_db.CacheDB().select(
            "SELECT * FROM scene_exceptions WHERE indexer_id = ?;",
            [cur_indexer_id])
        existing_exceptions = [x[b"show_name"] for x in sql_ex]
        if cur_indexer_id not in exception_dict:
            continue

        for cur_exception_dict in exception_dict[cur_indexer_id]:
            for cur_exception, curSeason in cur_exception_dict.iteritems():
                if cur_exception not in existing_exceptions:
                    sql_l.append([
                        "INSERT OR IGNORE INTO scene_exceptions (indexer_id, show_name, season) VALUES (?,?,?);",
                        [cur_indexer_id, cur_exception, curSeason]
                    ])
    if sql_l:
        cache_db.CacheDB().mass_action(sql_l)
        sickrage.srLogger.debug("Updated scene exceptions")
        del sql_l  # cleanup
    else:
        sickrage.srLogger.debug("No scene exceptions update needed")

    # cleanup
    exception_dict.clear()
    anidb_exception_dict.clear()
    xem_exception_dict.clear()
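The comment inside the loop describes the exception file format: each line is indexer_id: 'show name 1', 'show name 2', ..., with embedded quotes escaped as \'. A standalone check of the alias regex and the unescaping substitution:

import re

cur_line = r"12345: 'Show One', 'It\'s A Show'"
indexer_id, _, aliases = cur_line.partition(':')
names = [{re.sub(r'\\(.)', r'\1', x): -1}
         for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
# names == [{'Show One': -1}, {"It's A Show": -1}]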
Example #11
def xem_refresh(indexer_id, indexer, force=False):
    """
    Refresh data from xem for a tv show

    :param indexer_id: int
    """
    if not indexer_id or indexer_id < 1:
        return

    indexer_id = int(indexer_id)
    indexer = int(indexer)
    xem_session = _setUpSession()

    MAX_REFRESH_AGE_SECS = 86400  # 1 day

    rows = main_db.MainDB().select(
        "SELECT last_refreshed FROM xem_refresh WHERE indexer = ? AND indexer_id = ?",
        [indexer, indexer_id])
    if rows:
        lastRefresh = int(rows[0][b'last_refreshed'])
        refresh = int(time.mktime(
            datetime.today().timetuple())) > lastRefresh + MAX_REFRESH_AGE_SECS
    else:
        refresh = True

    if refresh or force:
        sickrage.srLogger.debug(
            'Looking up XEM scene mapping for show %s on %s' %
            (indexer_id, sickrage.srCore.INDEXER_API(indexer).name))

        # mark refreshed
        main_db.MainDB().upsert(
            "xem_refresh", {
                'indexer': indexer,
                'last_refreshed': int(time.mktime(
                    datetime.today().timetuple()))
            }, {'indexer_id': indexer_id})

        try:
            # XEM MAP URL
            url = "http://thexem.de/map/havemap?origin=%s" % sickrage.srCore.INDEXER_API(
                indexer).config[b'xem_origin']
            parsedJSON = getURL(url, session=xem_session, json=True)
            if not parsedJSON or 'result' not in parsedJSON or 'success' not in parsedJSON[b'result'] \
                    or 'data' not in parsedJSON or str(indexer_id) not in parsedJSON[b'data']:
                return

            # XEM API URL
            url = "http://thexem.de/map/all?id={}&origin={}&destination=scene".format(
                indexer_id,
                sickrage.srCore.INDEXER_API(indexer).config[b'xem_origin'])

            parsedJSON = getURL(url, session=xem_session, json=True)
            if not ((parsedJSON and 'result' in parsedJSON)
                    and 'success' in parsedJSON[b'result']):
                sickrage.srLogger.info('No XEM data for show "%s on %s"' % (
                    indexer_id,
                    sickrage.srCore.INDEXER_API(indexer).name,
                ))
                return

            cl = []
            for entry in parsedJSON[b'data']:
                if 'scene' in entry:
                    cl.append([
                        "UPDATE tv_episodes SET scene_season = ?, scene_episode = ?, scene_absolute_number = ? WHERE showid = ? AND season = ? AND episode = ?",
                        [
                            entry[b'scene'][b'season'],
                            entry[b'scene'][b'episode'],
                            entry[b'scene'][b'absolute'], indexer_id,
                            entry[sickrage.srCore.INDEXER_API(
                                indexer).config[b'xem_origin']][b'season'],
                            entry[sickrage.srCore.INDEXER_API(
                                indexer).config[b'xem_origin']][b'episode']
                        ]
                    ])
                if 'scene_2' in entry:  # for doubles
                    cl.append([
                        "UPDATE tv_episodes SET scene_season = ?, scene_episode = ?, scene_absolute_number = ? WHERE showid = ? AND season = ? AND episode = ?",
                        [
                            entry[b'scene_2'][b'season'],
                            entry[b'scene_2'][b'episode'],
                            entry[b'scene_2'][b'absolute'], indexer_id,
                            entry[sickrage.srCore.INDEXER_API(
                                indexer).config[b'xem_origin']][b'season'],
                            entry[sickrage.srCore.INDEXER_API(
                                indexer).config[b'xem_origin']][b'episode']
                        ]
                    ])

            if cl:
                main_db.MainDB().mass_action(cl)
                del cl  # cleanup

        except Exception as e:
            sickrage.srLogger.warning(
                "Exception while refreshing XEM data for show " +
                str(indexer_id) + " on " +
                sickrage.srCore.INDEXER_API(indexer).name +
                ": {}".format(e.message))
            sickrage.srLogger.debug(traceback.format_exc())
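For reference, the lookups above imply that each element of the XEM data payload maps a numbering scheme to season/episode/absolute numbers. The shape below is inferred from the code, not from XEM documentation:

# assumed shape of one parsedJSON['data'] entry (here with xem_origin == 'tvdb')
entry = {
    'tvdb': {'season': 1, 'episode': 1, 'absolute': 1},   # origin numbering
    'scene': {'season': 1, 'episode': 2, 'absolute': 2},  # scene numbering
    # 'scene_2': {...}                                    # present for doubles
}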