Code example #1
File: tz_updater.py  Project: ipmcc/SiCKRAGE
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time
    :param d: date string
    :param t: time string
    :param network: network to use as base
    :return: datetime object containing local time
    """

    parsed_time = time_regex.search(t)
    network_tz = get_network_timezone(network, load_network_dict())

    hr = 0
    m = 0

    if parsed_time:
        hr = tryInt(parsed_time.group('hour'))
        m = tryInt(parsed_time.group('minute'))

        ap = parsed_time.group('meridiem')
        ap = ap[0].lower() if ap else ''

        if ap == 'a' and hr == 12:
            hr -= 12
        elif ap == 'p' and hr != 12:
            hr += 12

        hr = hr if 0 <= hr <= 23 else 0
        m = m if 0 <= m <= 59 else 0

    result = datetime.fromordinal(max(tryInt(d), 1))

    return result.replace(hour=hr, minute=m, tzinfo=network_tz)
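Usage sketch (not part of the project source): time_regex, tryInt, and the network-timezone helpers are SiCKRAGE internals, so the pattern and fallback below are stand-ins re-created only so the meridiem handling can be run standalone.

import re
from datetime import datetime

# Stand-in for SiCKRAGE's time_regex; the real pattern lives in tz_updater.py
time_regex = re.compile(r"(?P<hour>\d{1,2})(:(?P<minute>\d{2}))?\s*(?P<meridiem>[ap]\.?m?\.?)?", re.I)

def tryInt(s, default=0):
    # Stand-in for SiCKRAGE's tryInt helper
    try:
        return int(s)
    except (TypeError, ValueError):
        return default

parsed = time_regex.search('8:00 PM')
hr = tryInt(parsed.group('hour'))    # 8
m = tryInt(parsed.group('minute'))   # 0
ap = (parsed.group('meridiem') or '')[0:1].lower()
if ap == 'a' and hr == 12:
    hr -= 12
elif ap == 'p' and hr != 12:
    hr += 12                         # 20
print(datetime.fromordinal(datetime(2016, 1, 15).toordinal()).replace(hour=hr, minute=m))
# -> 2016-01-15 20:00:00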
Code example #2
File: helpers.py  Project: tioxxx/SiCKRAGE-1
def getFileMetadata(filename):
    import enzyme

    try:
        p = enzyme.parse(filename)

        # Video codec
        vc = ('H264' if p.video[0].codec == 'AVC1' else
              'x265' if p.video[0].codec == 'HEVC' else p.video[0].codec)

        # Audio codec; dict.get with the raw codec id as the default keeps
        # the original value when there is no mapping, and never raises
        ac = audio_codec_map.get(p.audio[0].codec, p.audio[0].codec)

        # Find title in video headers
        titles = []

        try:
            if p.title:
                titles.append(p.title)
        except Exception:
            sickrage.srLogger.error('Failed getting title from meta: %s',
                                    traceback.format_exc())

        for video in p.video:
            try:
                if video.title:
                    titles.append(video.title)
            except Exception:
                sickrage.srLogger.error('Failed getting title from meta: %s',
                                        traceback.format_exc())

        return {
            'titles': list(set(titles)),
            'video': vc,
            'audio': ac,
            'resolution_width': tryInt(p.video[0].width),
            'resolution_height': tryInt(p.video[0].height),
            'audio_channels': p.audio[0].channels,
        }
    except enzyme.exceptions.ParseError:
        sickrage.srLogger.debug('Failed to parse meta for %s', filename)
    except enzyme.exceptions.NoParserError:
        sickrage.srLogger.debug('No parser found for %s', filename)
    except Exception:
        sickrage.srLogger.debug('Failed parsing %s', filename)

    return {}
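A hypothetical call site for the function above; 'episode.mkv' is a placeholder path, and the keys are exactly those of the returned dict:

meta = getFileMetadata('episode.mkv')  # placeholder filename, not from the project
if meta:
    print('%sx%s video=%s audio=%s' % (meta['resolution_width'],
                                       meta['resolution_height'],
                                       meta['video'], meta['audio']))
else:
    print('no metadata could be parsed')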
Code example #3
File: helpers.py  Project: Aeronaut/SiCKRAGE
def getFileMetadata(filename):
    import enzyme

    try:
        p = enzyme.parse(filename)

        # Video codec
        vc = ('H264' if p.video[0].codec == 'AVC1' else 'x265' if p.video[0].codec == 'HEVC' else p.video[0].codec)

        # Audio codec; dict.get with the raw codec id as the default keeps
        # the original value when there is no mapping, and never raises
        ac = audio_codec_map.get(p.audio[0].codec, p.audio[0].codec)

        # Find title in video headers
        titles = []

        try:
            if p.title:
                titles.append(p.title)
        except Exception:
            sickrage.srLogger.error('Failed getting title from meta: %s', traceback.format_exc())

        for video in p.video:
            try:
                if video.title:
                    titles.append(video.title)
            except Exception:
                sickrage.srLogger.error('Failed getting title from meta: %s', traceback.format_exc())

        return {
            'titles': list(set(titles)),
            'video': vc,
            'audio': ac,
            'resolution_width': tryInt(p.video[0].width),
            'resolution_height': tryInt(p.video[0].height),
            'audio_channels': p.audio[0].channels,
        }
    except enzyme.exceptions.ParseError:
        sickrage.srLogger.debug('Failed to parse meta for %s', filename)
    except enzyme.exceptions.NoParserError:
        sickrage.srLogger.debug('No parser found for %s', filename)
    except Exception:
        sickrage.srLogger.debug('Failed parsing %s', filename)

    return {}
Code example #4
File: torrentproject.py  Project: tioxxx/SiCKRAGE-1
    def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        for mode in search_strings.keys():  # Mode = RSS, Season, Episode
            sickrage.srLogger.debug("Search Mode: %s" % mode)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    sickrage.srLogger.debug("Search string: %s " % search_string)

                searchURL = self.urls['api'] + "?s=%s&out=json&filter=2101&num=150" % quote_plus(
                        search_string.encode('utf-8'))

                sickrage.srLogger.debug("Search URL: %s" % searchURL)
                torrents = self.getURL(searchURL, json=True)
                if not (torrents and "total_found" in torrents and int(torrents[b"total_found"]) > 0):
                    sickrage.srLogger.debug("Data returned from provider does not contain any torrents")
                    continue

                del torrents[b"total_found"]

                for i in torrents:
                    title = torrents[i][b"title"]
                    seeders = tryInt(torrents[i][b"seeds"], 1)
                    leechers = tryInt(torrents[i][b"leechs"], 0)
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            sickrage.srLogger.debug("Torrent doesn't meet minimum seeds & leechers, not selecting: %s" % title)
                        continue

                    t_hash = torrents[i][b"torrent_hash"]
                    size = int(torrents[i][b"torrent_size"])

                    try:
                        # asserts drive control flow here: any failure falls
                        # through to the static tracker list below
                        assert seeders < 10
                        assert mode != 'RSS'
                        sickrage.srLogger.debug("Torrent has less than 10 seeds, getting dyn trackers: " + title)
                        trackerUrl = self.urls['api'] + "" + t_hash + "/trackers_json"
                        jdata = self.getURL(trackerUrl, json=True)
                        assert jdata != "maintenance"
                        download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + "".join(
                                ["&tr=" + s for s in jdata])
                    except Exception:
                        download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + title + "&tr=udp://tracker.openbittorrent.com:80&tr=udp://tracker.coppersurfer.tk:6969&tr=udp://open.demonii.com:1337&tr=udp://tracker.leechers-paradise.org:6969&tr=udp://exodus.desync.com:6969"

                    if not all([title, download_url]):
                        continue

                    item = title, download_url, size, seeders, leechers

                    if mode != 'RSS':
                        sickrage.srLogger.debug("Found result: %s" % title)

                    items[mode].append(item)

            # For each search mode sort all the items by seeders
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
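The provider collects (title, download_url, size, seeders, leechers) tuples and sorts each mode's list by the seeders field. A standalone illustration of that sort, with invented tuples:

items = [('Show.S01E01', 'magnet:?xt=urn:btih:aaa', 350000000, 4, 2),
         ('Show.S01E01.PROPER', 'magnet:?xt=urn:btih:bbb', 360000000, 87, 12)]
items.sort(key=lambda tup: tup[3], reverse=True)  # index 3 is the seeder count
print([t[0] for t in items])  # ['Show.S01E01.PROPER', 'Show.S01E01']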
Code example #5
File: freshontv.py  Project: tioxxx/SiCKRAGE-1
    def _doSearch(self,
                  search_params,
                  search_mode='eponly',
                  epcount=0,
                  age=0,
                  epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        freeleech = '3' if self.freeleech else '0'

        if not self._doLogin():
            return results

        for mode in search_params.keys():
            sickrage.srLogger.debug("Search Mode: %s" % mode)
            for search_string in search_params[mode]:

                if mode != 'RSS':
                    sickrage.srLogger.debug("Search string: %s " %
                                            search_string)

                searchURL = self.urls['search'] % (freeleech, search_string)
                sickrage.srLogger.debug("Search URL: %s" % searchURL)
                data = self.getURL(searchURL)
                max_page_number = 0

                if not data:
                    sickrage.srLogger.debug("No data returned from provider")
                    continue

                try:
                    with bs4_parser(data) as html:

                        # Check to see if there is more than 1 page of results
                        pager = html.find('div', {'class': 'pager'})
                        if pager:
                            page_links = pager.find_all('a', href=True)
                        else:
                            page_links = []

                        if len(page_links) > 0:
                            for lnk in page_links:
                                link_text = lnk.text.strip()
                                if link_text.isdigit():
                                    page_int = int(link_text)
                                    if page_int > max_page_number:
                                        max_page_number = page_int

                        # limit page number to 15 just in case something goes wrong
                        if max_page_number > 15:
                            max_page_number = 15
                        # limit RSS search
                        if max_page_number > 3 and mode == 'RSS':
                            max_page_number = 3
                except Exception:
                    sickrage.srLogger.error(
                        "Failed parsing provider. Traceback: %s" %
                        traceback.format_exc())
                    continue

                data_response_list = [data]

                # Freshon starts counting pages from zero, even though it displays numbers from 1
                if max_page_number > 1:
                    for i in range(1, max_page_number):

                        time.sleep(1)
                        page_searchURL = searchURL + '&page=' + str(i)
                        # '.log(u"Search string: " + page_searchURL, LOGGER.DEBUG)
                        page_html = self.getURL(page_searchURL)

                        if not page_html:
                            continue

                        data_response_list.append(page_html)

                try:

                    for data in data_response_list:

                        with bs4_parser(data) as html:

                            torrent_rows = html.findAll(
                                "tr", {"class": re.compile('torrent_[0-9]*')})

                            # Continue only if a Release is found
                            if len(torrent_rows) == 0:
                                sickrage.srLogger.debug(
                                    "Data returned from provider does not contain any torrents"
                                )
                                continue

                            for individual_torrent in torrent_rows:

                                # skip if torrent has been nuked due to poor quality
                                if individual_torrent.find(
                                        'img', alt='Nuked') is not None:
                                    continue

                                try:
                                    title = individual_torrent.find(
                                        'a', {'class': 'torrent_name_link'
                                              })['title']
                                except Exception:
                                    sickrage.srLogger.warning(
                                        "Unable to parse torrent title. Traceback: %s "
                                        % traceback.format_exc())
                                    continue

                                try:
                                    details_url = individual_torrent.find(
                                        'a',
                                        {'class': 'torrent_name_link'})['href']
                                    torrent_id = int((re.match(
                                        '.*?([0-9]+)$',
                                        details_url).group(1)).strip())
                                    download_url = self.urls['download'] % (
                                        str(torrent_id))
                                    seeders = tryInt(
                                        individual_torrent.find(
                                            'td', {
                                                'class': 'table_seeders'
                                            }).find('span').text.strip(), 1)
                                    leechers = tryInt(
                                        individual_torrent.find(
                                            'td', {
                                                'class': 'table_leechers'
                                            }).find('a').text.strip(), 0)
                                    # FIXME
                                    size = -1
                                except Exception:
                                    continue

                                if not all([title, download_url]):
                                    continue

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    if mode != 'RSS':
                                        sickrage.srLogger.debug(
                                            "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})"
                                            .format(title, seeders, leechers))
                                    continue

                                item = title, download_url, size, seeders, leechers
                                if mode != 'RSS':
                                    sickrage.srLogger.debug(
                                        "Found result: %s " % title)

                                items[mode].append(item)

                except Exception:
                    sickrage.srLogger.error(
                        "Failed parsing provider. Traceback: %s" %
                        traceback.format_exc())

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
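The pager-parsing step can be exercised on its own. bs4_parser is a SiCKRAGE wrapper, so plain BeautifulSoup stands in for it in this sketch, and the HTML string is invented:

from bs4 import BeautifulSoup

html = BeautifulSoup('<div class="pager"><a href="?p=0">1</a>'
                     '<a href="?p=1">2</a><a href="?p=1">next</a></div>',
                     'html.parser')
max_page_number = 0
pager = html.find('div', {'class': 'pager'})
for lnk in (pager.find_all('a', href=True) if pager else []):
    link_text = lnk.text.strip()
    if link_text.isdigit():
        max_page_number = max(max_page_number, int(link_text))
print(max_page_number)  # 2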
Code example #6
File: nzbget_client.py  Project: Aeronaut/SiCKRAGE
    def sendNZB(nzb, proper=False):
        """
        Sends NZB to NZBGet client

        :param nzb: nzb object
        :param proper: True if this is a Proper download, False if not. Defaults to False
        """
        addToTop = False
        nzbgetprio = 0
        category = sickrage.srConfig.NZBGET_CATEGORY
        if nzb.show.is_anime:
            category = sickrage.srConfig.NZBGET_CATEGORY_ANIME

        if sickrage.srConfig.NZBGET_USE_HTTPS:
            nzbgetXMLrpc = "https://%(username)s:%(password)s@%(host)s/xmlrpc"
        else:
            nzbgetXMLrpc = "http://%(username)s:%(password)s@%(host)s/xmlrpc"

        if sickrage.srConfig.NZBGET_HOST is None:
            sickrage.srLogger.error("No NZBget host found in configuration. Please configure it.")
            return False

        url = nzbgetXMLrpc % {"host": sickrage.srConfig.NZBGET_HOST, "username": sickrage.srConfig.NZBGET_USERNAME,
                              "password": sickrage.srConfig.NZBGET_PASSWORD}

        nzbGetRPC = xmlrpclib.ServerProxy(url)
        try:
            if nzbGetRPC.writelog("INFO", "SiCKRAGE connected to drop of %s any moment now." % (nzb.name + ".nzb")):
                sickrage.srLogger.debug("Successful connected to NZBget")
            else:
                sickrage.srLogger.error("Successful connected to NZBget, but unable to send a message")

        except httplib.socket.error:
            sickrage.srLogger.error(
                    "Please check your NZBget host and port (if it is running). NZBget is not responding to this combination")
            return False

        except xmlrpclib.ProtocolError as e:
            if e.errmsg == "Unauthorized":
                sickrage.srLogger.error("NZBget username or password is incorrect.")
            else:
                sickrage.srLogger.error("Protocol Error: " + e.errmsg)
            return False

        dupekey = ""
        dupescore = 0
        # if it aired recently make it high priority and generate DupeKey/Score
        for curEp in nzb.episodes:
            if dupekey == "":
                if curEp.show.indexer == 1:
                    dupekey = "SiCKRAGE-" + str(curEp.show.indexerid)
                elif curEp.show.indexer == 2:
                    dupekey = "SiCKRAGE-tvr" + str(curEp.show.indexerid)
            dupekey += "-" + str(curEp.season) + "." + str(curEp.episode)
            if date.today() - curEp.airdate <= timedelta(days=7):
                addToTop = True
                nzbgetprio = sickrage.srConfig.NZBGET_PRIORITY
            else:
                category = sickrage.srConfig.NZBGET_CATEGORY_BACKLOG
                if nzb.show.is_anime:
                    category = sickrage.srConfig.NZBGET_CATEGORY_ANIME_BACKLOG

        if nzb.quality != Quality.UNKNOWN:
            dupescore = nzb.quality * 100
        if proper:
            dupescore += 10

        nzbcontent64 = None
        if nzb.resultType == "nzbdata":
            data = nzb.extraInfo[0]
            nzbcontent64 = standard_b64encode(data)

        sickrage.srLogger.info("Sending NZB to NZBget")
        sickrage.srLogger.debug("URL: " + url)

        try:
            # Find out if nzbget supports priority (Version 9.0+), old versions beginning with a 0.x will use the old command
            nzbget_version_str = nzbGetRPC.version()
            nzbget_version = tryInt(nzbget_version_str[:nzbget_version_str.find(".")])
            if nzbget_version == 0:
                if nzbcontent64 is not None:
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", category, addToTop, nzbcontent64)
                else:
                    if nzb.resultType == "nzb":
                        genProvider = GenericProvider("")
                        data = genProvider.getURL(nzb.url)
                        if data is None:
                            return False
                        nzbcontent64 = standard_b64encode(data)
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", category, addToTop, nzbcontent64)
            elif nzbget_version == 12:
                if nzbcontent64 is not None:
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", category, nzbgetprio, False,
                                                     nzbcontent64, False, dupekey, dupescore, "score")
                else:
                    nzbget_result = nzbGetRPC.appendurl(nzb.name + ".nzb", category, nzbgetprio, False,
                                                        nzb.url, False, dupekey, dupescore, "score")
            # v13+ has a new combined append method that accepts both (url and content)
            # also the return value has changed from boolean to integer
            # (Positive number representing NZBID of the queue item. 0 and negative numbers represent error codes.)
            elif nzbget_version >= 13:
                nzbget_result = nzbGetRPC.append(nzb.name + ".nzb",
                                                 nzbcontent64 if nzbcontent64 is not None else nzb.url,
                                                 category, nzbgetprio, False, False, dupekey, dupescore,
                                                 "score") > 0
            else:
                if nzbcontent64 is not None:
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb", category, nzbgetprio, False,
                                                     nzbcontent64)
                else:
                    nzbget_result = nzbGetRPC.appendurl(nzb.name + ".nzb", category, nzbgetprio, False,
                                                        nzb.url)

            if nzbget_result:
                sickrage.srLogger.debug("NZB sent to NZBget successfully")
                return True
            else:
                sickrage.srLogger.error("NZBget could not add %s to the queue" % (nzb.name + ".nzb"))
                return False
        except Exception:
            sickrage.srLogger.error("Connect Error to NZBget: could not add %s to the queue" % (nzb.name + ".nzb"))
            return False
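A standalone sketch of the DupeKey/DupeScore values the loop above builds; the indexerid, quality value, and episode numbers are all invented for illustration:

dupekey = "SiCKRAGE-" + str(75760)          # curEp.show.indexer == 1; id invented
for season, episode in [(2, 3), (2, 4)]:    # a hypothetical two-episode NZB
    dupekey += "-" + str(season) + "." + str(episode)
quality, proper = 4, True                   # invented Quality constant
dupescore = quality * 100 + (10 if proper else 0)
print(dupekey, dupescore)                   # SiCKRAGE-75760-2.3-2.4 410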
Code example #7
File: freshontv.py  Project: Aeronaut/SiCKRAGE
    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        freeleech = '3' if self.freeleech else '0'

        if not self._doLogin():
            return results

        for mode in search_params.keys():
            sickrage.srLogger.debug("Search Mode: %s" % mode)
            for search_string in search_params[mode]:

                if mode != 'RSS':
                    sickrage.srLogger.debug("Search string: %s " % search_string)

                searchURL = self.urls['search'] % (freeleech, search_string)
                sickrage.srLogger.debug("Search URL: %s" % searchURL)
                data = self.getURL(searchURL)
                max_page_number = 0

                if not data:
                    sickrage.srLogger.debug("No data returned from provider")
                    continue

                try:
                    with bs4_parser(data) as html:

                        # Check to see if there is more than 1 page of results
                        pager = html.find('div', {'class': 'pager'})
                        if pager:
                            page_links = pager.find_all('a', href=True)
                        else:
                            page_links = []

                        if len(page_links) > 0:
                            for lnk in page_links:
                                link_text = lnk.text.strip()
                                if link_text.isdigit():
                                    page_int = int(link_text)
                                    if page_int > max_page_number:
                                        max_page_number = page_int

                        # limit page number to 15 just in case something goes wrong
                        if max_page_number > 15:
                            max_page_number = 15
                        # limit RSS search
                        if max_page_number > 3 and mode == 'RSS':
                            max_page_number = 3
                except Exception:
                    sickrage.srLogger.error("Failed parsing provider. Traceback: %s" % traceback.format_exc())
                    continue

                data_response_list = [data]

                # Freshon starts counting pages from zero, even though it displays numbers from 1
                if max_page_number > 1:
                    for i in range(1, max_page_number):

                        time.sleep(1)
                        page_searchURL = searchURL + '&page=' + str(i)
                        # '.log(u"Search string: " + page_searchURL, LOGGER.DEBUG)
                        page_html = self.getURL(page_searchURL)

                        if not page_html:
                            continue

                        data_response_list.append(page_html)

                try:

                    for data in data_response_list:

                        with bs4_parser(data) as html:

                            torrent_rows = html.findAll("tr", {"class": re.compile('torrent_[0-9]*')})

                            # Continue only if a Release is found
                            if len(torrent_rows) == 0:
                                sickrage.srLogger.debug("Data returned from provider does not contain any torrents")
                                continue

                            for individual_torrent in torrent_rows:

                                # skip if torrent has been nuked due to poor quality
                                if individual_torrent.find('img', alt='Nuked') is not None:
                                    continue

                                try:
                                    title = individual_torrent.find('a', {'class': 'torrent_name_link'})['title']
                                except Exception:
                                    sickrage.srLogger.warning(
                                            "Unable to parse torrent title. Traceback: %s " % traceback.format_exc())
                                    continue

                                try:
                                    details_url = individual_torrent.find('a', {'class': 'torrent_name_link'})['href']
                                    torrent_id = int((re.match('.*?([0-9]+)$', details_url).group(1)).strip())
                                    download_url = self.urls['download'] % (str(torrent_id))
                                    seeders = tryInt(individual_torrent.find('td', {'class': 'table_seeders'}).find(
                                            'span').text.strip(), 1)
                                    leechers = tryInt(individual_torrent.find('td', {'class': 'table_leechers'}).find(
                                            'a').text.strip(), 0)
                                    # FIXME
                                    size = -1
                                except Exception:
                                    continue

                                if not all([title, download_url]):
                                    continue

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    if mode != 'RSS':
                                        sickrage.srLogger.debug(
                                                "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                                        title, seeders, leechers))
                                    continue

                                item = title, download_url, size, seeders, leechers
                                if mode != 'RSS':
                                    sickrage.srLogger.debug("Found result: %s " % title)

                                items[mode].append(item)

                except Exception:
                    sickrage.srLogger.error("Failed parsing provider. Traceback: %s" % traceback.format_exc())

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
Code example #8
File: extratorrent.py  Project: tioxxx/SiCKRAGE-1
    def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        for mode in search_strings.keys():
            sickrage.srLogger.debug("Search Mode: %s" % mode)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    sickrage.srLogger.debug("Search string: %s " % search_string)

                try:
                    self.search_params.update({'type': ('search', 'rss')[mode == 'RSS'], 'search': search_string})
                    data = self.getURL(self.urls['rss'], params=self.search_params)
                    if not data:
                        sickrage.srLogger.debug("No data returned from provider")
                        continue

                    if not data.startswith('<?xml'):
                        sickrage.srLogger.info('Expected xml but got something else, is your mirror failing?')
                        continue

                    try:
                        data = xmltodict.parse(data)
                    except ExpatError:
                        sickrage.srLogger.error("Failed parsing provider. Traceback: %r\n%r" % (traceback.format_exc(), data))
                        continue

                    # chained 'and' short-circuits, so a missing key never raises
                    if not (data and b'rss' in data and b'channel' in data[b'rss'] and b'item' in data[b'rss'][b'channel']):
                        sickrage.srLogger.debug("Malformed rss returned, skipping")
                        continue

                    # https://github.com/martinblech/xmltodict/issues/111
                    entries = data[b'rss'][b'channel'][b'item']
                    entries = entries if isinstance(entries, list) else [entries]

                    for item in entries:
                        title = item[b'title'].decode('utf-8')
                        # info_hash = item[b'info_hash']
                        size = int(item[b'size'])
                        seeders = tryInt(item[b'seeders'], 0)
                        leechers = tryInt(item[b'leechers'], 0)
                        download_url = item[b'enclosure']['@url'] if 'enclosure' in item else self._magnet_from_details(
                                item[b'link'])

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                sickrage.srLogger.debug(
                                        "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                                title, seeders, leechers))
                            continue

                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            sickrage.srLogger.debug("Found result: %s " % title)

                        items[mode].append(item)

                except (AttributeError, TypeError, KeyError, ValueError):
                    sickrage.srLogger.error("Failed parsing provider. Traceback: %r" % traceback.format_exc())

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results
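The list normalization above guards against xmltodict returning a bare dict when the feed contains exactly one <item> (the linked issue). A standalone demonstration:

import xmltodict

feed = '<rss><channel><item><title>only one</title></item></channel></rss>'
data = xmltodict.parse(feed)
entries = data['rss']['channel']['item']
entries = entries if isinstance(entries, list) else [entries]
print(len(entries), entries[0]['title'])  # 1 only one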
Code example #9
File: nzbget_client.py  Project: tioxxx/SiCKRAGE-1
    def sendNZB(nzb, proper=False):
        """
        Sends NZB to NZBGet client

        :param nzb: nzb object
        :param proper: True if this is a Proper download, False if not. Defaults to False
        """
        addToTop = False
        nzbgetprio = 0
        category = sickrage.srConfig.NZBGET_CATEGORY
        if nzb.show.is_anime:
            category = sickrage.srConfig.NZBGET_CATEGORY_ANIME

        if sickrage.srConfig.NZBGET_USE_HTTPS:
            nzbgetXMLrpc = "https://%(username)s:%(password)s@%(host)s/xmlrpc"
        else:
            nzbgetXMLrpc = "http://%(username)s:%(password)s@%(host)s/xmlrpc"

        if sickrage.srConfig.NZBGET_HOST is None:
            sickrage.srLogger.error(
                "No NZBget host found in configuration. Please configure it.")
            return False

        url = nzbgetXMLrpc % {
            "host": sickrage.srConfig.NZBGET_HOST,
            "username": sickrage.srConfig.NZBGET_USERNAME,
            "password": sickrage.srConfig.NZBGET_PASSWORD
        }

        nzbGetRPC = xmlrpclib.ServerProxy(url)
        try:
            if nzbGetRPC.writelog(
                    "INFO",
                    "SiCKRAGE connected to drop of %s any moment now." %
                (nzb.name + ".nzb")):
                sickrage.srLogger.debug("Successful connected to NZBget")
            else:
                sickrage.srLogger.error(
                    "Successful connected to NZBget, but unable to send a message"
                )

        except httplib.socket.error:
            sickrage.srLogger.error(
                "Please check your NZBget host and port (if it is running). NZBget is not responding to this combination"
            )
            return False

        except xmlrpclib.ProtocolError as e:
            if e.errmsg == "Unauthorized":
                sickrage.srLogger.error(
                    "NZBget username or password is incorrect.")
            else:
                sickrage.srLogger.error("Protocol Error: " + e.errmsg)
            return False

        dupekey = ""
        dupescore = 0
        # if it aired recently make it high priority and generate DupeKey/Score
        for curEp in nzb.episodes:
            if dupekey == "":
                if curEp.show.indexer == 1:
                    dupekey = "SiCKRAGE-" + str(curEp.show.indexerid)
                elif curEp.show.indexer == 2:
                    dupekey = "SiCKRAGE-tvr" + str(curEp.show.indexerid)
            dupekey += "-" + str(curEp.season) + "." + str(curEp.episode)
            if date.today() - curEp.airdate <= timedelta(days=7):
                addToTop = True
                nzbgetprio = sickrage.srConfig.NZBGET_PRIORITY
            else:
                category = sickrage.srConfig.NZBGET_CATEGORY_BACKLOG
                if nzb.show.is_anime:
                    category = sickrage.srConfig.NZBGET_CATEGORY_ANIME_BACKLOG

        if nzb.quality != Quality.UNKNOWN:
            dupescore = nzb.quality * 100
        if proper:
            dupescore += 10

        nzbcontent64 = None
        if nzb.resultType == "nzbdata":
            data = nzb.extraInfo[0]
            nzbcontent64 = standard_b64encode(data)

        sickrage.srLogger.info("Sending NZB to NZBget")
        sickrage.srLogger.debug("URL: " + url)

        try:
            # Find out if nzbget supports priority (Version 9.0+), old versions beginning with a 0.x will use the old command
            nzbget_version_str = nzbGetRPC.version()
            nzbget_version = tryInt(
                nzbget_version_str[:nzbget_version_str.find(".")])
            if nzbget_version == 0:
                if nzbcontent64 is not None:
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb",
                                                     category, addToTop,
                                                     nzbcontent64)
                else:
                    if nzb.resultType == "nzb":
                        genProvider = GenericProvider("")
                        data = genProvider.getURL(nzb.url)
                        if data is None:
                            return False
                        nzbcontent64 = standard_b64encode(data)
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb",
                                                     category, addToTop,
                                                     nzbcontent64)
            elif nzbget_version == 12:
                if nzbcontent64 is not None:
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb",
                                                     category, nzbgetprio,
                                                     False, nzbcontent64,
                                                     False, dupekey, dupescore,
                                                     "score")
                else:
                    nzbget_result = nzbGetRPC.appendurl(
                        nzb.name + ".nzb", category, nzbgetprio, False,
                        nzb.url, False, dupekey, dupescore, "score")
            # v13+ has a new combined append method that accepts both (url and content)
            # also the return value has changed from boolean to integer
            # (Positive number representing NZBID of the queue item. 0 and negative numbers represent error codes.)
            elif nzbget_version >= 13:
                nzbget_result = nzbGetRPC.append(
                    nzb.name + ".nzb", nzbcontent64 if nzbcontent64 is not None
                    else nzb.url, category, nzbgetprio, False, False, dupekey,
                    dupescore, "score") > 0
            else:
                if nzbcontent64 is not None:
                    nzbget_result = nzbGetRPC.append(nzb.name + ".nzb",
                                                     category, nzbgetprio,
                                                     False, nzbcontent64)
                else:
                    nzbget_result = nzbGetRPC.appendurl(
                        nzb.name + ".nzb", category, nzbgetprio, False,
                        nzb.url)

            if nzbget_result:
                sickrage.srLogger.debug("NZB sent to NZBget successfully")
                return True
            else:
                sickrage.srLogger.error(
                    "NZBget could not add %s to the queue" %
                    (nzb.name + ".nzb"))
                return False
        except Exception:
            sickrage.srLogger.error(
                "Connect Error to NZBget: could not add %s to the queue" %
                (nzb.name + ".nzb"))
            return False