Example #1
    def _get_title_and_url(self, item):
        """
        Retrieves the title and URL data from the item XML node.
        Overridden here so that we can have a preference for magnets.

        item: An xml.dom.minidom.Node representing the <item> tag of the RSS feed
        Returns: A tuple containing two strings representing title and URL respectively
        """
        title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
        url = None
        try:
            if sickbeard.PREFER_MAGNETS:
                try:
                    url = helpers.get_xml_text(item.getElementsByTagName('magnetURI')[0])
                    torrent_hash = self.getHashFromMagnet(url)
                    if not torrent_hash:
                        logger.log(u'magnetURI "%s" found for "%s", but it has no valid hash - ignoring' % (url, title),
                                   logger.WARNING)
                        url = None
                except Exception:
                    pass
            if url is None:
                url = helpers.get_xml_text(item.getElementsByTagName('link')[0])
                if url:
                    url = url.replace('&amp;', '&')
        except IndexError:
            url = None

        return (title, url)
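The code above rejects a magnetURI whose hash cannot be extracted by getHashFromMagnet. As a rough sketch of what such a helper could do (an assumption for illustration, not the actual Sick Beard implementation), the BTIH info hash can be pulled out of a magnet URI with a regular expression:

import re

def get_hash_from_magnet(magnet_uri):
    # assumption: a magnet link carries the info hash in its xt=urn:btih: parameter,
    # either as 40 hex characters or as a 32-character base32 string
    match = re.search(r'xt=urn:btih:([0-9A-Fa-f]{40}|[A-Za-z2-7]{32})', magnet_uri)
    return match.group(1) if match else None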
Example #2
    def _get_title_and_url(self, item):
        
        title, url = None, None

        self.cache._remove_namespace(item)

        title = helpers.get_xml_text(item.find('title'))
        
        attempt_list = [lambda: helpers.get_xml_text(item.find('magnetURI')),
                        
                        lambda: item.find('enclosure').get('url'),
                        
                        lambda: helpers.get_xml_text(item.find('link'))]

        
        for cur_attempt in attempt_list:
            try:
                url = cur_attempt()
            except:
                continue
        
            if title and url:
                return (title, url)
            
        return (title, url)
Example #3
    def _get_title_and_url(self, item):
        """
        Retrieves the title and URL data from the item XML node.
        Overridden here so that we can have a preference for magnets.
        
        item: An xml.dom.minidom.Node representing the <item> tag of the RSS feed
        Returns: A tuple containing two strings representing title and URL respectively
        """
        title = helpers.get_xml_text(item.getElementsByTagName("title")[0])
        url = None
        try:
            if sickbeard.PREFER_MAGNETS:
                try:
                    url = helpers.get_xml_text(item.getElementsByTagName("magnetURI")[0])
                    torrent_hash = self.getHashFromMagnet(url)
                    if not torrent_hash:
                        logger.log(
                            u'magnetURI "%s" found for "%s", but it has no valid hash - ignoring' % (url, title),
                            logger.WARNING,
                        )
                        url = None
                except Exception:
                    pass
            if url is None:
                url = helpers.get_xml_text(item.getElementsByTagName("link")[0])
                if url:
                    url = url.replace("&amp;", "&")
        except IndexError:
            url = None

        return (title, url)
Example #4
    def _get_title_and_url(self, item):
        
        title, url = None, None

        self.cache._remove_namespace(item)

        title = helpers.get_xml_text(item.find('title'))
        
        attempt_list = [lambda: helpers.get_xml_text(item.find('magnetURI')),
                        
                        lambda: item.find('enclosure').get('url'),
                        
                        lambda: helpers.get_xml_text(item.find('link'))]

        
        for cur_attempt in attempt_list:
            try:
                url = cur_attempt()
            except:
                continue
        
            if title and url:
                return (title, url)
            
        return (title, url)
Example #5
    def _get_title_and_url(self, item):
        title = helpers.get_xml_text(item.find('title'))
        # logger.log('publichd got title' + title)
        url = None
        if sickbeard.PREFER_MAGNETS:
            magnetURI = helpers.get_xml_text(item.find('{http://xmlns.ezrss.it/0.1/}torrent/{http://xmlns.ezrss.it/0.1/}magnetURI'))
            # logger.log('publichd got magnetURI' + magnetURI)
            if magnetURI:
                url = magnetURI

        if not url:
            enclos = item.find('enclosure')
            if enclos is not None:
                url = enclos.get('url')
                # logger.log('publichd got enclosure url ' + url)
                if url:
                    url = url.replace('&amp;', '&')

        if title.startswith('[TORRENT] '):
            title = title[10:]

        # these guys also get creative with the torrent names, adding crud at the
        # end like "[PublicHD]", "[P2PDL]" etc. which confuses sb.  Best to
        # just remove it if present
        crudAtEndMatch = re.match(r'(.*) \[\w+\]$', title)
        if crudAtEndMatch:
            title = crudAtEndMatch.group(1)

        return (title, url)
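For reference, the trailing-tag cleanup above behaves like this (illustration only, with a made-up title):

import re

title = 'Some.Show.S01E01.720p.HDTV.x264 [PublicHD]'
match = re.match(r'(.*) \[\w+\]$', title)
if match:
    title = match.group(1)  # 'Some.Show.S01E01.720p.HDTV.x264'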
Example #6
    def _get_title_and_url(self, item):
        title = helpers.get_xml_text(item.find('title'))
        # logger.log('publichd got title' + title)
        url = None
        if sickbeard.PREFER_MAGNETS:
            magnetURI = helpers.get_xml_text(
                item.find(
                    '{http://xmlns.ezrss.it/0.1/}torrent/{http://xmlns.ezrss.it/0.1/}magnetURI'
                ))
            # logger.log('publichd got magnetURI' + magnetURI)
            if magnetURI:
                url = magnetURI

        if not url:
            enclos = item.find('enclosure')
            if enclos is not None:
                url = enclos.get('url')
                # logger.log('publichd got enclosure url ' + url)
                if url:
                    url = url.replace('&amp;', '&')

        if title.startswith('[TORRENT] '):
            title = title[10:]

        # these guys also get creative with the torrent names, adding crud at the
        # end like "[PublicHD]", "[P2PDL]" etc. which confuses sb.  Best to
        # just remove it if present
        crudAtEndMatch = re.match(r'(.*) \[\w+\]$', title)
        if crudAtEndMatch:
            title = crudAtEndMatch.group(1)

        return (title, url)
Example #7
    def _doSearch(self, searchString, show=None, season=None, french=None):
        results = []
        searchUrl = self.url+'rdirect.php?type=search&'+searchString.replace('!','')
        logger.log(u"Search URL: " + searchUrl, logger.DEBUG)
        
        data = self.getURL(searchUrl)
        if "bad key" in str(data).lower():
            logger.log(u"GKS key invalid, check your config", logger.ERROR)
            return []

        parsedXML = parseString(data)
        channel = parsedXML.getElementsByTagName('channel')[0]
        description = channel.getElementsByTagName('description')[0]
        description_text = helpers.get_xml_text(description).lower()
        
        if "user can't be found" in description_text:
            logger.log(u"GKS invalid digest, check your config", logger.ERROR)
            return []
        elif "invalid hash" in description_text:
            logger.log(u"GKS invalid hash, check your config", logger.ERROR)
            return []
        else:
            items = channel.getElementsByTagName('item')
            for item in items:
                title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
                if "aucun resultat" in title.lower():
                    logger.log(u"No results found in " + searchUrl, logger.DEBUG)
                    return []
                count = 1
                if season:
                    count = 0
                    if show:
                        if show.audio_lang == 'fr' or french:
                            for frword in ['french', 'truefrench', 'multi']:
                                if frword in title.lower():
                                    count += 1
                        else:
                            count += 1
                    else:
                        count += 1
                if count == 0:
                    continue
                else:
                    downloadURL = helpers.get_xml_text(item.getElementsByTagName('link')[0])
                    quality = Quality.nameQuality(title)
                    if quality == Quality.UNKNOWN and title:
                        if '720p' not in title.lower() and '1080p' not in title.lower():
                            quality = Quality.SDTV
                    if show and french is None:
                        results.append(GksSearchResult(self.opener, title, downloadURL, quality, str(show.audio_lang)))
                    elif show and french:
                        results.append(GksSearchResult(self.opener, title, downloadURL, quality, 'fr'))
                    else:
                        results.append(GksSearchResult(self.opener, title, downloadURL, quality))
        return results
Example #8
    def _parseItem(self, item):
        title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
        url = helpers.get_xml_text(item.getElementsByTagName('link')[0])

        if not title or not url:
            logger.log("The XML returned from the FreshOnTV RSS feed is incomplete, this result is unusable", logger.ERROR)
            return

        logger.log("Adding item from RSS to cache: " + title, logger.DEBUG)
        self._addCacheEntry(title, url,
                            quality=common.Quality.nameQuality(title))
Example #9
    def _get_title_and_url(self, item):
        """
        Retrieves the title and URL data from the item XML node

        item: An xml.dom.minidom.Node representing the <item> tag of the RSS feed

        Returns: A tuple containing two strings representing title and URL respectively
        """
        title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
        url = helpers.get_xml_text(item.getElementsByTagName('link')[0])
        if url:
            url = url.replace('&amp;','&')
        
        return (title, url)
Example #10
    def _get_title_and_url(self, item):
        """
        Retrieves the title and URL data from the item XML node
        item: An xml.dom.minidom.Node representing the <item> tag of the RSS feed
        Returns: A tuple containing two strings representing title and URL respectively
        """
        title = helpers.get_xml_text(item.getElementsByTagName("title")[0])
        try:
            url = helpers.get_xml_text(item.getElementsByTagName("link")[0])
            if url:
                url = url.replace("&amp;", "&")
        except IndexError:
            url = None

        return (title, url)
Example #11
    def _get_title_and_url(self, item):
        """
        Retrieves the title and URL data from the item XML node
        item: An xml.dom.minidom.Node representing the <item> tag of the RSS feed
        Returns: A tuple containing two strings representing title and URL respectively
        """
        title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
        try:
            url = helpers.get_xml_text(item.getElementsByTagName('link')[0])
            if url:
                url = url.replace('&amp;', '&')
        except IndexError:
            url = None

        return (title, url)
Example #12
    def _get_title_and_url(self, item):
        title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
        url = item.getElementsByTagName('enclosure')[0].getAttribute('url').replace('&amp;', '&')
        if title.startswith('[TORRENT] '):
            title = title[10:]

        return (title, url)
Example #13
    def _get_title_and_url(self, item):
        (title, url) = generic.TorrentProvider._get_title_and_url(self, item)
        
        if url and url.startswith(('http://twitter.com/', 'https://twitter.com/')):
            # this feed came from twitter
            #
            # we need to extract the filename and url from the twitter message (the url we have currently is just that of the twitter message,
            # which is of little use to us).
            # The message looks something like this:
            # eztv_it: History Ch The Universe Season 4 08of12 Space Wars XviD AC3 [MVGroup org] - http://t.co/mGTrhB4a
            #
            # Start by splitting the (real) url from the filename
            title, url = title.rsplit(' - http', 1)
            url = 'http' + url
            
            # Then strip off the leading eztv_it:
            if title.startswith('eztv_it:'):
                title = title[8:]
                
            # For safety we remove any whitespace too.
            title = title.strip()
            
            logger.log(u"Extracted the name %s and url %s from the twitter link"%(title, url), logger.DEBUG)
        else:
            # this feed came from ezrss
            torrent_node = item.getElementsByTagName('torrent')[0]
            filename_node = torrent_node.getElementsByTagName('fileName')[0]
            filename = get_xml_text(filename_node)
        
            new_title = self._extract_name_from_filename(filename)
            if new_title:
                title = new_title
                logger.log(u"Extracted the name "+title+" from the torrent link", logger.DEBUG)

        return (title, url)
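The rsplit-based extraction above assumes the message shape documented in the comment; a quick illustration with a made-up tweet:

message = 'eztv_it: Some Show S01E01 XviD - http://t.co/mGTrhB4a'
title, url = message.rsplit(' - http', 1)
url = 'http' + url          # 'http://t.co/mGTrhB4a'
if title.startswith('eztv_it:'):
    title = title[8:]
title = title.strip()       # 'Some Show S01E01 XviD'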
Example #14
    def _get_title_and_url(self, item):
        description_node = item.getElementsByTagName("description")[0]

        title = get_xml_text(description_node).replace("_", ".").split(" (")[0]
        url = item.getElementsByTagName("enclosure")[0].getAttribute("url")

        return (title, url)
    def findPropers(self, date=None):

        results = []

        for curString in (".PROPER.", ".REPACK."):

            for curResult in self._doSearch(curString):

                (title, url) = self._get_title_and_url(curResult)

                pubDate_node = curResult.getElementsByTagName('pubDate')[0]
                pubDate = helpers.get_xml_text(pubDate_node)

                match = re.search(
                    r'(\w{3}, \d{1,2} \w{3} \d{4} \d\d:\d\d:\d\d) [\+\-]\d{4}',
                    pubDate)
                if not match:
                    continue

                resultDate = datetime.datetime.strptime(
                    match.group(1), "%a, %d %b %Y %H:%M:%S")

                if date is None or resultDate > date:
                    results.append(classes.Proper(title, url, resultDate))

        return results
Example #16
    def _get_title_and_url(self, item):
        (title, url) = generic.TorrentProvider._get_title_and_url(self, item)

        filename = helpers.get_xml_text(item.find('{http://xmlns.ezrss.it/0.1/}torrent/{http://xmlns.ezrss.it/0.1/}fileName'))
        magnet = helpers.get_xml_text(item.find('{http://xmlns.ezrss.it/0.1/}torrent/{http://xmlns.ezrss.it/0.1/}magnetURI'))

        if filename:
            new_title = self._extract_name_from_filename(filename)
            if new_title:
                title = new_title
                logger.log(u"Extracted the name " + title + " from the torrent link", logger.DEBUG)

        if self.useMagnet() and magnet:
            url = magnet

        return (title, url)
Example #17
    def findPropers(self, date=None):

        results = []

        for curResult in self._doSearch("(PROPER,REPACK)"):

            (title, url) = self._get_title_and_url(curResult)

            description_node = curResult.getElementsByTagName('description')[0]
            descriptionStr = helpers.get_xml_text(description_node)

            match = re.search(
                r'<b>Added:</b> (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d)',
                descriptionStr)
            if not match:
                logger.log(u"Unable to figure out the date for entry " +
                           title + ", skipping it")
                continue
            else:
                resultDate = datetime.datetime.strptime(
                    match.group(1), "%Y-%m-%d %H:%M:%S")

            if date is None or resultDate > date:
                results.append(classes.Proper(title, url, resultDate))

        return results
Example #18
    def getQuality(self, item):
        attributes = item.report[0]
        attr_dict = {}

        for attribute in attributes.getElementsByTagName('report:attribute'):
            cur_attr = attribute.getAttribute('type')
            cur_attr_value = helpers.get_xml_text(attribute)
            if cur_attr not in attr_dict:
                attr_dict[cur_attr] = [cur_attr_value]
            else:
                attr_dict[cur_attr].append(cur_attr_value)

        logger.log("Finding quality of item based on attributes " + str(attr_dict), logger.DEBUG)

        if self._is_SDTV(attr_dict):
            quality = Quality.SDTV
        elif self._is_SDDVD(attr_dict):
            quality = Quality.SDDVD
        elif self._is_HDTV(attr_dict):
            quality = Quality.HDTV
        elif self._is_WEBDL(attr_dict):
            quality = Quality.HDWEBDL
        elif self._is_720pBluRay(attr_dict):
            quality = Quality.HDBLURAY
        elif self._is_1080pBluRay(attr_dict):
            quality = Quality.FULLHDBLURAY
        else:
            quality = Quality.UNKNOWN

        logger.log("Resulting quality: " + str(quality), logger.DEBUG)

        return quality
Example #19
    def _get_title_and_url(self, item):
        (title, url) = generic.TorrentProvider._get_title_and_url(self, item)

        filename = helpers.get_xml_text(
            item.find(
                '{http://xmlns.ezrss.it/0.1/}torrent/{http://xmlns.ezrss.it/0.1/}fileName'
            ))
        new_title = self._extract_name_from_filename(filename)
        if new_title:
            title = new_title
            logger.log(
                u"Extracted the name " + title + " from the torrent link",
                logger.DEBUG)

        # feedburner adds "[eztv] " to the start of all titles, so trim it off
        if title and title[:7] == "[eztv] ":
            title = title[7:]
            logger.log(u"Trimmed [eztv] from title to get %s" % title,
                       logger.DEBUG)

        # ditto VTV:
        if title and title[:6] == "[VTV] ":
            title = title[6:]
            logger.log(u"Trimmed [VTV] from title to get %s" % title,
                       logger.DEBUG)

        return (title, url)
Example #20
    def _get_title_and_url(self, item):
        # (title, url) = generic.TorrentProvider._get_title_and_url(self, item)

        title = helpers.get_xml_text(item.getElementsByTagName("title")[0])
        url = item.getElementsByTagName("enclosure")[0].getAttribute("url").replace("&amp;", "&")

        return (title, url)
Example #21
    def _parseItem(self, item):

        title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
        url = helpers.get_xml_text(item.getElementsByTagName('link')[0])

        self._checkItemAuth(title, url)

        if not title or not url:
            logger.log(u"The XML returned from the "+self.provider.name+" feed is incomplete, this result is unusable", logger.ERROR)
            return

        url = self._translateLinkURL(url)

        logger.log(u"Adding item from RSS to cache: "+title, logger.DEBUG)

        self._addCacheEntry(title, url)
Example #22
    def getQuality(self, item):

        filename = helpers.get_xml_text(
            item.find('{http://xmlns.ezrss.it/0.1/}torrent/{http://xmlns.ezrss.it/0.1/}fileName'))
        quality = Quality.nameQuality(filename)

        return quality
Example #23
    def _get_title_and_url(self, item):
        description_node = item.getElementsByTagName('description')[0]

        title = get_xml_text(description_node).replace('_', '.').split(' (')[0]
        url = item.getElementsByTagName('enclosure')[0].getAttribute('url')

        return (title, url)
Example #24
    def findPropers(self, search_date=None):

        search_terms = ['.proper.', '.repack.']

        cache_results = self.cache.listPropers(search_date)
        results = [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time'])) for x in cache_results]

        for term in search_terms:
            for item in self._doSearch({'q': term}, max_age=4):

                (title, url) = self._get_title_and_url(item)

                description_node = item.find('pubDate')
                description_text = helpers.get_xml_text(description_node)

                try:
                    # we could probably do dateStr = descriptionStr but we want date in this format
                    date_text = re.search(r'(\w{3}, \d{1,2} \w{3} \d{4} \d\d:\d\d:\d\d) [\+\-]\d{4}', description_text).group(1)
                except:
                    date_text = None

                if not date_text:
                    logger.log(u"Unable to figure out the date for entry " + title + ", skipping it")
                    continue
                else:

                    result_date = email.utils.parsedate(date_text)
                    if result_date:
                        result_date = datetime.datetime(*result_date[0:6])

                if not search_date or result_date > search_date:
                    search_result = classes.Proper(title, url, result_date)
                    results.append(search_result)

        return results
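The pubDate handling above leans on email.utils.parsedate, which understands RFC 2822 dates regardless of the current locale; a minimal illustration:

import datetime
import email.utils

pub_date = 'Thu, 15 Aug 2013 20:54:03 +0000'
parsed = email.utils.parsedate(pub_date)  # returns a 9-tuple, or None on failure
if parsed:
    result_date = datetime.datetime(*parsed[0:6])  # datetime(2013, 8, 15, 20, 54, 3)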
Example #25
    def _get_title_and_url(self, item):
        (title, url) = generic.TorrentProvider._get_title_and_url(self, item)
        
        
        try:
            # ezrss feed will have a 'torrent' node
            torrent_node = item.getElementsByTagName('torrent')[0]
            filename_node = torrent_node.getElementsByTagName('fileName')[0]
            filename = get_xml_text(filename_node)
        
            new_title = self._extract_name_from_filename(filename)
            if new_title:
                title = new_title
                logger.log(u"Extracted the name "+title+" from the torrent link", logger.DEBUG)
        except IndexError:
            # we'll get an IndexError above when there's no 'torrent' node,
            # which likely means that this isn't in the special ezrss format.
            # So assume we're working with a standard rss feed.
            logger.log(u"IndexError while parsing the ezrss feed, maybe it's just standard RSS? Trying that ...", logger.DEBUG)
            (title, url) = generic.TorrentProvider._get_title_and_url(self, item)
                
        # feedburner adds "[eztv] " to the start of all titles, so trim it off
        if title and title[:7] == "[eztv] ":
            title = title[7:]
            logger.log(u"Trimmed [eztv] from title to get %s" % title, logger.DEBUG)
            
        # ditto VTV:
        if title and title[:6] == "[VTV] ":
            title = title[6:]
            logger.log(u"Trimmed [VTV] from title to get %s" % title, logger.DEBUG)

        return (title, url)
Example #26
    def _get_title_and_url(self, item):
        description_node = item.getElementsByTagName('description')[0]

        title = get_xml_text(description_node).replace('_', '.').split(' (')[0]
        url = item.getElementsByTagName('enclosure')[0].getAttribute('url')

        return (title, url)
Example #27
    def getQuality(self, item, anime=False):
        attributes = item.getElementsByTagName('report:attributes')[0]
        attr_dict = {}

        for attribute in attributes.getElementsByTagName('report:attribute'):
            cur_attr = attribute.getAttribute('type')
            cur_attr_value = helpers.get_xml_text(attribute)
            if cur_attr not in attr_dict:
                attr_dict[cur_attr] = [cur_attr_value]
            else:
                attr_dict[cur_attr].append(cur_attr_value)

        logger.log(
            "Finding quality of item based on attributes " + str(attr_dict),
            logger.DEBUG)

        if self._is_SDTV(attr_dict, anime):
            quality = Quality.SDTV
        elif self._is_SDDVD(attr_dict, anime):
            quality = Quality.SDDVD
        elif self._is_HDTV(attr_dict, anime):
            quality = Quality.HDTV
        elif self._is_WEBDL(attr_dict, anime):
            quality = Quality.HDWEBDL
        elif self._is_720pBluRay(attr_dict, anime):
            quality = Quality.HDBLURAY
        elif self._is_1080pBluRay(attr_dict, anime):
            quality = Quality.FULLHDBLURAY
        else:
            quality = Quality.UNKNOWN

        logger.log("Resulting quality: " + str(quality), logger.DEBUG)

        return quality
Example #28
    def _parseItem(self, item):

        title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
        url = helpers.get_xml_text(item.getElementsByTagName('link')[0])

        self._checkItemAuth(title, url)

        if not title or not url:
            logger.log(u"The XML returned from the "+self.provider.name+" feed is incomplete, this result is unusable", logger.ERROR)
            return

        url = self._translateLinkURL(url)

        logger.log(u"Adding item from RSS to cache: "+title, logger.DEBUG)

        self._addCacheEntry(title, url)
Example #29
    def getQuality(self, item):

        filename = helpers.get_xml_text(
            item.find('{http://xmlns.ezrss.it/0.1/}torrent/{http://xmlns.ezrss.it/0.1/}fileName'))
        quality = Quality.nameQuality(filename)

        return quality
Example #30
    def _get_title_and_url(self, item):
        #(title, url) = generic.TorrentProvider._get_title_and_url(self, item)

        title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
        
        url = None
        # if we have a preference for magnets, go straight for the throat...
        try:
            url = helpers.get_xml_text(item.getElementsByTagName('magnetURI')[0])
        except Exception:
            pass
                
        if url is None:
            url = item.getElementsByTagName('enclosure')[0].getAttribute('url').replace('&amp;','&')

        return (title, url)
Example #31
    def _get_seeders(self, item):
        try:
            return int(
                helpers.get_xml_text(
                    item.find('{http://xmlns.ezrss.it/0.1/}seeds')))
        except ValueError:
            return 1  # safer to return 1 than 0, otherwise if this breaks all torrents would be ignored!
Example #32
    def findPropers(self, search_date=None):

        search_terms = ['.proper.', '.repack.']

        cache_results = self.cache.listPropers(search_date)
        results = [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time'])) for x in cache_results]

        for term in search_terms:
            for item in self._doSearch({'q': term}, max_age=4):

                (title, url) = self._get_title_and_url(item)

                description_node = item.find('pubDate')
                description_text = helpers.get_xml_text(description_node)

                try:
                    # we could probably do dateStr = descriptionStr but we want date in this format
                    date_text = re.search(r'(\w{3}, \d{1,2} \w{3} \d{4} \d\d:\d\d:\d\d) [\+\-]\d{4}', description_text).group(1)
                except:
                    date_text = None

                if not date_text:
                    logger.log(u"Unable to figure out the date for entry " + title + ", skipping it")
                    continue
                else:

                    result_date = email.utils.parsedate(date_text)
                    if result_date:
                        result_date = datetime.datetime(*result_date[0:6])

                if not search_date or result_date > search_date:
                    search_result = classes.Proper(title, url, result_date)
                    results.append(search_result)

        return results
Example #33
    def _get_title_and_url(self, item):
        #(title, url) = generic.TorrentProvider._get_title_and_url(self, item)

        title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
        url = item.getElementsByTagName('enclosure')[0].getAttribute('url').replace('&amp;','&')

        return (title, url)
Example #34
    def _parseItem(self, item):

        title = helpers.get_xml_text(item.find('title'))
        url = helpers.get_xml_text(item.find('link'))

        self._checkItemAuth(title, url)

        if title and url:
            title = self._translateTitle(title)
            url = self._translateLinkURL(url)

            logger.log(u"Adding item from RSS to cache: " + title, logger.DEBUG)
            self._addCacheEntry(title, url)

        else:
            logger.log(u"The XML returned from the " + self.provider.name + " feed is incomplete, this result is unusable", logger.DEBUG)
            return
Example #35
    def _parseItem(self, item):
        try:      
            title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
            torrentz_url = helpers.get_xml_text(item.getElementsByTagName('guid')[0])
            url = self.provider._getTorrentzCache(torrentz_url)

            if not title or not url:
                #logger.log(u"The XML returned from the " + self.provider.name + " RSS feed is incomplete, this result is unusable: " + torrentz_url, logger.ERROR)
                return

            logger.log(u"Adding item from " + self.provider.name + " RSS to cache: "+title, logger.DEBUG)
            
            self._addCacheEntry(title, url)
        
        except Exception as e:
            logger.log(u"Error trying to parse " + self.provider.name + " cache: "+str(e).decode('utf-8'), logger.ERROR)
            raise 
Example #36
    def _parseItem(self, item):
        try:      
            title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
            url = helpers.get_xml_text(item.getElementsByTagName('torrentLink')[0]).replace('&amp;','&')

            if not title or not url:
                logger.log(u"The XML returned from the KICKASS RSS feed is incomplete, this result is unusable", logger.ERROR)
                return

            logger.log(u"Adding item from KICKASS RSS to cache: "+title, logger.DEBUG)
            
            self._addCacheEntry(title, url)
        
        except Exception as e:
            logger.log(u"Error trying to parse KICKASS cache: "+str(e).decode('utf-8'), logger.ERROR)
            traceback.print_exc()
            raise 
Example #37
    def _parseItem(self, item):

        title = helpers.get_xml_text(item.find('title'))
        url = helpers.get_xml_text(item.find('link'))

        self._checkItemAuth(title, url)

        if title and url:
            title = self._translateTitle(title)
            url = self._translateLinkURL(url)

            logger.log(u"Adding item from RSS to cache: " + title, logger.DEBUG)
            self._addCacheEntry(title, url)

        else:
            logger.log(u"The XML returned from the " + self.provider.name + " feed is incomplete, this result is unusable", logger.DEBUG)
            return
Example #38
    def _get_title_and_url(self, item):
        """
        Retrieves the title and URL data from the item XML node

        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed

        Returns: A tuple containing two strings representing title and URL respectively
        """
        title = helpers.get_xml_text(item.find('title'))
        if title:
            title = title.replace(' ', '.')

        url = helpers.get_xml_text(item.find('link'))
        if url:
            url = url.replace('&amp;', '&')
            
        return (title, url)
Example #39
    def _parseItem(self, item):
        try:      
            title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
            torrentz_url = helpers.get_xml_text(item.getElementsByTagName('guid')[0])
            url = self.provider._getTorrentzCache(torrentz_url)

            if not title or not url:
                #logger.log(u"The XML returned from the " + self.provider.name + " RSS feed is incomplete, this result is unusable: " + torrentz_url, logger.ERROR)
                return

            logger.log(u"Adding item from " + self.provider.name + " RSS to cache: "+title, logger.DEBUG)
            
            self._addCacheEntry(title, url)
        
        except Exception as e:
            logger.log(u"Error trying to parse " + self.provider.name + " cache: "+str(e).decode('utf-8'), logger.ERROR)
            raise 
Example #40
    def _get_title_and_url(self, item):
        #(title, url) = generic.TorrentProvider._get_title_and_url(self, item)

        title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
        url = item.getElementsByTagName('enclosure')[0].getAttribute(
            'url').replace('&amp;', '&')

        return (title, url)
Example #41
    def _get_title_and_url(self, item):
        title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
        
        # Finding the url for the torrent can be a bit tricky, as everyone seems to have their own
        # ideas as to where it should be.
        # cf. http://www.bittorrent.org/beps/bep_0036.html (but note that this isn't entirely reliable, 
        # or indeed correct).
        
        # If there's an 'enclosure' tag, then we can be reasonably confident that
        # its url attribute will be the torrent url.
        url = None
        try:
            url = item.getElementsByTagName('enclosure')[0].getAttribute('url').replace('&amp;','&')
        except IndexError:
            # next port-of-call is the 'link' tag, we use this if it looks like
            # a torrent link
            url = helpers.get_xml_text(item.getElementsByTagName('link')[0])
            if url.startswith('magnet:') or url.endswith('.torrent'):
                # found!
                pass
            else:
                # link tag doesn't look like a torrent, look for a torrent tag
                try:
                    torrTag = item.getElementsByTagName('torrent')[0]
                    try:
                        url = helpers.get_xml_text(torrTag.getElementsByTagName('magnetURI')[0])
                    except IndexError:
                        # No magnetURI?  then use the infoHash
                        infoHash = helpers.get_xml_text(torrTag.getElementsByTagName('infoHash')[0])
                        url = 'magnet:?xt=urn:btih:' + infoHash
                except IndexError:
                    # no torrent tag?  Then I guess we just have to use the link
                    # tag, even if it doesn't look like a torrent
                    url = helpers.get_xml_text(item.getElementsByTagName('link')[0])
             
        if title:
            # Badly formed rss sometimes will wrap the title in newlines, which
            # of course throws off the regexes.  This should fix it.
            title = title.strip() 
                  
        if url:
            # Ditto, badly formed rss can have newlines and other crap around the 
            # url, and even spaces in the url.
            url = url.replace('&amp;','&').strip().replace(' ', '%20')

        return (title, url)
Example #42
    def _get_title_and_url(self, item):
        """
        Retrieves the title and URL data from the item XML node

        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed

        Returns: A tuple containing two strings representing title and URL respectively
        """
        title = helpers.get_xml_text(item.find('title'))
        if title:
            title = title.replace(' ', '.')

        url = helpers.get_xml_text(item.find('link'))
        if url:
            url = url.replace('&amp;', '&')

        return (title, url)
Example #43
    def getQuality(self, item):

        torrent_node = item.getElementsByTagName("torrent")[0]
        filename_node = torrent_node.getElementsByTagName("fileName")[0]
        filename = get_xml_text(filename_node)

        quality = Quality.nameQuality(filename)

        return quality
Example #44
    def getQuality(self, item, anime=False):

        torrent_node = item.getElementsByTagName('torrent')[0]
        filename_node = torrent_node.getElementsByTagName('fileName')[0]
        filename = get_xml_text(filename_node)

        quality = Quality.nameQuality(filename, anime)

        return quality
Example #45
    def getQuality(self, item):
        
        torrent_node = item.getElementsByTagName('torrent')[0]
        filename_node = torrent_node.getElementsByTagName('fileName')[0]
        filename = get_xml_text(filename_node)

        quality = Quality.sceneQuality(filename)
        
        return quality
Example #46
    def _doSearch(self, searchString, show=None, season=None):
        results = []
        searchUrl = self.url + 'rdirect.php?type=search&' + searchString
        logger.log(u"Search URL: " + searchUrl, logger.DEBUG)

        data = self.getURL(searchUrl)
        if "bad key" in str(data).lower():
            logger.log(u"GKS key invalid, check your config", logger.ERROR)
            return []

        parsedXML = parseString(data)
        channel = parsedXML.getElementsByTagName('channel')[0]
        description = channel.getElementsByTagName('description')[0]
        description_text = helpers.get_xml_text(description).lower()

        if "user can't be found" in description_text:
            logger.log(u"GKS invalid digest, check your config", logger.ERROR)
            return []
        elif "invalid hash" in description_text:
            logger.log(u"GKS invalid hash, check your config", logger.ERROR)
            return []
        else:
            items = channel.getElementsByTagName('item')
            for item in items:
                title = helpers.get_xml_text(
                    item.getElementsByTagName('title')[0])
                if "aucun resultat" in title.lower():
                    logger.log(u"No results found in " + searchUrl,
                               logger.DEBUG)
                    return []
                else:
                    downloadURL = helpers.get_xml_text(
                        item.getElementsByTagName('link')[0])
                    quality = Quality.nameQuality(title)
                    if show:
                        results.append(
                            GksSearchResult(self.opener, title, downloadURL,
                                            quality, str(show.audio_lang)))
                    else:
                        results.append(
                            GksSearchResult(self.opener, title, downloadURL,
                                            quality))
        return results
Example #47
    def _get_title_and_url(self, item):
        #     <item>
        #         <title>The Dealership S01E03 HDTV x264 C4TV</title>
        #         <category>TV</category>
        #         <link>http://kickass.to/the-dealership-s01e03-hdtv-x264-c4tv-t7739376.html</link>
        #         <guid>http://kickass.to/the-dealership-s01e03-hdtv-x264-c4tv-t7739376.html</guid>
        #         <pubDate>Thu, 15 Aug 2013 20:54:03 +0000</pubDate>
        #         <torrent:contentLength>311302749</torrent:contentLength>
        #         <torrent:infoHash>F94F9B44A03DDA439E5818E2C2F18342103522EF</torrent:infoHash>
        #         <torrent:magnetURI><![CDATA[magnet:?xt=urn:btih:F94F9B44A03DDA439E5818E2C2F18342103522EF&dn=the+dealership+s01e03+hdtv+x264+c4tv&tr=udp%3A%2F%2Ftracker.publicbt.com%3A80&tr=udp%3A%2F%2Fopen.demonii.com%3A1337]]></torrent:magnetURI>
        #         <torrent:seeds>0</torrent:seeds>
        #         <torrent:peers>0</torrent:peers>
        #         <torrent:verified>0</torrent:verified>
        #         <torrent:fileName>the.dealership.s01e03.hdtv.x264.c4tv.torrent</torrent:fileName>
        #         <enclosure url="http://torcache.net/torrent/F94F9B44A03DDA439E5818E2C2F18342103522EF.torrent?title=[kickass.to]the.dealership.s01e03.hdtv.x264.c4tv" length="311302749" type="application/x-bittorrent" />
        #     </item>

        title = helpers.get_xml_text(item.find('title'))

        url = None
        if sickbeard.PREFER_MAGNETS:
            # if we have a preference for magnets, go straight for the throat...
            url = helpers.get_xml_text(
                item.find(
                    '{http://xmlns.ezrss.it/0.1/}torrent/{http://xmlns.ezrss.it/0.1/}magnetURI'
                ))
            if not url:
                # The above, although standard, is unlikely in kat.  They kinda screw up their namespaces, so we get
                # this instead...
                url = helpers.get_xml_text(
                    item.find('{http://xmlns.ezrss.it/0.1/}magnetURI'))

        if not url:
            enclos = item.find('enclosure')
            if enclos is not None:
                url = enclos.get('url')
                if url:
                    url = url.replace('&amp;', '&')

        if not url:
            url = None  # this should stop us returning empty strings as urls

        return (title, url)
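The Clark-notation paths ('{namespace-uri}tag') used above are how ElementTree addresses namespaced children; a self-contained illustration with a hypothetical feed snippet:

import xml.etree.ElementTree as etree

ITEM_XML = ('<item xmlns:torrent="http://xmlns.ezrss.it/0.1/">'
            '<title>Some Show S01E01</title>'
            '<torrent:magnetURI>magnet:?xt=urn:btih:F94F9B44A03DDA439E5818E2C2F18342103522EF</torrent:magnetURI>'
            '</item>')

item = etree.fromstring(ITEM_XML)
# namespaced child tags are looked up as '{namespace-uri}localname'
magnet = item.find('{http://xmlns.ezrss.it/0.1/}magnetURI')
magnet_uri = magnet.text if magnet is not None else None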
Example #48
    def _parseItem(self, item):
        """Return None
        parse a single rss feed item and add its info to the cache
        will check for the needed info
        """
        title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
        url = helpers.get_xml_text(item.getElementsByTagName('link')[0])

        self._checkItemAuth(title, url)

        # we at least need a title and url, if one is missing stop
        if not title or not url:
            logger.log(u"The XML returned from the "+self.provider.name+" feed is incomplete, this result is unusable", logger.ERROR)
            return

        url = self._translateLinkURL(url)

        logger.log(u"Adding item from RSS to cache: "+title, logger.DEBUG)

        self._addCacheEntry(title, url)
Example #49
    def _parseItem(self, item):
        """Return None
        parse a single rss feed item and add its info to the cache
        will check for the needed info
        """
        title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
        url = helpers.get_xml_text(item.getElementsByTagName('link')[0])

        self._checkItemAuth(title, url)

        # we at least need a title and url, if one is missing stop
        if not title or not url:
            logger.log(u"The XML returned from the "+self.provider.name+" feed is incomplete, this result is unusable", logger.ERROR)
            return

        url = self._translateLinkURL(url)

        logger.log(u"Adding item from RSS to cache: "+title, logger.DEBUG)

        self._addCacheEntry(title, url)
Example #50
    def isValidCategory(self, item):
        """
        Decides if the category of an item (from the rss feed) could be a valid
        tv show.
        @param item: An elementTree Node representing the <item> tag of the RSS feed
        @return: boolean
        """
        category = helpers.get_xml_text(item.find('category'))
        return category in ('BluRay 720p', 'BluRay 1080p', 'BluRay Remux',
                            'BluRay', 'BluRay 3D', 'XviD', 'BRRip', 'HDTV',
                            'SDTV', 'TV WEB-DL', 'TV Packs')
Example #51
    def isValidCategory(self, item):
        """
        Decides if the category of an item (from the rss feed) could be a valid
        tv show.
        @param item: An xml.dom.minidom.Node representing the <item> tag of the RSS feed
        @return: boolean
        """
        category = helpers.get_xml_text(item.getElementsByTagName('category')[0])
        return category in ('BluRay 720p', 'BluRay 1080p', 'BluRay Remux',
                            'BluRay', 'BluRay 3D', 'XviD', 'BRRip',
                            'HDTV', 'SDTV', 'TV WEB-DL', 'TV Packs')
Example #52
    def _checkAuthFromData(self, parsedXML):

        if parsedXML is None:
            return self._checkAuth()

        description_text = helpers.get_xml_text(parsedXML.find('.//channel/item/description'))

        if "Your RSS key is invalid" in description_text:
            logger.log(u"Incorrect authentication credentials for " + self.name + " : " + str(description_text), logger.DEBUG)
            raise AuthException(u"Your authentication credentials for " + self.name + " are incorrect, check your config")

        return True
Example #53
    def _checkAuthFromData(self, parsedXML):

        if parsedXML is None:
            return self._checkAuth()

        description_text = helpers.get_xml_text(parsedXML.find('.//channel/description'))

        if "User can't be found" in description_text or "Invalid Hash" in description_text:
            logger.log(u"Incorrect authentication credentials for " + self.name + " : " + str(description_text), logger.DEBUG)
            raise AuthException(u"Your authentication credentials for " + self.name + " are incorrect, check your config")

        return True
Example #54
    def _get_title_and_url(self, item):
        (title, url) = generic.TorrentProvider._get_title_and_url(self, item)

        filename = helpers.get_xml_text(item.find('{http://xmlns.ezrss.it/0.1/}torrent/{http://xmlns.ezrss.it/0.1/}fileName'))

        if filename:
            new_title = self._extract_name_from_filename(filename)
            if new_title:
                title = new_title
                logger.log(u"Extracted the name " + title + " from the torrent link", logger.DEBUG)

        return (title, url)
Example #55
    def getQuality(self, item):
        
        #torrent_node = item.getElementsByTagName('torrent')[0]
        #filename_node = torrent_node.getElementsByTagName('title')[0]
        #filename = get_xml_text(filename_node)
        
        # I think the only place we can get anything resembling the filename is in
        # the title
        filename = helpers.get_xml_text(item.getElementsByTagName('title')[0])

        quality = Quality.nameQuality(filename)
        
        return quality
Example #56
    def _get_title_and_url(self, item):
        """
        Retrieves the title and URL data from the item XML node

        item: An xml.dom.minidom.Node representing the <item> tag of the RSS feed

        Returns: A tuple containing two strings representing title and URL respectively

        Since we are in the search provider it is OK to delete any "/" here:
        some shows get posted with a "/" in the name, and during the quality
        check the title is reduced to its base name.
        """
        title = helpers.get_xml_text(
            item.getElementsByTagName('title')[0]).replace("/", " ")
        try:
            url = helpers.get_xml_text(item.getElementsByTagName('link')[0])
            if url:
                url = url.replace('&amp;', '&')
        except IndexError:
            url = None

        return (title, url)
Example #57
    def _get_title_and_url(self, item):
        (title, url) = generic.TorrentProvider._get_title_and_url(self, item)
        
        torrent_node = item.getElementsByTagName('torrent')[0]
        filename_node = torrent_node.getElementsByTagName('fileName')[0]
        filename = get_xml_text(filename_node)
        
        new_title = self._extract_name_from_filename(filename)
        if new_title:
            title = new_title
            logger.log(u"Extracted the name "+title+" from the torrent link", logger.DEBUG)

        return (title, url)
Example #58
    def _get_title_and_url(self, item):
        """
        Retrieves the title and URL data from the item XML node.

        Some Einstein decided to change `item` from a xml.dom.minidom.Node to
        an elementtree.ElementTree element upstream, without taking into
        account that this is the base for *LOTS* of classes, so it will
        basically break every one of them unless they are all changed.
        Why does python even allow this crap?  Strong typing is a good thing
        for a language Guido!

        (so, rant over, we now need to cater for both cases here)

        @param item: An xml.dom.minidom.Node (or an elementtree.ElementTree
                element) representing the <item> tag of the RSS feed.
        @return: A tuple containing two strings representing title and URL
                respectively.
        """
        if isinstance(item, xml.dom.minidom.Node):
            title = helpers.get_xml_text(item.getElementsByTagName('title')[0],
                                         mini_dom=True)
            try:
                url = helpers.get_xml_text(
                    item.getElementsByTagName('link')[0], mini_dom=True)
                if url:
                    url = url.replace('&amp;', '&')
            except IndexError:
                url = None
        else:
            title = helpers.get_xml_text(item.find('title'))
            if title:
                title = title.replace(' ', '.')

            url = helpers.get_xml_text(item.find('link'))
            if url:
                url = url.replace('&amp;', '&')

        return (title, url)
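Every example above funnels through helpers.get_xml_text; a rough sketch of such a helper that copes with both node types, matching the call sites seen here (an assumption, not the actual Sick Beard code):

import xml.dom.minidom

def get_xml_text(node, mini_dom=False):
    # assumption: return the text content of either an xml.dom.minidom.Node
    # or an ElementTree element, as the mixed call sites above expect
    if node is None:
        return ''
    if mini_dom or isinstance(node, xml.dom.minidom.Node):
        return ''.join(child.data for child in node.childNodes
                       if child.nodeType == child.TEXT_NODE).strip()
    return (node.text or '').strip()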