Example #1
    def getQuality(self, item):

        filename = helpers.get_xml_text(
            item.find('{http://xmlns.ezrss.it/0.1/}torrent/{http://xmlns.ezrss.it/0.1/}fileName'))
        quality = Quality.nameQuality(filename)

        return quality
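
Most of the snippets on this page share one pattern: pull a release or file name out of a feed item or search result and hand it to Quality.nameQuality, which guesses a quality constant from markers embedded in the name. A minimal sketch of that idea, using hypothetical constants in place of Sick-Beard's Quality values (the real implementation has far more rules):

import re

# Hypothetical stand-ins for Sick-Beard's Quality constants.
UNKNOWN, SDTV, HDTV, FULLHDTV = 0, 1, 2, 4

def name_quality(name):
    """Guess a quality constant from a release or file name (sketch only)."""
    lowered = name.lower()
    if '1080p' in lowered:
        return FULLHDTV
    if '720p' in lowered:
        return HDTV
    if re.search(r'\b(hdtv|pdtv|dsr)\b', lowered):
        return SDTV
    return UNKNOWN

print(name_quality('Show.Name.S01E01.720p.HDTV.x264'))  # 2 (HDTV)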
Example #2
File: tvbinz.py Project: kacy/Sick-Beard
	def _parseItem(self, item):

		if item.findtext('title') != None and item.findtext('title') == "You must be logged in to view this feed":
			raise exceptions.AuthException("TVBinz authentication details are incorrect, check your config")

		if item.findtext('title') == None or item.findtext('link') == None:
			logger.log(u"The XML returned from the TVBinz RSS feed is incomplete, this result is unusable: "+str(item), logger.ERROR)
			return

		title = item.findtext('title')
		url = item.findtext('link').replace('&amp;', '&')  # un-escape XML-encoded ampersands

		sInfo = item.find('{http://tvbinz.net/rss/tvb/}seriesInfo')
		if sInfo == None:
			logger.log(u"No series info, this is some kind of non-standard release, ignoring it", logger.DEBUG)
			return

		logger.log(u"Adding item from RSS to cache: "+title, logger.DEBUG)

		quality = Quality.nameQuality(title)

		if sInfo.findtext('{http://tvbinz.net/rss/tvb/}tvrID') == None:
			tvrid = 0
		else:
			tvrid = int(sInfo.findtext('{http://tvbinz.net/rss/tvb/}tvrID'))

		# since TVBinz normalizes the scene names it's more reliable to parse the episodes out myself
		# than to rely on it, because it doesn't support multi-episode numbers in the feed
		self._addCacheEntry(title, url, tvrage_id=tvrid, quality=quality)
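
The '{namespace}tag' strings passed to find and findtext above are ElementTree's Clark notation for namespaced elements. A self-contained illustration with a made-up feed fragment (not real TVBinz output):

import xml.etree.ElementTree as ET

# Made-up fragment in the TVBinz namespace, for illustration only.
item = ET.fromstring(
    '<item xmlns:tvb="http://tvbinz.net/rss/tvb/">'
    '<title>Show.S01E01.720p.HDTV</title>'
    '<tvb:seriesInfo><tvb:tvrID>12345</tvb:tvrID></tvb:seriesInfo>'
    '</item>')

sInfo = item.find('{http://tvbinz.net/rss/tvb/}seriesInfo')
print(sInfo.findtext('{http://tvbinz.net/rss/tvb/}tvrID'))  # 12345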
Example #3
    def query(self, filepath, languages, keywords, series, season, episode):

        logger.debug(
            u'Getting subtitles for %s season %d episode %d with languages %r'
            % (series, season, episode, languages))
        self.init_cache()
        try:
            series = series.lower().replace('(', '').replace(')', '')
            series_id = self.get_series_id(series)
        except KeyError:
            logger.debug(u'Could not find series id for %s' % series)
            return []

        episode_id = self.get_episode_id(series, series_id, season, episode,
                                         Quality.nameQuality(filepath))
        if not episode_id:
            logger.debug(u'Could not find subtitle for series %s' % series)
            return []

        r = self.session.get(
            self.server_url +
            'index.php?option=com_remository&Itemid=6&func=fileinfo&id=' +
            episode_id)
        soup = BeautifulSoup(r.content)

        sub_link = soup.find('div', attrs={
            'id': 'remositoryfileinfo'
        }).find(href=re.compile('func=download'))['href']
        sub_language = self.get_language('it')
        path = get_subtitle_path(filepath, sub_language, self.config.multi)
        subtitle = ResultSubtitle(path, sub_language,
                                  self.__class__.__name__.lower(), sub_link)

        return [subtitle]
Example #4
    def _doSearch(self, searchString, show=None, season=None, french=None):
        if not self.login_done:
            self._doLogin(sickbeard.SOTORRENT_USERNAME, sickbeard.SOTORRENT_PASSWORD)

        results = []

        search_url = "{0}/sphinx.php?{1}".format(self.url, searchString.replace('!',''))
        req = self.opener.open(search_url)
        page = BeautifulSoup(req)

        torrent_table = page.find("table", {"id" : "torrent_list"})
        if torrent_table:
            logger.log(u"So-torrent found shows ! " , logger.DEBUG)  
            torrent_rows = torrent_table.findAll("tr", {"id" : "infos_sphinx"})

            for row in torrent_rows:
                release = row.strong.string
                id_search = row.find("img", {"alt" : "+"})
                id_torrent = id_search['id'].replace('expandoGif', '')
                download_url = "https://so-torrent.com/get.php?id={0}".format(id_search['id'].replace('expandoGif', ''))
                id_quality = Quality.nameQuality(release)

                if show and french==None:
                    results.append(SOTORRENTSearchResult(self.opener, release, download_url, id_quality, str(show.audio_lang)))
                elif show and french:
                    results.append(SOTORRENTSearchResult(self.opener, release, download_url, id_quality, 'fr'))
                else:
                    results.append(SOTORRENTSearchResult(self.opener, release, download_url, id_quality))

        else:
            logger.log(u"No table founded.", logger.DEBUG)
            self.login_done = False             
        return results
Example #5
File: itasa.py Project: 13111/SickRage
    def query(self, filepath, languages, keywords, series, season, episode):

        logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
        self.init_cache()
        try:
            series = series.lower().replace('(','').replace(')','')
            series_id = self.get_series_id(series)
        except KeyError:
            logger.debug(u'Could not find series id for %s' % series)
            return []
        
        episode_id = self.get_episode_id(series, series_id, season, episode, Quality.nameQuality(filepath))
        if not episode_id:
            logger.debug(u'Could not find subtitle for series %s' % series)
            return []

        r = self.session.get(self.server_url + 'index.php?option=com_remository&Itemid=6&func=fileinfo&id=' + episode_id)
        soup = BeautifulSoup(r.content)

        sub_link = soup.find('div', attrs = {'id' : 'remositoryfileinfo'}).find(href=re.compile('func=download'))['href']
        sub_language = self.get_language('it')
        path = get_subtitle_path(filepath, sub_language, self.config.multi)
        subtitle = ResultSubtitle(path, sub_language, self.__class__.__name__.lower(), sub_link)
        
        return [subtitle]
Example #6
    def _doSearch(self, searchString, show=None, season=None, french=None):

        if not self.login_done:
            self._doLogin(sickbeard.LIBERTALIA_USERNAME,
                          sickbeard.LIBERTALIA_PASSWORD)

        results = []

        searchUrl = self.url + '/torrents.php?' + searchString.replace('!', '')

        logger.log(u"Search string: " + searchUrl, logger.DEBUG)

        r = self.opener.open(searchUrl)
        soup = BeautifulSoup(r)

        resultsTable = soup.find("table", {"class": "torrent_table"})
        if resultsTable:
            logger.log(u"LIBERTALIA found resulttable ! ", logger.DEBUG)
            rows = resultsTable.findAll(
                "tr", {"class": "torrent_row new"})  # torrent_row new

            for row in rows:

                # bypass the first row because it contains only the title
                columns = row.find('td', {"class": "torrent_name"})
                logger.log(u"LIBERTALIA found rows ! ", logger.DEBUG)
                link = columns.find("a", href=re.compile("torrents"))
                if link:
                    title = link.text
                    recherched = searchUrl.split("&[PARAMSTR]=")[1]
                    recherched = recherched.replace(".", "(.*)").replace(
                        " ", "(.*)").replace("'", "(.*)")
                    logger.log(u"LIBERTALIA TITLE : " + title, logger.DEBUG)
                    logger.log(u"LIBERTALIA CHECK MATCH : " + recherched,
                               logger.DEBUG)
                    #downloadURL =  self.url + "/" + row.find("a",href=re.compile("torrent_pass"))['href']
                    if re.match(recherched, title, re.IGNORECASE):
                        downloadURL = row.find(
                            "a", href=re.compile("torrent_pass"))['href']
                        quality = Quality.nameQuality(title)
                        if quality == Quality.UNKNOWN and title:
                            if '720p' not in title.lower() and '1080p' not in title.lower():
                                quality = Quality.SDTV
                        if show and french == None:
                            results.append(
                                LIBERTALIASearchResult(self.opener, title,
                                                       downloadURL, quality,
                                                       str(show.audio_lang)))
                        elif show and french:
                            results.append(
                                LIBERTALIASearchResult(self.opener, title,
                                                       downloadURL, quality,
                                                       'fr'))
                        else:
                            results.append(
                                LIBERTALIASearchResult(self.opener, title,
                                                       downloadURL, quality))

        return results
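
Several providers on this page repeat the fallback seen above: when Quality.nameQuality returns UNKNOWN and the title carries no HD marker, assume SD. Isolated as a helper (the constants are hypothetical stand-ins for Quality values):

UNKNOWN, SDTV = 0, 1  # hypothetical stand-ins

def fallback_quality(title, quality):
    """Assume SD when no quality was parsed and the title has no HD marker."""
    if quality == UNKNOWN and title:
        if '720p' not in title.lower() and '1080p' not in title.lower():
            return SDTV
    return quality

print(fallback_quality('Show.S01E01.XviD-GRP', UNKNOWN))  # 1 (SDTV)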
Example #7
    def _doSearch(self, searchString, show=None, season=None, french=None):
        
        if not self.login_done:
            self._doLogin( sickbeard.T411_USERNAME, sickbeard.T411_PASSWORD )

        results = []
        searchUrl = self.url + '/torrents/search/?' + searchString.replace('!','')
        logger.log(u"Search string: " + searchUrl, logger.DEBUG)
        
        r = self.opener.open( searchUrl )
        soup = BeautifulSoup( r, "html.parser" )
        resultsTable = soup.find("table", { "class" : "results" })
        if resultsTable:
            rows = resultsTable.find("tbody").findAll("tr")
    
            for row in rows:
                link = row.find("a", title=True)
                title = link['title']
                id = row.find_all('td')[2].find_all('a')[0]['href'][1:].replace('torrents/nfo/?id=','')
                downloadURL = ('http://www.t411.io/torrents/download/?id=%s' % id)
                
                quality = Quality.nameQuality( title )
                if quality==Quality.UNKNOWN and title:
                    if '720p' not in title.lower() and '1080p' not in title.lower():
                        quality=Quality.SDTV
                if show and french==None:
                    results.append( T411SearchResult( self.opener, link['title'], downloadURL, quality, str(show.audio_lang) ) )
                elif show and french:
                    results.append( T411SearchResult( self.opener, link['title'], downloadURL, quality, 'fr' ) )
                else:
                    results.append( T411SearchResult( self.opener, link['title'], downloadURL, quality ) )
                
        return results
Example #8
    def getQuality(self, item):

        filename = helpers.get_xml_text(
            item.find('{http://xmlns.ezrss.it/0.1/}torrent/{http://xmlns.ezrss.it/0.1/}fileName'))
        quality = Quality.nameQuality(filename)

        return quality
Example #9
    def _doSearch(self, searchString, show=None, season=None, french=None):

        
        if not self.login_done:
            self._doLogin( sickbeard.LIBERTALIA_USERNAME, sickbeard.LIBERTALIA_PASSWORD )

        results = []
        
        searchUrl = self.url + '/torrents.php?' + searchString.replace('!','')
 
        logger.log(u"Search string: " + searchUrl, logger.DEBUG)
        
        r = self.opener.open( searchUrl )
        soup = BeautifulSoup( r)

        resultsTable = soup.find("table", { "class" : "torrent_table"  })
        if resultsTable:
            logger.log(u"LIBERTALIA found resulttable ! " , logger.DEBUG)  
            rows = resultsTable.findAll("tr" ,  {"class" : "torrent_row  new  "}  )  # torrent_row new
            
            for row in rows:
                           
                #bypass first row because title only  
                columns = row.find('td', {"class" : "torrent_name"} )                            
                logger.log(u"LIBERTALIA found rows ! " , logger.DEBUG) 
                isvfclass = row.find('td', {"class" : "sprite-vf"} )
                isvostfrclass = row.find('td', {"class" : "sprite-vostfr"} ) 
                link = columns.find("a",  href=re.compile("torrents"))  
                if link:
                    if isvostfrclass and str(show.audio_lang) == 'fr':
                        logger.log(u"LIBERTALIA found VOSTFR but *" + str(show.audio_lang) + "* was requested, skipping " + link.text, logger.DEBUG)
                        # point link at an href that cannot match so the result is dropped
                        link = columns.find("a", href=re.compile("nepastrouver"))
                if link:
                    if isvfclass and str(show.audio_lang) != 'fr':
                        logger.log(u"LIBERTALIA found VF but *" + str(show.audio_lang) + "* was requested, skipping " + link.text, logger.DEBUG)
                        link = columns.find("a", href=re.compile("nepastrouver"))
                if link:
                    title = link.text
                    recherched = searchUrl.split("&[PARAMSTR]=")[1]
                    recherched = recherched.replace(".", "(.*)").replace(" ", "(.*)").replace("'", "(.*)")
                    logger.log(u"LIBERTALIA TITLE : " + title, logger.DEBUG)  
                    logger.log(u"LIBERTALIA CHECK MATCH : " + recherched, logger.DEBUG)                                        
                    #downloadURL =  self.url + "/" + row.find("a",href=re.compile("torrent_pass"))['href']
                    if re.match(recherched, title, re.IGNORECASE):
                        downloadURL = row.find("a", href=re.compile("torrent_pass"))['href']
                        quality = Quality.nameQuality(title)
                        if quality == Quality.UNKNOWN and title:
                            if '720p' not in title.lower() and '1080p' not in title.lower():
                                quality = Quality.SDTV
                        if show and french==None:
                            results.append( LIBERTALIASearchResult( self.opener, title, downloadURL, quality, str(show.audio_lang) ) )
                        elif show and french:
                            results.append( LIBERTALIASearchResult( self.opener, title, downloadURL, quality, 'fr' ) )
                        else:
                            results.append( LIBERTALIASearchResult( self.opener, title, downloadURL, quality ) )
        else:
            logger.log(u"Pas de table trouvée ! je délogue", logger.DEBUG)
            self.login_done = False             
        return results
Example #10
    def _doSearch(self, searchString, show=None, season=None, french=None):

        if not self.login_done:
            self._doLogin(sickbeard.XTHOR_USERNAME, sickbeard.XTHOR_PASSWORD)

        results = []

        searchUrl = self.url + '/browse.php?' + searchString.replace('!', '')

        logger.log(u"Search string: " + searchUrl, logger.DEBUG)

        r = self.opener.open(searchUrl)

        soup = BeautifulSoup(r)

        resultsTable = soup.find("table", {"class": "table2"})
        if resultsTable:

            rows = resultsTable.findAll("tr")

            for row in rows:

                link = row.find("a", href=re.compile("details.php"))

                if link:
                    title = link.text
                    recherched = searchUrl.split("&search=")[1]
                    recherched = recherched.replace(" ", "(.*)")
                    recherched = recherched + "(.*)"
                    logger.log(u"XTHOR TITLE : " + title, logger.DEBUG)
                    logger.log(u"XTHOR CHECK MATCH : " + recherched,
                               logger.DEBUG)
                    if re.match(recherched, title, re.IGNORECASE):
                        downloadURL = row.find(
                            "a", href=re.compile("download.php"))['href']
                        downloadURL = self.url + '/' + downloadURL
                        logger.log(u"XTHOR DOWNLOAD URL : " + downloadURL,
                                   logger.DEBUG)
                    else:
                        continue
                    quality = Quality.nameQuality(title)
                    if quality == Quality.UNKNOWN and title:
                        if '720p' not in title.lower() and '1080p' not in title.lower():
                            quality = Quality.SDTV
                    if show and french == None:
                        results.append(
                            XTHORSearchResult(self.opener, title, downloadURL,
                                              quality, str(show.audio_lang)))
                    elif show and french:
                        results.append(
                            XTHORSearchResult(self.opener, title, downloadURL,
                                              quality, 'fr'))
                    else:
                        results.append(
                            XTHORSearchResult(self.opener, title, downloadURL,
                                              quality))

        return results
Example #11
    def _doSearch(self, searchString, show=None, season=None, french=None):

        if not self.login_done:
            self._doLogin(sickbeard.THINKGEEK_USERNAME,
                          sickbeard.THINKGEEK_PASSWORD)

        results = []

        searchUrl = self.url + '?p=torrents&pid=10&search_type=name&' + searchString.replace(
            '!', '')

        logger.log(u"Search string: " + searchUrl, logger.DEBUG)

        r = self.opener.open(searchUrl)

        soup = BeautifulSoup(r, "html.parser")

        resultsTable = soup.find("div", {"id": "content"})
        if resultsTable:

            rows = resultsTable.findAll("div", {"class": "torrent-box"})

            for row in rows:

                link = row.find("a", href=re.compile("action=details"))

                if link:
                    title = link.text
                    recherched = searchUrl.split("&[PARAMSTR]=")[1]
                    recherched = recherched.replace(".", "(.*)").replace(
                        " ", "(.*)").replace("'", "(.*)")
                    logger.log(u"THINKGEEK TITLE : " + title, logger.DEBUG)
                    logger.log(u"THINKGEEK CHECK MATCH : " + recherched,
                               logger.DEBUG)
                    if re.match(recherched, title, re.IGNORECASE):
                        downloadURL = row.find(
                            "a", href=re.compile("action=download"))['href']
                        logger.log(u"THINKGEEK DOWNLOAD URL : " + title,
                                   logger.DEBUG)
                        quality = Quality.nameQuality(title)
                        if quality == Quality.UNKNOWN and title:
                            if '720p' not in title.lower() and '1080p' not in title.lower():
                                quality = Quality.SDTV
                        if show and french == None:
                            results.append(
                                THINKGEEKSearchResult(self.opener, title,
                                                      downloadURL, quality,
                                                      str(show.audio_lang)))
                        elif show and french:
                            results.append(
                                THINKGEEKSearchResult(self.opener, title,
                                                      downloadURL, quality,
                                                      'fr'))
                        else:
                            results.append(
                                THINKGEEKSearchResult(self.opener, title,
                                                      downloadURL, quality))
        return results
Example #12
    def _find_season_quality(self, title, torrent_id, show):
        """ Return the modified title of a Season Torrent with the quality found inspecting torrent file list """

        mediaExtensions = [
            'avi', 'mkv', 'wmv', 'divx', 'vob', 'dvr-ms', 'wtv', 'ts',
            'ogv', 'rar', 'zip'
        ]

        quality = Quality.UNKNOWN

        fileName = None

        fileURL = self.proxy._buildURL(self.url +
                                       'ajax_details_filelist.php?id=' +
                                       str(torrent_id))

        data = self.getURL(fileURL)

        if not data:
            return None

        filesList = re.findall('<td.+>(.*?)</td>', data)

        if not filesList:
            logger.log(u"Unable to get the torrent file list for " + title,
                       logger.ERROR)

        for fileName in filter(
                lambda x: x.rpartition(".")[2].lower() in mediaExtensions,
                filesList):
            quality = Quality.nameQuality(os.path.basename(fileName),
                                          show.anime)
            if quality != Quality.UNKNOWN: break

        if fileName != None and quality == Quality.UNKNOWN:
            quality = Quality.assumeQuality(os.path.basename(fileName))

        if quality == Quality.UNKNOWN:
            logger.log(u"No Season quality for " + title, logger.DEBUG)
            return None

        try:
            myParser = NameParser(show=show)
            parse_result = myParser.parse(fileName)
        except InvalidNameException:
            return None

        logger.log(
            u"Season quality for " + title + " is " +
            Quality.qualityStrings[quality], logger.DEBUG)

        if parse_result.series_name and parse_result.season_number:
            title = parse_result.series_name + ' S%02d' % int(
                parse_result.season_number) + ' ' + self._reverseQuality(
                    quality)

        return title
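
Examples #12, #15 and #31 all infer a season torrent's quality the same way: scan the torrent's file list for the first media file whose name yields a known quality, and otherwise assume a quality from the last media file seen. The core loop condensed into a standalone helper; the quality functions are passed in, so nothing here depends on Sick-Beard's actual API:

import os

MEDIA_EXTENSIONS = {'avi', 'mkv', 'wmv', 'divx', 'vob', 'dvr-ms',
                    'wtv', 'ts', 'ogv', 'rar', 'zip'}

def season_quality(files, name_quality, assume_quality, unknown=0):
    """Quality of the first media file that yields one, else an assumed one."""
    last_media = None
    for name in files:
        if name.rpartition('.')[2].lower() not in MEDIA_EXTENSIONS:
            continue
        last_media = name
        quality = name_quality(os.path.basename(name))
        if quality != unknown:
            return quality
    if last_media is not None:
        return assume_quality(os.path.basename(last_media))
    return unknown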
Example #13
File: ezrss.py Project: r-win/Sick-Beard
    def getQuality(self, item, anime=False):

        torrent_node = item.getElementsByTagName('torrent')[0]
        filename_node = torrent_node.getElementsByTagName('fileName')[0]
        filename = get_xml_text(filename_node)

        quality = Quality.nameQuality(filename, anime)

        return quality
Example #14
    def getQuality(self, item):
        
        torrent_node = item.getElementsByTagName('torrent')[0]
        filename_node = torrent_node.getElementsByTagName('fileName')[0]
        filename = get_xml_text(filename_node)

        quality = Quality.nameQuality(filename)
        
        return quality
Example #15
    def _find_season_quality(self, title, torrent_id):
        """ Rewrite the title of a Season Torrent with the quality found inspecting torrent file list """

        mediaExtensions = ["avi", "mkv", "wmv", "divx", "vob", "dvr-ms", "wtv", "ts" "ogv", "rar", "zip"]

        quality = Quality.UNKNOWN

        fileName = None

        fileURL = self.proxy._buildURL(self.url + "ajax_details_filelist.php?id=" + str(torrent_id))

        data = self.getURL(fileURL)

        if not data:
            return None

        filesList = re.findall("<td.+>(.*?)</td>", data)

        if not filesList:
            logger.log(u"Unable to get the torrent file list for " + title, logger.ERROR)

        #        for fileName in filter(lambda x: x.rpartition(".")[2].lower() in mediaExtensions, filesList):
        #            quality = Quality.nameQuality(os.path.basename(fileName))
        #            if quality != Quality.UNKNOWN: break

        for fileName in filesList:
            if fileName.rpartition(".")[2].lower() in mediaExtensions:
                quality = Quality.nameQuality(fileName)
                if quality != Quality.UNKNOWN:
                    break

        if fileName != None and quality == Quality.UNKNOWN:
            quality = Quality.assumeQuality(os.path.basename(fileName))

        if quality == Quality.UNKNOWN:
            logger.log(u"No Season quality for " + title, logger.DEBUG)
            return None

        try:
            myParser = NameParser()
            parse_result = myParser.parse(fileName)
        except InvalidNameException:
            return None

        logger.log(u"Season quality for " + title + " is " + Quality.qualityStrings[quality], logger.DEBUG)

        if parse_result.series_name and parse_result.season_number:
            title = (
                parse_result.series_name
                + " S%02d" % int(parse_result.season_number)
                + " "
                + self._reverseQuality(quality)
            )

        return title
Example #16
    def _doSearch(self, searchString, show=None, season=None, french=None):

        if not self.login_done:
            self._doLogin(sickbeard.ADDICT_USERNAME, sickbeard.ADDICT_PASSWORD)

        results = []

        searchUrl = self.url + '/index.php?page=torrents&active=1&options=0&' + searchString.replace(
            '!', '')

        logger.log(u"Search string: " + searchUrl, logger.DEBUG)

        r = self.opener.open(searchUrl)

        soup = BeautifulSoup(r, "html.parser")

        resultsTable = soup.find("table", {"class": "lista", "width": "100%"})
        if resultsTable:

            rows = resultsTable.findAll("tr")
            x = 0
            for row in rows:
                x = x + 1
                if (x > 1):
                    #bypass first row because title only
                    columns = row.find('td')

                    link = row.findAll('td')[1].find(
                        "a", href=re.compile("torrent-details"))
                    if link:
                        title = link.text
                        logger.log(u"ADDICT TITLE TEMP: " + title,
                                   logger.DEBUG)
                        downloadURL = self.url + "/" + row.find(
                            "a", href=re.compile("\.torrent"))['href']

                        quality = Quality.nameQuality(title)
                        if quality == Quality.UNKNOWN and title:
                            if '720p' not in title.lower() and '1080p' not in title.lower():
                                quality = Quality.SDTV
                        if show and french == None:
                            results.append(
                                ADDICTSearchResult(self.opener, title,
                                                   downloadURL, quality,
                                                   str(show.audio_lang)))
                        elif show and french:
                            results.append(
                                ADDICTSearchResult(self.opener, title,
                                                   downloadURL, quality, 'fr'))
                        else:
                            results.append(
                                ADDICTSearchResult(self.opener, title,
                                                   downloadURL, quality))

        return results
Example #17
File: gks.py Project: Arakmar/Sick-Beard
    def _doSearch(self, searchString, show=None, season=None, french=None):
        results = []
        searchUrl = self.url+'rdirect.php?type=search&'+searchString.replace('!','')
        logger.log(u"Search URL: " + searchUrl, logger.DEBUG)
        
        data = self.getURL(searchUrl)
        if "bad key" in str(data).lower() :
            logger.log(u"GKS key invalid, check your config", logger.ERROR)
            return []

        parsedXML = parseString(data)
        channel = parsedXML.getElementsByTagName('channel')[0]
        description = channel.getElementsByTagName('description')[0]
        description_text = helpers.get_xml_text(description).lower()
        
        if "user can't be found" in description_text:
            logger.log(u"GKS invalid digest, check your config", logger.ERROR)
            return []
        elif "invalid hash" in description_text:
            logger.log(u"GKS invalid hash, check your config", logger.ERROR)
            return []
        else :
            items = channel.getElementsByTagName('item')
            for item in items:
                title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
                if "aucun resultat" in title.lower() :
                    logger.log(u"No results found in " + searchUrl, logger.DEBUG)
                    return []
                count = 1
                if season:
                    count = 0
                    if show:
                        if show.audio_lang == 'fr' or french:
                            for frword in ['french', 'truefrench', 'multi']:
                                if frword in title.lower():
                                    count += 1
                        else:
                            count += 1
                    else:
                        count += 1
                if count == 0:
                    continue
                else:
                    downloadURL = helpers.get_xml_text(item.getElementsByTagName('link')[0])
                    quality = Quality.nameQuality(title)
                    if quality==Quality.UNKNOWN and title:
                        if '720p' not in title.lower() and '1080p' not in title.lower():
                            quality=Quality.SDTV
                    if show and french==None:
                        results.append( GksSearchResult( self.opener, title, downloadURL, quality, str(show.audio_lang) ) )
                    elif show and french:
                        results.append( GksSearchResult( self.opener, title, downloadURL, quality, 'fr' ) )
                    else:
                        results.append( GksSearchResult( self.opener, title, downloadURL, quality ) )
        return results
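
For season searches, gks.py keeps a result only when the release language fits: if the show's audio language is French (or french is forced), the title must contain at least one French marker. The same gate extracted as a sketch (not the provider's actual API):

FRENCH_MARKERS = ('french', 'truefrench', 'multi')

def keep_season_result(title, audio_lang, french=None):
    """Mirror of the keyword-counting filter above, for season results."""
    if audio_lang == 'fr' or french:
        return any(marker in title.lower() for marker in FRENCH_MARKERS)
    return True

print(keep_season_result('Show.S01.MULTi.720p.BluRay', 'fr'))  # True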
Example #18
 def getQuality(self, item):
     """
     Figures out the quality of the given RSS item node
     
     item: An xml.dom.minidom.Node representing the <item> tag of the RSS feed
     
     Returns a Quality value obtained from the node's data 
     """
     (title, url) = self._get_title_and_url(item) #@UnusedVariable
     quality = Quality.nameQuality(title)
     return quality
Example #19
 def getQuality(self, item):
     """
     Figures out the quality of the given RSS item node
     
     item: An xml.dom.minidom.Node representing the <item> tag of the RSS feed
     
     Returns a Quality value obtained from the node's data 
     """
     (title, url) = self._get_title_and_url(item)  #@UnusedVariable
     quality = Quality.nameQuality(title)
     return quality
Example #20
 def getQuality(self, item, anime=False):
     """
     Figures out the quality of the given RSS item node
     
     item: An xml.dom.minidom.Node representing the <item> tag of the RSS feed
     
     Returns a Quality value obtained from the node's data 
     """
     (title, url) = self._get_title_and_url(item) #@UnusedVariable
     logger.log(u"geting quality for:" + title+ " anime: "+str(anime),logger.DEBUG)
     quality = Quality.nameQuality(title, anime)
     return quality
Example #21
 def getQuality(self, item, anime=False):
     """
     Figures out the quality of the given RSS item node
     
     item: An xml.dom.minidom.Node representing the <item> tag of the RSS feed
     
     Returns a Quality value obtained from the node's data 
     """
     (title, url) = self._get_title_and_url(item) #@UnusedVariable
     logger.log(u"geting quality for:" + title+ " anime: "+str(anime),logger.DEBUG)
     quality = Quality.nameQuality(title, anime)
     return quality
Example #22
    def _doSearch(self, search_params, show=None):
    
        results = []
        items = {'Season': [], 'Episode': []}

        for mode in search_params.keys():
            for search_string in search_params[mode]:

                searchURL = self.proxy._buildURL(self.searchurl %(urllib.quote(search_string)))    
        
                logger.log(u"Search string: " + searchURL, logger.DEBUG)
        
                data = self.getURL(searchURL)
                if not data:
                    return []
        
                re_title_url = self.proxy._buildRE(self.re_title_url)
                
                #Extracting torrent information from data returned by searchURL                   
                match = re.compile(re_title_url, re.DOTALL ).finditer(urllib.unquote(data))
                for torrent in match:

                    title = torrent.group('title').replace('_', '.')  # not sure why, but SickBeard skips releases with '_' in the name
                    url = torrent.group('url')
                    id = int(torrent.group('id'))
                    seeders = int(torrent.group('seeders'))
                    leechers = int(torrent.group('leechers'))

                    #Filter unseeded torrent
                    if seeders == 0 or not title \
                    or not show_name_helpers.filterBadReleases(title):
                        continue 
                   
                    #Accept Torrent only from Good People for every Episode Search
                    if sickbeard.THEPIRATEBAY_TRUSTED and re.search('(VIP|Trusted|Helper)',torrent.group(0))== None:
                        logger.log(u"ThePirateBay Provider found result "+torrent.group('title')+" but that doesn't seem like a trusted result so I'm ignoring it",logger.DEBUG)
                        continue

                    #Try to find the real Quality for full season torrent analyzing files in torrent 
                    if mode == 'Season' and Quality.nameQuality(title) == Quality.UNKNOWN:     
                        if not self._find_season_quality(title,id): continue
                        
                    item = title, url, id, seeders, leechers
                    
                    items[mode].append(item)    

            #For each search mode sort all the items by seeders
            items[mode].sort(key=lambda tup: tup[3], reverse=True)        

            results += items[mode]  
                
        return results
Example #23
    def getQuality(self, item):
        
        #torrent_node = item.getElementsByTagName('torrent')[0]
        #filename_node = torrent_node.getElementsByTagName('title')[0]
        #filename = get_xml_text(filename_node)
        
        # I think the only place we can get anything resembling the filename is in
        # the title
        filename = helpers.get_xml_text(item.getElementsByTagName('title')[0])

        quality = Quality.nameQuality(filename)
        
        return quality
Example #24
    def _doSearch(self, searchString, show=None, season=None, french=None):

        if not self.login_done:
            self._doLogin(sickbeard.FTDB_USERNAME, sickbeard.FTDB_PASSWORD)

        results = []
        searchUrl = self.url + '/?section=TORRENTS&' + searchString.replace(
            '!', '')
        logger.log(u"Search string: " + searchUrl, logger.DEBUG)

        r = self.opener.open(searchUrl)
        soup = BeautifulSoup(r)
        resultsTable = soup.find("div", {"class": "DataGrid"})
        if resultsTable:
            rows = resultsTable.findAll("ul")

            for row in rows:
                link = row.find("a", title=True)
                title = link['title']

                autogetURL = self.url + '/' + (row.find(
                    "li", {
                        "class": "torrents_name"
                    }).find('a')['href'][1:]).replace('#FTD_MENU', '&menu=4')
                r = self.opener.open(autogetURL, 'wb').read()
                soup = BeautifulSoup(r)
                downloadURL = soup.find("div", {
                    "class": "autoget"
                }).find('a')['href']

                quality = Quality.nameQuality(title)
                if quality == Quality.UNKNOWN and title:
                    if '720p' not in title.lower() and '1080p' not in title.lower():
                        quality = Quality.SDTV
                if show and french == None:
                    results.append(
                        FTDBSearchResult(self.opener, link['title'],
                                         downloadURL, quality,
                                         str(show.audio_lang)))
                elif show and french:
                    results.append(
                        FTDBSearchResult(self.opener, link['title'],
                                         downloadURL, quality, 'fr'))
                else:
                    results.append(
                        FTDBSearchResult(self.opener, link['title'],
                                         downloadURL, quality))

        return results
Example #25
	def _doSearch(self, searchString, show=None, season=None, french=None):

		
		if not self.login_done:
			self._doLogin( sickbeard.XTHOR_USERNAME, sickbeard.XTHOR_PASSWORD )

		results = []
	   
		searchUrl = self.url + '/browse.php?' + searchString.replace('!','')

		logger.log(u"Search string: " + searchUrl, logger.DEBUG)
		
		r = self.opener.open( searchUrl )

		soup = BeautifulSoup( r)

		resultsTable = soup.find("table", { "class" : "table2"  })
		if resultsTable:

			rows = resultsTable.findAll("tr")
		   
			for row in rows:
			
				link = row.find("a",href=re.compile("details.php"))                                                           
								  
				if link:               
					title = link.text
					recherched=searchUrl.split("&search=")[1]
					recherched=recherched.replace(" ","(.*)")
					recherched= recherched + "(.*)"
					logger.log(u"XTHOR TITLE : " + title, logger.DEBUG)
					logger.log(u"XTHOR CHECK MATCH : " + recherched, logger.DEBUG)                                        
					if re.match(recherched,title , re.IGNORECASE):                                        
						downloadURL =  row.find("a",href=re.compile("download.php"))['href']
						downloadURL = self.url + '/'+ downloadURL
						logger.log(u"XTHOR DOWNLOAD URL : " + downloadURL, logger.DEBUG) 
					else:
						continue
					quality = Quality.nameQuality( title )
					if quality==Quality.UNKNOWN and title:
						if '720p' not in title.lower() and '1080p' not in title.lower():
							quality=Quality.SDTV
					if show and french==None:
						results.append( XTHORSearchResult( self.opener, title, downloadURL, quality, str(show.audio_lang) ) )
					elif show and french:
						results.append( XTHORSearchResult( self.opener, title, downloadURL, quality, 'fr' ) )
					else:
						results.append( XTHORSearchResult( self.opener, title, downloadURL, quality ) )
		
		return results
Example #26
    def _doSearch(self, searchString, show=None, season=None, french=None):

        
        if not self.login_done:
            self._doLogin( sickbeard.ADDICT_USERNAME, sickbeard.ADDICT_PASSWORD )

        results = []
        
        searchUrl = self.url + '/index.php?page=torrents&active=1&options=0&' + searchString.replace('!','')
 
        logger.log(u"Search string: " + searchUrl, logger.DEBUG)
        
        r = self.opener.open( searchUrl )

        soupfull = BeautifulSoup( r)
        # prettify is a method; it must be called to get the page markup back
        delbegin = soupfull.prettify().split('<table width="100%">')[1]
        restable = delbegin[delbegin.find('<table'):delbegin.find('table>') + 6]
        soup = BeautifulSoup(restable)
        resultsTable = soup.find("table")

        if resultsTable:

            rows = resultsTable.findAll("tr")
            x = 0
            for row in rows:
                x = x + 1
                if x > 1:
                    # bypass the first row because it contains only the title
                    if 'Liste des torrents' in str(row):
                        continue
                 
                    link = row.findAll('td')[1].find("a",  href=re.compile("torrent-details")) 
                    if link:               
                        title = link.text
                        logger.log(u"ADDICT TITLE TEMP: " + title, logger.DEBUG)                   
                        downloadURL =  self.url + "/" + row.find("a",href=re.compile("\.torrent"))['href']              
                
                        quality = Quality.nameQuality( title )
                        if quality==Quality.UNKNOWN and title:
                            if '720p' not in title.lower() and '1080p' not in title.lower():
                                quality=Quality.SDTV
                        if show and french==None:
                            results.append( ADDICTSearchResult( self.opener, title, downloadURL, quality, str(show.audio_lang) ) )
                        elif show and french:
                            results.append( ADDICTSearchResult( self.opener, title, downloadURL, quality, 'fr' ) )
                        else:
                            results.append( ADDICTSearchResult( self.opener, title, downloadURL, quality ) )
        
        return results
Example #27
File: tpi.py Project: yannickcr/Sick-Beard
    def _doSearch(self, searchString, show=None, season=None, french=None):

        if not self.login_done:
            self._doLogin(sickbeard.TPI_USERNAME, sickbeard.TPI_PASSWORD)

        results = []

        searchUrl = self.url + '/parcourir.php?' + searchString.replace(
            '!', '')

        logger.log(u"Search string: " + searchUrl, logger.DEBUG)

        r = self.opener.open(searchUrl)

        soup = BeautifulSoup(r, "html.parser")

        resultsTable = soup.find("table", {"class": "ttable_headinner"})
        if resultsTable:

            rows = resultsTable.findAll("tr", {"class": "t-row"})

            for row in rows:

                link = row.find("a", title=True)
                title = link['title']

                downloadURL = row.find("a",
                                       href=re.compile("\.torrent"))['href']

                quality = Quality.nameQuality(title)
                if quality == Quality.UNKNOWN and title:
                    if '720p' not in title.lower() and '1080p' not in title.lower():
                        quality = Quality.SDTV
                if show and french == None:
                    results.append(
                        TPISearchResult(self.opener, title, downloadURL,
                                        quality, str(show.audio_lang)))
                elif show and french:
                    results.append(
                        TPISearchResult(self.opener, title, downloadURL,
                                        quality, 'fr'))
                else:
                    results.append(
                        TPISearchResult(self.opener, title, downloadURL,
                                        quality))

        return results
Example #28
    def getQuality(self, item):
        """
        Figures out the quality of the given RSS item node

        item: An elementtree.ElementTree element representing the <item> tag of the RSS feed

        Returns a Quality value obtained from the node's data

        """
        torrent_info = self._get_title_and_url(item)
        if len(torrent_info) == 2:
            (title, url) = torrent_info
        else:
            (title, url, seeders) = torrent_info
        quality = Quality.nameQuality(title)
        return quality
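
The two-or-three element unpack above can also be written without branching on tuple length; a small alternative sketch (field order assumed from the code above):

def split_torrent_info(torrent_info):
    """Unpack (title, url) or (title, url, seeders) uniformly."""
    title, url = torrent_info[0], torrent_info[1]
    seeders = torrent_info[2] if len(torrent_info) > 2 else None
    return title, url, seeders

print(split_torrent_info(('Show.S01E01.720p', 'http://example.invalid/t', 42)))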
Example #29
File: xthor.py Project: kinooo/Sick-Beard
    def _doSearch(self, searchString, show=None, season=None, french=None):

        if not self.login_done:
            self._doLogin(sickbeard.XTHOR_USERNAME, sickbeard.XTHOR_PASSWORD)

        results = []

        searchUrl = self.url + "?p=torrents&pid=10&search_type=name&" + searchString.replace("!", "")

        logger.log(u"Search string: " + searchUrl, logger.DEBUG)

        r = self.opener.open(searchUrl)

        soup = BeautifulSoup(r, "html.parser")

        resultsTable = soup.find("table", {"id": "torrents_table_classic"})
        if resultsTable:

            rows = resultsTable.findAll("tr")

            for row in rows:

                link = row.find("a", href=re.compile("action=details"))

                if link:
                    title = link.text
                    recherched = searchUrl.split("&[PARAMSTR]=")[1]
                    recherched = recherched.replace(" ", "(.*)")
                    logger.log(u"XTHOR TITLE : " + title, logger.DEBUG)
                    logger.log(u"XTHOR CHECK MATCH : " + recherched, logger.DEBUG)
                    if re.match(recherched, title, re.IGNORECASE):
                        downloadURL = row.find("a", href=re.compile("action=download"))["href"]
                        logger.log(u"XTHOR DOWNLOAD URL : " + downloadURL, logger.DEBUG)
                        quality = Quality.nameQuality(title)
                        if quality == Quality.UNKNOWN and title:
                            if "720p" not in title.lower() and "1080p" not in title.lower():
                                quality = Quality.SDTV
                        if show and french == None:
                            results.append(
                                XTHORSearchResult(self.opener, title, downloadURL, quality, str(show.audio_lang))
                            )
                        elif show and french:
                            results.append(XTHORSearchResult(self.opener, title, downloadURL, quality, "fr"))
                        else:
                            results.append(XTHORSearchResult(self.opener, title, downloadURL, quality))

        return results
Example #30
    def _doSearch(self, searchString, show=None, season=None, french=None):

        
        if not self.login_done:
            self._doLogin( sickbeard.THINKGEEK_USERNAME, sickbeard.THINKGEEK_PASSWORD )

        results = []
        
        searchUrl = self.url + '?p=torrents&pid=10&search_type=name&' + searchString.replace('!','')
 
        logger.log(u"Search string: " + searchUrl, logger.DEBUG)
        
        r = self.opener.open( searchUrl )

        soup = BeautifulSoup( r)

        resultsTable = soup.find("div", { "id" : "content"  })
        if resultsTable:

            rows = resultsTable.findAll("div" , {"class" : "torrent-box"} )
           
            for row in rows:
            
                link = row.find("a",href=re.compile("action=details"))                                                       
                                  
                if link:               
                    title = link.text
                    recherched = searchUrl.split("&[PARAMSTR]=")[1]
                    recherched = recherched.replace(".", "(.*)").replace(" ", "(.*)").replace("'", "(.*)")
                    logger.log(u"THINKGEEK TITLE : " + title, logger.DEBUG)  
                    logger.log(u"THINKGEEK CHECK MATCH : " + recherched, logger.DEBUG) 
                    if re.match(recherched,title , re.IGNORECASE):                 
                        downloadURL =  row.find("a",href=re.compile("action=download"))['href']
                    else:
                        continue            
                    logger.log(u"THINKGEEK DOWNLOAD URL : " + title, logger.DEBUG) 
                    quality = Quality.nameQuality( title )
                    if quality==Quality.UNKNOWN and title:
                        if '720p' not in title.lower() and '1080p' not in title.lower():
                            quality=Quality.SDTV
                    if show and french==None:
                        results.append( THINKGEEKSearchResult( self.opener, title, downloadURL, quality, str(show.audio_lang) ) )
                    elif show and french:
                        results.append( THINKGEEKSearchResult( self.opener, title, downloadURL, quality, 'fr' ) )
                    else:
                        results.append( THINKGEEKSearchResult( self.opener, title, downloadURL, quality ) )        
        return results
Example #31
    def _find_season_quality(self, title, torrent_id):
        """ Return the modified title of a Season Torrent with the quality found inspecting torrent file list """

        mediaExtensions = ['avi', 'mkv', 'wmv', 'divx',
                           'vob', 'dvr-ms', 'wtv', 'ts',
                           'ogv', 'rar', 'zip']
        
        quality = Quality.UNKNOWN        
        
        fileName = None
        
        fileURL = self.proxy._buildURL(self.url+'ajax_details_filelist.php?id='+str(torrent_id))
      
        data = self.getURL(fileURL)
        
        if not data:
            return None
        
        filesList = re.findall('<td.+>(.*?)</td>',data) 
        
        if not filesList: 
            logger.log(u"Unable to get the torrent file list for "+title, logger.ERROR)
            
        for fileName in filter(lambda x: x.rpartition(".")[2].lower() in mediaExtensions, filesList):
            quality = Quality.nameQuality(os.path.basename(fileName))
            if quality != Quality.UNKNOWN: break

        if fileName!=None and quality == Quality.UNKNOWN:
            quality = Quality.assumeQuality(os.path.basename(fileName))            

        if quality == Quality.UNKNOWN:
            logger.log(u"No Season quality for "+title, logger.DEBUG)
            return None

        try:
            myParser = NameParser()
            parse_result = myParser.parse(fileName)
        except InvalidNameException:
            return None
        
        logger.log(u"Season quality for "+title+" is "+Quality.qualityStrings[quality], logger.DEBUG)
        
        if parse_result.series_name and parse_result.season_number: 
            title = parse_result.series_name+' S%02d' % int(parse_result.season_number)+' '+self._reverseQuality(quality)
        
        return title
Example #32
File: t411.py Project: xila76/Sick-Beard
    def _doSearch(self, searchString, show=None, season=None, french=None):

        if not self.login_done:
            self._doLogin(sickbeard.T411_USERNAME, sickbeard.T411_PASSWORD)

        results = []
        searchUrl = self.url + '/torrents/search/?' + searchString.replace(
            '!', '')
        logger.log(u"Search string: " + searchUrl, logger.DEBUG)

        r = self.opener.open(searchUrl)
        soup = BeautifulSoup(r, "html.parser")
        resultsTable = soup.find("table", {"class": "results"})
        if resultsTable:
            rows = resultsTable.find("tbody").findAll("tr")

            for row in rows:
                link = row.find("a", title=True)
                title = link['title']
                id = row.find_all('td')[2].find_all(
                    'a')[0]['href'][1:].replace('torrents/nfo/?id=', '')
                downloadURL = ('http://www.t411.me/torrents/download/?id=%s' %
                               id)

                quality = Quality.nameQuality(title)
                if quality == Quality.UNKNOWN and title:
                    if '720p' not in title.lower() and '1080p' not in title.lower():
                        quality = Quality.SDTV
                if show and french == None:
                    results.append(
                        T411SearchResult(self.opener, link['title'],
                                         downloadURL, quality,
                                         str(show.audio_lang)))
                elif show and french:
                    results.append(
                        T411SearchResult(self.opener, link['title'],
                                         downloadURL, quality, 'fr'))
                else:
                    results.append(
                        T411SearchResult(self.opener, link['title'],
                                         downloadURL, quality))

        return results
Example #33
    def _doSearch(self, searchString, show=None, season=None, french=None):

        
        if not self.login_done:
            self._doLogin( sickbeard.LIBERTALIA_USERNAME, sickbeard.LIBERTALIA_PASSWORD )

        results = []
        
        searchUrl = self.url + '/torrents.php?' + searchString.replace('!','')
 
        logger.log(u"Search string: " + searchUrl, logger.DEBUG)
        
        r = self.opener.open( searchUrl )
        soup = BeautifulSoup( r)

        resultsTable = soup.find("table", { "class" : "torrent_table"  })
        if resultsTable:
            logger.log(u"LIBERTALIA found resulttable ! " , logger.DEBUG)  
            rows = resultsTable.findAll("tr" ,  {"class" : "torrent_row new"}  )  # torrent_row new
            
            for row in rows:
                           
                #bypass first row because title only  
                columns = row.find('td', {"class" : "torrent_name"} )                            
                logger.log(u"LIBERTALIA found rows ! " , logger.DEBUG) 
                link = columns.find("a",  href=re.compile("torrents")) 
                if link:               
                    title = link.text
                    logger.log(u"LIBERTALIA TITLE TEMP: " + title, logger.DEBUG)                   
                    #downloadURL =  self.url + "/" + row.find("a",href=re.compile("torrent_pass"))['href']              
                    downloadURL =  row.find("a",href=re.compile("torrent_pass"))['href']
                
                    quality = Quality.nameQuality( title )
                    if quality==Quality.UNKNOWN and title:
                        if '720p' not in title.lower() and '1080p' not in title.lower():
                            quality=Quality.SDTV
                    if show and french==None:
                        results.append( LIBERTALIASearchResult( self.opener, title, downloadURL, quality, str(show.audio_lang) ) )
                    elif show and french:
                        results.append( LIBERTALIASearchResult( self.opener, title, downloadURL, quality, 'fr' ) )
                    else:
                        results.append( LIBERTALIASearchResult( self.opener, title, downloadURL, quality ) )
                    
        return results
Example #34
    def _doSearch(self, searchString, show=None, season=None, french=None):
        """
            Use the given URL to search the episodes required
        """
        searchUrl = self.url + '?section=TORRENTS&exact=1&search=Rechercher&' + searchString
        logger.log(u"Search URL: " + searchUrl, logger.DEBUG)     
        
        #Check if we can do the search
        if not self._isDownloadAllowed():
            return []

        results = []
                
        r,soup = self._urlOpen(searchUrl)
        if not soup:
            return []

        resultsTable = soup.find("div", { "class" : "DataGrid" })

        if resultsTable:
            rows = resultsTable.findAll("ul")
    
            for row in rows:
                link = row.find("li", { "class" : "torrents_name" }).find("a", title=True)
                title = link['title']
                downloadlink = row.find("li", { "class" : "torrents_download" }).find("a")['href'][1:]
                downloadURL = (self.url + downloadlink)

                #print downloadURL
                logger.log(u"Download URL: " + downloadURL, logger.DEBUG)
                                
                quality = Quality.nameQuality( title )
                if quality==Quality.UNKNOWN and title:
                    if '720p' not in title.lower() and '1080p' not in title.lower():
                        quality=Quality.SDTV
                if show and french==None:
                    results.append( FrenchTorrentDBSearchResult( link['title'], downloadURL, quality, str(show.audio_lang) ) )
                elif show and french:
                    results.append( FrenchTorrentDBSearchResult( link['title'], downloadURL, quality, 'fr' ) )
                else:
                    results.append( FrenchTorrentDBSearchResult( link['title'], downloadURL, quality ) )
                
        return results
Example #35
    def _doSearch(self, searchString, show=None, season=None, french=None):
        if not self.login_done:
            self._doLogin(sickbeard.SOTORRENT_USERNAME,
                          sickbeard.SOTORRENT_PASSWORD)

        results = []

        search_url = "{0}/sphinx.php?{1}".format(self.url,
                                                 searchString.replace('!', ''))
        req = self.opener.open(search_url)
        page = BeautifulSoup(req)

        torrent_table = page.find("table", {"id": "torrent_list"})
        if torrent_table:
            logger.log(u"So-torrent found shows ! ", logger.DEBUG)
            torrent_rows = torrent_table.findAll("tr", {"id": "infos_sphinx"})

            for row in torrent_rows:
                release = row.strong.string
                id_search = row.find("img", {"alt": "+"})
                id_torrent = id_search['id'].replace('expandoGif', '')
                download_url = "https://so-torrent.com/get.php?id={0}".format(id_torrent)
                id_quality = Quality.nameQuality(release)

                if show and french == None:
                    results.append(
                        SOTORRENTSearchResult(self.opener, release,
                                              download_url, id_quality,
                                              str(show.audio_lang)))
                elif show and french:
                    results.append(
                        SOTORRENTSearchResult(self.opener, release,
                                              download_url, id_quality, 'fr'))
                else:
                    results.append(
                        SOTORRENTSearchResult(self.opener, release,
                                              download_url, id_quality))

        else:
            logger.log(u"No table founded.", logger.DEBUG)
            self.login_done = False
        return results
Example #36
    def getQuality(self, item):
        """
        Figures out the quality of the given RSS item node
        item: An xml.dom.minidom.Node representing the <item> tag of the RSS feed
        Returns a Quality value obtained from the node's data
        
        Overridden here because dtvt has its own quirky way of doing quality. 
        """
        (title, url) = self._get_title_and_url(item)  #@UnusedVariable
        if title:
            if title.endswith(' [HD]'):
                return Quality.SDTV
            elif title.endswith(' [720]'):
                return Quality.HDTV
            elif title.endswith(' [1080]'):
                return Quality.FULLHDTV  # best choice available I think

        quality = Quality.nameQuality(title)
        return quality
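The suffix mapping above is deliberately counter-intuitive, so a quick illustration may help (the titles are made up; the Quality constants are assumed to behave as in Sick-Beard):

# 'Show.S01E01 [HD]'   -> Quality.SDTV      (this feed's "[HD]" tag marks plain SD)
# 'Show.S01E01 [720]'  -> Quality.HDTV
# 'Show.S01E01 [1080]' -> Quality.FULLHDTV
# any other title falls through to Quality.nameQuality(title)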
Example #38
    def _doSearch(self, searchString, show=None, season=None):
        results = []
        searchUrl = self.url + 'rdirect.php?type=search&' + searchString
        logger.log(u"Search URL: " + searchUrl, logger.DEBUG)

        data = self.getURL(searchUrl)
        if "bad key" in str(data).lower():
            logger.log(u"GKS key invalid, check your config", logger.ERROR)
            return []

        parsedXML = parseString(data)
        channel = parsedXML.getElementsByTagName('channel')[0]
        description = channel.getElementsByTagName('description')[0]
        description_text = helpers.get_xml_text(description).lower()

        if "user can't be found" in description_text:
            logger.log(u"GKS invalid digest, check your config", logger.ERROR)
            return []
        elif "invalid hash" in description_text:
            logger.log(u"GKS invalid hash, check your config", logger.ERROR)
            return []
        else:
            items = channel.getElementsByTagName('item')
            for item in items:
                title = helpers.get_xml_text(
                    item.getElementsByTagName('title')[0])
                if "aucun resultat" in title.lower():
                    logger.log(u"No results found in " + searchUrl,
                               logger.DEBUG)
                    return []
                else:
                    downloadURL = helpers.get_xml_text(
                        item.getElementsByTagName('link')[0])
                    quality = Quality.nameQuality(title)
                    if show:
                        results.append(
                            GksSearchResult(self.opener, title, downloadURL,
                                            quality, str(show.audio_lang)))
                    else:
                        results.append(
                            GksSearchResult(self.opener, title, downloadURL,
                                            quality))
        return results
Example #39
File: fnt.py Project: pedro2d10/Sick-Beard
    def _doSearch(self, searchString, show=None, season=None, french=None):

        
        if not self.login_done:
            self._doLogin( sickbeard.FNT_USERNAME, sickbeard.FNT_PASSWORD )

        results = []
        
        searchUrl = self.url + '/torrents/recherche/?afficher=1&c118=1&c129=1&c119=1&c120=1&c121=1&c126=1&c137=1&c138=1&c146=1&c122=1&c110=1&c109=1&c135=1&c148=1&c153=1&c149=1&c150=1&c154=1&c155=1&c156=1&c114=1&visible=1&freeleech=0&nuke=0&3D=0&' + searchString.replace('!','')
 
        logger.log(u"Search string: " + searchUrl, logger.DEBUG)
        
        r = self.opener.open( searchUrl )

        soup = BeautifulSoup( r, "html.parser" )

        resultsTable = soup.find("table", { "id" : "tablealign3bis"  })
        if resultsTable:

            rows = resultsTable.findAll("tr" , {"class" : "ligntorrent"} )
           
            for row in rows:

                link = row.findAll('td')[1].find("a", href=re.compile("fiche_film"))

                if link:
                    title = link.text
                    logger.log(u"FNT TITLE : " + title, logger.DEBUG)
                    downloadURL = self.url + "/" + row.find("a", href=re.compile(r"download\.php"))['href']

                    quality = Quality.nameQuality(title)
                    if quality == Quality.UNKNOWN and title:
                        if '720p' not in title.lower() and '1080p' not in title.lower():
                            quality = Quality.SDTV
                    if show and french is None:
                        results.append(FNTSearchResult(self.opener, title, downloadURL, quality, str(show.audio_lang)))
                    elif show and french:
                        results.append(FNTSearchResult(self.opener, title, downloadURL, quality, 'fr'))
                    else:
                        results.append(FNTSearchResult(self.opener, title, downloadURL, quality))
        
        return results
Example #40
    def _doSearch(self, searchString, show=None, season=None, french=None):

        
        if not self.login_done:
            self._doLogin( sickbeard.FNT_USERNAME, sickbeard.FNT_PASSWORD )

        results = []
        
        searchUrl = self.url + '/torrents/recherche/?afficher=1&c118=1&c129=1&c119=1&c120=1&c121=1&c126=1&c137=1&c138=1&c146=1&c122=1&c110=1&c109=1&c135=1&c148=1&c153=1&c149=1&c150=1&c154=1&c155=1&c156=1&c114=1&visible=1&freeleech=0&nuke=0&3D=0&' + searchString.replace('!','')
 
        logger.log(u"Search string: " + searchUrl, logger.DEBUG)
        
        r = self.opener.open( searchUrl )

        soup = BeautifulSoup( r, "html.parser" )

        resultsTable = soup.find("table", { "id" : "tablealign3bis"  })
        if resultsTable:

            rows = resultsTable.findAll("tr" , {"class" : "ligntorrent"} )
           
            for row in rows:

                link = row.findAll('td')[1].find("a")

                if link:
                    title = link.text
                    logger.log(u"FNT TITLE : " + title, logger.DEBUG)
                    downloadURL = self.url + "/" + row.find("a", href=re.compile(r"download\.php"))['href']

                    quality = Quality.nameQuality(title)
                    if quality == Quality.UNKNOWN and title:
                        if '720p' not in title.lower() and '1080p' not in title.lower():
                            quality = Quality.SDTV
                    if show and french is None:
                        results.append(FNTSearchResult(self.opener, title, downloadURL, quality, str(show.audio_lang)))
                    elif show and french:
                        results.append(FNTSearchResult(self.opener, title, downloadURL, quality, 'fr'))
                    else:
                        results.append(FNTSearchResult(self.opener, title, downloadURL, quality))
        
        return results
Example #41
File: tpi.py Project: Arakmar/Sick-Beard
    def _doSearch(self, searchString, show=None, season=None, french=None):

        
        if not self.login_done:
            self._doLogin( sickbeard.TPI_USERNAME, sickbeard.TPI_PASSWORD )

        results = []

        searchUrl = self.url + '/parcourir.php?' + searchString.replace('!','')
 
        logger.log(u"Search string: " + searchUrl, logger.DEBUG)
        
        r = self.opener.open( searchUrl )

        soup = BeautifulSoup( r, "html.parser" )

        resultsTable = soup.find("table", { "class" : "ttable_headinner" })
        if resultsTable:

            rows = resultsTable.findAll("tr", { "class" : "t-row" })
    
            for row in rows:

                link = row.find("a", title=True)
                title = link['title']

                downloadURL = row.find("a",href=re.compile("\.torrent"))['href']
                
                
                quality = Quality.nameQuality( title )
                if quality==Quality.UNKNOWN and title:
                    if '720p' not in title.lower() and '1080p' not in title.lower():
                        quality=Quality.SDTV
                if show and french==None:
                    results.append( TPISearchResult( self.opener, title, downloadURL, quality, str(show.audio_lang) ) )
                elif show and french:
                    results.append( TPISearchResult( self.opener, title, downloadURL, quality, 'fr' ) )
                else:
                    results.append( TPISearchResult( self.opener, title, downloadURL, quality ) )
        
        return results
Example #42
File: t411.py Project: FunFR/Sick-Beard
    def _doSearch(self, searchString, show=None, season=None):
        
        if not self.login_done:
            self._doLogin( sickbeard.T411_USERNAME, sickbeard.T411_PASSWORD )

        results = []
        searchUrl = self.url + '/torrents/search/?' + searchString
        logger.log(u"Search string: " + searchUrl, logger.DEBUG)
        
        r = self.opener.open( searchUrl )
        soup = BeautifulSoup( r )
        resultsTable = soup.find("table", { "class" : "results" })
        if resultsTable:
            rows = resultsTable.find("tbody").findAll("tr")
    
            for row in rows:
                link = row.find("a", title=True)
                title = link['title']
                
                pageURL = link['href']
                if pageURL.startswith("//"):
                    pageURL = "http:" + pageURL
                
                torrentPage = self.opener.open( pageURL )
                torrentSoup = BeautifulSoup( torrentPage )
               
                downloadTorrentLink = torrentSoup.find("a", text=u"Télécharger")
                if downloadTorrentLink:
                    
                    downloadURL = self.url + downloadTorrentLink['href']
                    
                    quality = Quality.nameQuality( title )

                    if show:
                        results.append( T411SearchResult( self.opener, link['title'], downloadURL, quality, str(show.audio_lang) ) )
                    else:
                        results.append( T411SearchResult( self.opener, link['title'], downloadURL, quality ) )

        return results
Example #43
File: gks.py Project: JMDGBE/Sick-Beard
    def _doSearch(self, searchString, show=None, season=None):
        results = []
        searchUrl = self.url+'rdirect.php?type=search&'+searchString
        logger.log(u"Search URL: " + searchUrl, logger.DEBUG)
        
        data = self.getURL(searchUrl)
        if "bad key" in str(data).lower() :
            logger.log(u"GKS key invalid, check your config", logger.ERROR)
            return []

        parsedXML = parseString(data)
        channel = parsedXML.getElementsByTagName('channel')[0]
        description = channel.getElementsByTagName('description')[0]
        description_text = helpers.get_xml_text(description).lower()
        
        if "user can't be found" in description_text:
            logger.log(u"GKS invalid digest, check your config", logger.ERROR)
            return []
        elif "invalid hash" in description_text:
            logger.log(u"GKS invalid hash, check your config", logger.ERROR)
            return []
        else:
            items = channel.getElementsByTagName('item')
            for item in items:
                title = helpers.get_xml_text(item.getElementsByTagName('title')[0])
                if "aucun resultat" in title.lower():
                    logger.log(u"No results found in " + searchUrl, logger.DEBUG)
                    return []
                else:
                    downloadURL = helpers.get_xml_text(item.getElementsByTagName('link')[0])
                    quality = Quality.nameQuality(title)
                    if show:
                        results.append( GksSearchResult( self.opener, title, downloadURL, quality, str(show.audio_lang) ) )
                    else:
                        results.append( GksSearchResult( self.opener, title, downloadURL, quality ) )
        return results
Example #44
    def _parseItem(self, item):

        if item.findtext('title') == "You must be logged in to view this feed":
            raise exceptions.AuthException(
                "TVBinz authentication details are incorrect, check your config"
            )

        if item.findtext('title') is None or item.findtext('link') is None:
            logger.log(
                u"The XML returned from the TVBinz RSS feed is incomplete, this result is unusable: "
                + str(item), logger.ERROR)
            return

        title = item.findtext('title')
        url = item.findtext('link').replace('&amp;', '&')

        sInfo = item.find('{http://tvbinz.net/rss/tvb/}seriesInfo')
        if sInfo is None:
            logger.log(
                u"No series info, this is some kind of non-standard release, ignoring it",
                logger.DEBUG)
            return

        logger.log(u"Adding item from RSS to cache: " + title, logger.DEBUG)

        quality = Quality.nameQuality(title)

        if sInfo.findtext('{http://tvbinz.net/rss/tvb/}tvrID') is None:
            tvrid = 0
        else:
            tvrid = int(sInfo.findtext('{http://tvbinz.net/rss/tvb/}tvrID'))

        # since TVBinz normalizes the scene names it's more reliable to parse the episodes out myself
        # than to rely on it, because it doesn't support multi-episode numbers in the feed
        self._addCacheEntry(title, url, tvrage_id=tvrid, quality=quality)
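The '{http://tvbinz.net/rss/tvb/}' prefixes above are ElementTree's Clark notation for namespaced tags. A self-contained sketch with synthetic feed data (the item content is made up):

import xml.etree.ElementTree as ET

TVB_NS = '{http://tvbinz.net/rss/tvb/}'

item = ET.fromstring(
    '<item xmlns:tvb="http://tvbinz.net/rss/tvb/">'
    '<title>Show.S01E01.HDTV</title>'
    '<link>http://example.com/get?id=1&amp;key=2</link>'
    '<tvb:seriesInfo><tvb:tvrID>12345</tvb:tvrID></tvb:seriesInfo>'
    '</item>')

print item.findtext('title')                        # Show.S01E01.HDTV
print item.findtext('link')                         # entities already decoded by the parser
print item.find(TVB_NS + 'seriesInfo') is not None  # True
print item.findtext(TVB_NS + 'seriesInfo/' + TVB_NS + 'tvrID')  # 12345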
Example #45
File: ftdb.py Project: lodoss5/Sick-Beard
    def _doSearch(self, searchString, show=None, season=None, french=None):

        if not self.login_done:
            self._doLogin( sickbeard.FTDB_USERNAME, sickbeard.FTDB_PASSWORD )

        results = []
        searchUrl = self.url + '/?section=TORRENTS&' + searchString.replace('!','')
        logger.log(u"Search string: " + searchUrl, logger.DEBUG)

        r = self.opener.open( searchUrl )
        soup = BeautifulSoup( r, "html.parser" )
        resultsTable = soup.find("div", { "class" : "DataGrid" })
        if resultsTable:
            rows = resultsTable.findAll("ul")

            for row in rows:
                link = row.find("a", title=True)
                title = link['title']

                autogetURL = self.url + (row.find("li", { "class" : "torrents_name"}).find('a')['href'][1:]).replace('#FTD_MENU','&menu=4')
                r = self.opener.open(autogetURL).read()
                soup = BeautifulSoup( r, "html.parser" )
                downloadURL = soup.find("div", { "class" : "autoget"}).find('a')['href']

                quality = Quality.nameQuality(title)
                if quality == Quality.UNKNOWN and title:
                    if '720p' not in title.lower() and '1080p' not in title.lower():
                        quality = Quality.SDTV
                if show and french is None:
                    results.append( FTDBSearchResult( self.opener, link['title'], downloadURL, quality, str(show.audio_lang) ) )
                elif show and french:
                    results.append( FTDBSearchResult( self.opener, link['title'], downloadURL, quality, 'fr' ) )
                else:
                    results.append( FTDBSearchResult( self.opener, link['title'], downloadURL, quality ) )

        return results
Example #46
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        self.categories = "cat=" + str(self.cat)

        for mode in search_params:
            items = []
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                if mode == 'RSS':
                    self.page = 2

                last_page = 0
                y = int(self.page)

                if search_string == '':
                    continue

                search_string = str(search_string).replace('.', ' ')

                for x in range(0, y):
                    z = x * 20
                    if last_page:
                        break

                    if mode != 'RSS':
                        search_url = (self.urls['search_page'] + '&filter={2}').format(z, self.categories, search_string)
                    else:
                        search_url = self.urls['search_page'].format(z, self.categories)

                    if mode != 'RSS':
                        logger.log(u"Search string: {0}".format
                                   (search_string.decode("utf-8")), logger.DEBUG)

                    data = self.get_url(search_url, returns='text')
                    if not data:
                        logger.log(u"No data returned from provider", logger.DEBUG)
                        continue

                    try:
                        with BS4Parser(data, 'html5lib') as html:
                            torrent_table = html.find('table', class_='copyright')
                            torrent_rows = torrent_table('tr') if torrent_table else []

                            # Continue only if one Release is found
                            if len(torrent_rows) < 3:
                                logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                                last_page = 1
                                continue

                            if len(torrent_rows) < 42:
                                last_page = 1

                            for result in torrent_table('tr')[2:]:

                                try:
                                    link = result.find('td').find('a')
                                    title = link.string
                                    download_url = self.urls['download'] % result('td')[8].find('a')['href'][-8:]
                                    leechers = result('td')[3]('td')[1].text
                                    leechers = int(leechers.strip('[]'))
                                    seeders = result('td')[3]('td')[2].text
                                    seeders = int(seeders.strip('[]'))
                                    torrent_size = result('td')[3]('td')[3].text.strip('[]') + " GB"
                                    size = convert_size(torrent_size) or -1
                                except (AttributeError, TypeError):
                                    continue

                                filename_qt = self._reverseQuality(self._episodeQuality(result))
                                for text in self.hdtext:
                                    title1 = title
                                    title = title.replace(text, filename_qt)
                                    if title != title1:
                                        break

                                if Quality.nameQuality(title) == Quality.UNKNOWN:
                                    title += filename_qt

                                if not self._is_italian(result) and not self.subtitle:
                                    logger.log(u"Torrent is subtitled, skipping: {0} ".format(title), logger.DEBUG)
                                    continue

                                if self.engrelease and not self._is_english(result):
                                    logger.log(u"Torrent isnt english audio/subtitled , skipping: {0} ".format(title), logger.DEBUG)
                                    continue

                                search_show = re.split(r'([Ss]\d{1,2})', search_string)[0]
                                show_title = search_show
                                ep_params = ''
                                rindex = re.search(r'([Ss]\d{1,2})', title)
                                if rindex:
                                    show_title = title[:rindex.start()]
                                    ep_params = title[rindex.start():]
                                if show_title.lower() != search_show.lower() and search_show.lower() in show_title.lower():
                                    title = search_show + ep_params

                                if not all([title, download_url]):
                                    continue

                                if self._is_season_pack(title):
                                    title = re.sub(r'([Ee][\d-]+)', '', title)

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    if mode != 'RSS':
                                        logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                                   (title, seeders, leechers), logger.DEBUG)
                                    continue

                                item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                                if mode != 'RSS':
                                    logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                                items.append(item)

                    except Exception:
                        logger.log(u"Failed parsing provider. Traceback: {0}".format(traceback.format_exc()), logger.ERROR)

                # For each search mode sort all the items by seeders if available
                items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

                results += items

        return results
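The paging logic above is easy to miss inside the parsing code: results are fetched 20 at a time (z = x * 20) until a short page (fewer than 42 table rows) marks the end. A condensed sketch of just that control flow (fetch_rows is a hypothetical callable returning the parsed rows for one offset):

def iter_result_rows(fetch_rows, max_pages):
    # Walk the provider's result pages by offset, 20 results per page,
    # stopping early once a short page signals the last one.
    for page in range(max_pages):
        rows = fetch_rows(page * 20)
        for row in rows:
            yield row
        if len(rows) < 42:
            break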
Example #47
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []

        for mode in search_params:
            items = []
            logger.log(u'Search Mode: {0}'.format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                self.page = 1
                last_page = 0
                y = int(self.page)

                if search_string == '':
                    continue

                search_string = str(search_string).replace('.', ' ')

                for x in range(0, y):

                    if last_page:
                        break

                    search_url = self.urls['search_page'].format(search_string, x)

                    logger.log(u'Search string: {0}'.format(search_string.decode('utf-8')), logger.DEBUG)

                    data = self.get_url(search_url, returns='text')
                    if not data:
                        logger.log(u'No data returned from provider', logger.DEBUG)
                        continue

                    try:
                        with BS4Parser(data, 'html5lib') as html:
                            table_header = html.find('tr', class_='bordo')
                            torrent_table = table_header.find_parent('table') if table_header else None
                            if not torrent_table:
                                logger.log(u'Could not find table of torrents', logger.ERROR)
                                continue

                            torrent_rows = torrent_table('tr')

                            # Continue only if one Release is found
                            if (len(torrent_rows) < 6) or (len(torrent_rows[2]('td')) == 1):
                                logger.log(u'Data returned from provider does not contain any torrents', logger.DEBUG)
                                last_page = 1
                                continue

                            if len(torrent_rows) < 45:
                                last_page = 1

                            for result in torrent_rows[2:-3]:

                                try:
                                    link = result('td')[1].find('a')['href']
                                    title = re.sub(' +',' ', link.rsplit('/', 1)[-1].replace('_', ' '))
                                    hash = result('td')[3].find('input', class_='downarrow')['value'].upper()
                                    seeders = try_int(result('td')[5].text)
                                    leechers = try_int(result('td')[6].text)
                                    torrent_size = result('td')[2].string
                                    size = convert_size(torrent_size) or -1

                                    # Download Urls
                                    download_url = self.urls['download'] % hash
                                    if urllib.urlopen(download_url).getcode() == 404:
                                        logger.log(u'Torrent hash not found in itorrents.org, searching for magnet',
                                                   logger.DEBUG)
                                        data_detail = self.get_url(link, returns='text')
                                        with BS4Parser(data_detail, 'html5lib') as html_detail:
                                            sources_row = html_detail.find('td', class_='header2').parent
                                            source_magnet = sources_row('td')[1].find('a', class_='forbtn', title='Magnet')
                                            if source_magnet and not source_magnet == 'None':
                                                download_url = source_magnet['href']
                                            else:
                                                continue

                                except (AttributeError, TypeError):
                                    continue

                                filename_qt = self._reverseQuality(self._episodeQuality(result))
                                for text in self.hdtext:
                                    title1 = title
                                    title = title.replace(text, filename_qt)
                                    if title != title1:
                                        break

                                if Quality.nameQuality(title) == Quality.UNKNOWN:
                                    title += filename_qt

                                if not self._is_italian(title) and not self.subtitle:
                                    logger.log(u'Torrent is subtitled, skipping: {0} '.format(title), logger.DEBUG)
                                    continue

                                if self.engrelease and not self._is_english(title):
                                    logger.log(u"Torrent isn't english audio/subtitled, skipping: {0}".format(title), logger.DEBUG)
                                    continue

                                search_show = re.split(r'([Ss]\d{1,2})', search_string)[0]
                                show_title = search_show
                                ep_params = ''
                                rindex = re.search(r'([Ss]\d{1,2})', title)
                                if rindex:
                                    show_title = title[:rindex.start()]
                                    ep_params = title[rindex.start():]
                                if show_title.lower() != search_show.lower() and search_show.lower() in show_title.lower():
                                    title = search_show + ep_params

                                if not all([title, download_url]):
                                    continue

                                if self._is_season_pack(title):
                                    title = re.sub(r'([Ee][\d-]+)', '', title)

                                # Filter unseeded torrent
                                if seeders < self.minseed or leechers < self.minleech:
                                    logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                    continue

                                item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                                if mode != 'RSS':
                                    logger.log(u'Found result: {0} with {1} seeders and {2} leechers'.format(title, seeders, leechers), logger.DEBUG)

                                items.append(item)

                    except Exception:
                        logger.log(u'Failed parsing provider. Traceback: {0}'.format(traceback.format_exc()), logger.ERROR)

                # For each search mode sort all the items by seeders if available
                items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

                results += items

        return results
Example #49
                u"Successful match! Result " + parse_result.original_name + " matched to show " + parse_result.show.name,
                logger.DEBUG)

            # set the indexerid in the db to the show's indexerid
            curProper.indexerid = parse_result.show.indexerid

            # set the indexer in the db to the show's indexer
            curProper.indexer = parse_result.show.indexer

            # populate our Proper instance
            curProper.show = parse_result.show
            curProper.season = parse_result.season_number if parse_result.season_number is not None else 1
            curProper.episode = parse_result.episode_numbers[0]
            curProper.release_group = parse_result.release_group
            curProper.version = parse_result.version
            curProper.quality = Quality.nameQuality(curProper.name, parse_result.is_anime)

            # only get anime proper if it has release group and version
            if parse_result.is_anime:
                if not curProper.release_group and curProper.version == -1:
                    logger.log(u"Proper " + curProper.name + " doesn't have a release group and version, ignoring it",
                               logger.DEBUG)
                    continue

            if not show_name_helpers.filterBadReleases(curProper.name, parse=False):
                logger.log(u"Proper " + curProper.name + " isn't a valid scene release that we want, ignoring it",
                           logger.DEBUG)
                continue

            if parse_result.show.rls_ignore_words and search.filter_release_name(curProper.name,
                                                                                 parse_result.show.rls_ignore_words):
Example #50
class TVCache():
    def __init__(self, provider):

        self.provider = provider
        self.providerID = self.provider.getID()
        self.minTime = 10

    def _getDB(self):

        return CacheDBConnection(self.providerID)

    def _clearCache(self):

        myDB = self._getDB()

        myDB.action("DELETE FROM " + self.providerID + " WHERE 1")

    def _getRSSData(self):

        data = None

        return data

    def _checkAuth(self, parsedXML):
        return True

    def _checkItemAuth(self, title, url):
        return True

    def updateCache(self):

        if not self.shouldUpdate():
            return

        if self._checkAuth(None):

            data = self._getRSSData()

            # as long as the http request worked we count this as an update
            if data:
                self.setLastUpdate()
            else:
                return []

            # now that we've loaded the current RSS feed lets delete the old cache
            logger.log(u"Clearing " + self.provider.name +
                       " cache and updating with new information")
            self._clearCache()

            parsedXML = helpers.parse_xml(data)

            if parsedXML is None:
                logger.log(
                    u"Error trying to load " + self.provider.name +
                    " RSS feed", logger.ERROR)
                return []

            if self._checkAuth(parsedXML):

                if parsedXML.tag == 'rss':
                    items = parsedXML.findall('.//item')

                else:
                    logger.log(
                        u"Resulting XML from " + self.provider.name +
                        " isn't RSS, not parsing it", logger.ERROR)
                    return []

                for item in items:
                    self._parseItem(item)

            else:
                raise AuthException(u"Your authentication credentials for " +
                                    self.provider.name +
                                    " are incorrect, check your config")

        return []

    def _translateTitle(self, title):
        return title.replace(' ', '.')

    def _translateLinkURL(self, url):
        return url.replace('&amp;', '&')

    def _parseItem(self, item):

        title = helpers.get_xml_text(item.find('title'))
        url = helpers.get_xml_text(item.find('link'))

        self._checkItemAuth(title, url)

        if title and url:
            title = self._translateTitle(title)
            url = self._translateLinkURL(url)

            logger.log(u"Adding item from RSS to cache: " + title,
                       logger.DEBUG)
            self._addCacheEntry(title, url)

        else:
            logger.log(
                u"The XML returned from the " + self.provider.name +
                " feed is incomplete, this result is unusable", logger.DEBUG)
            return

    def _getLastUpdate(self):
        myDB = self._getDB()
        sqlResults = myDB.select(
            "SELECT time FROM lastUpdate WHERE provider = ?",
            [self.providerID])

        if sqlResults:
            lastTime = int(sqlResults[0]["time"])
        else:
            lastTime = 0

        return datetime.datetime.fromtimestamp(lastTime)

    def setLastUpdate(self, toDate=None):

        if not toDate:
            toDate = datetime.datetime.today()

        myDB = self._getDB()
        myDB.upsert("lastUpdate",
                    {'time': int(time.mktime(toDate.timetuple()))},
                    {'provider': self.providerID})

    lastUpdate = property(_getLastUpdate)

    def shouldUpdate(self):
        # if we've updated recently then skip the update
        if datetime.datetime.today() - self.lastUpdate < datetime.timedelta(
                minutes=self.minTime):
            logger.log(
                u"Last update was too soon, using old cache: today()-" +
                str(self.lastUpdate) + "<" +
                str(datetime.timedelta(minutes=self.minTime)), logger.DEBUG)
            return False

        return True

    def _addCacheEntry(self,
                       name,
                       url,
                       season=None,
                       episodes=None,
                       tvdb_id=0,
                       tvrage_id=0,
                       quality=None,
                       extraNames=[]):

        myDB = self._getDB()

        parse_result = None

        # if we don't have complete info then parse the filename to get it
        for curName in [name] + extraNames:
            try:
                myParser = NameParser()
                parse_result = myParser.parse(curName)
            except InvalidNameException:
                logger.log(
                    u"Unable to parse the filename " + curName +
                    " into a valid episode", logger.DEBUG)
                continue

        if not parse_result:
            logger.log(
                u"Giving up because I'm unable to parse this name: " + name,
                logger.DEBUG)
            return False

        if not parse_result.series_name:
            logger.log(
                u"No series name retrieved from " + name +
                ", unable to cache it", logger.DEBUG)
            return False

        tvdb_lang = None

        # if we need tvdb_id or tvrage_id then search the DB for them
        if not tvdb_id or not tvrage_id:

            # if we have only the tvdb_id, use the database
            if tvdb_id:
                showObj = helpers.findCertainShow(sickbeard.showList, tvdb_id)
                if showObj:
                    tvrage_id = showObj.tvrid
                    tvdb_lang = showObj.lang
                else:
                    logger.log(
                        u"We were given a TVDB id " + str(tvdb_id) +
                        " but it doesn't match a show we have in our list, so leaving tvrage_id empty",
                        logger.DEBUG)
                    tvrage_id = 0

            # if we have only a tvrage_id then use the database
            elif tvrage_id:
                showObj = helpers.findCertainTVRageShow(
                    sickbeard.showList, tvrage_id)
                if showObj:
                    tvdb_id = showObj.tvdbid
                    tvdb_lang = showObj.lang
                else:
                    logger.log(
                        u"We were given a TVRage id " + str(tvrage_id) +
                        " but it doesn't match a show we have in our list, so leaving tvdb_id empty",
                        logger.DEBUG)
                    tvdb_id = 0

            # if they're both empty then fill out as much info as possible by searching the show name
            else:

                # check the name cache and see if we already know what show this is
                logger.log(
                    u"Checking the cache to see if we already know the tvdb id of "
                    + parse_result.series_name, logger.DEBUG)
                tvdb_id = name_cache.retrieveNameFromCache(
                    parse_result.series_name)

                # remember if the cache lookup worked or not so we know whether we should bother updating it later
                if tvdb_id is None:
                    logger.log(
                        u"No cache results returned, continuing on with the search",
                        logger.DEBUG)
                    from_cache = False
                else:
                    logger.log(
                        u"Cache lookup found " + repr(tvdb_id) +
                        ", using that", logger.DEBUG)
                    from_cache = True

                # if the cache failed, try looking up the show name in the database
                if tvdb_id is None:
                    logger.log(
                        u"Trying to look the show up in the show database",
                        logger.DEBUG)
                    showResult = helpers.searchDBForShow(
                        parse_result.series_name)
                    if showResult:
                        logger.log(
                            parse_result.series_name +
                            " was found to be show " + showResult[1] + " (" +
                            str(showResult[0]) + ") in our DB.", logger.DEBUG)
                        tvdb_id = showResult[0]

                # if the DB lookup fails then do a comprehensive regex search
                if tvdb_id is None:
                    logger.log(
                        u"Couldn't figure out a show name straight from the DB, trying a regex search instead",
                        logger.DEBUG)
                    for curShow in sickbeard.showList:
                        if show_name_helpers.isGoodResult(
                                name, curShow, False):
                            logger.log(
                                u"Successfully matched " + name + " to " +
                                curShow.name + " with regex", logger.DEBUG)
                            tvdb_id = curShow.tvdbid
                            tvdb_lang = curShow.lang
                            break

                # if the lookup didn't come from the name cache, store whatever
                # we ended up with (even 0/None) so we don't repeat the search
                if not from_cache:
                    name_cache.addNameToCache(parse_result.series_name,
                                              tvdb_id)

                # if we came out with tvdb_id = None it means we couldn't figure it out at all, just use 0 for that
                if tvdb_id is None:
                    tvdb_id = 0

                # if we found the show then retrieve the show object
                if tvdb_id:
                    showObj = helpers.findCertainShow(sickbeard.showList,
                                                      tvdb_id)
                    if showObj:
                        tvrage_id = showObj.tvrid
                        tvdb_lang = showObj.lang

        # if we weren't provided with season/episode information then get it from the name that we parsed
        if not season:
            season = parse_result.season_number if parse_result.season_number is not None else 1
        if not episodes:
            episodes = parse_result.episode_numbers

        # if we have an air-by-date show then get the real season/episode numbers
        if parse_result.air_by_date and tvdb_id:
            try:
                # There's gotta be a better way of doing this but we don't wanna
                # change the language value elsewhere
                ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()

                if not (tvdb_lang == "" or tvdb_lang == "en"
                        or tvdb_lang == None):
                    ltvdb_api_parms['language'] = tvdb_lang

                t = tvdb_api.Tvdb(**ltvdb_api_parms)
                epObj = t[tvdb_id].airedOn(parse_result.air_date)[0]
                season = int(epObj["seasonnumber"])
                episodes = [int(epObj["episodenumber"])]
            except tvdb_exceptions.tvdb_episodenotfound:
                logger.log(
                    u"Unable to find episode with date " +
                    str(parse_result.air_date) + " for show " +
                    parse_result.series_name + ", skipping", logger.WARNING)
                return False
            except tvdb_exceptions.tvdb_error, e:
                logger.log(u"Unable to contact TVDB: " + ex(e), logger.WARNING)
                return False

        episodeText = "|" + "|".join(map(str, episodes)) + "|"

        # get the current timestamp
        curTimestamp = int(time.mktime(datetime.datetime.today().timetuple()))

        if not quality:
            quality = Quality.nameQuality(name)

        myDB.action(
            "INSERT INTO " + self.providerID +
            " (name, season, episodes, tvrid, tvdbid, url, time, quality) VALUES (?,?,?,?,?,?,?,?)",
            [
                name, season, episodeText, tvrage_id, tvdb_id, url,
                curTimestamp, quality
            ])
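updateCache() above is only a skeleton: a concrete provider cache supplies _getRSSData() (fetch the raw feed) and usually its own _parseItem(). A minimal sketch of such a subclass, reusing the helpers from this class (ExampleCache and the 'rss' URL suffix are made up; the provider object is assumed to expose getURL() as in the other snippets):

class ExampleCache(TVCache):
    def __init__(self, provider):
        TVCache.__init__(self, provider)
        self.minTime = 15  # poll the feed at most every 15 minutes

    def _getRSSData(self):
        # updateCache() treats a false-y return value as a failed request
        # and keeps the old cache contents.
        return self.provider.getURL(self.provider.url + 'rss')

    def _parseItem(self, item):
        title = helpers.get_xml_text(item.find('title'))
        url = helpers.get_xml_text(item.find('link'))
        if title and url:
            self._addCacheEntry(self._translateTitle(title),
                                self._translateLinkURL(url))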
Example #51
File: speed.py Project: fakegit/Sick-Beard
    def getQuality(self, item):
        quality = Quality.nameQuality(item[0])
        return quality
Example #52
    def getQuality(self, item):

        filename = item.filename
        quality = Quality.nameQuality(filename)

        return quality
Example #53
class BinNewzProvider(generic.NZBProvider):
    allowedGroups = {
        'abmulti': 'alt.binaries.multimedia',
        'abtvseries': 'alt.binaries.tvseries',
        'abtv': 'alt.binaries.tv',
        'a.b.teevee': 'alt.binaries.teevee',
        'abstvdivxf': 'alt.binaries.series.tv.divx.french',
        'abhdtvx264fr': 'alt.binaries.hdtv.x264.french',
        'abmom': 'alt.binaries.mom',
        'abhdtv': 'alt.binaries.hdtv',
        'abboneless': 'alt.binaries.boneless',
        'abhdtvf': 'alt.binaries.hdtv.french',
        'abhdtvx264': 'alt.binaries.hdtv.x264',
        'absuperman': 'alt.binaries.superman',
        'abechangeweb': 'alt.binaries.echange-web',
        'abmdfvost': 'alt.binaries.movies.divx.french.vost',
        'abdvdr': 'alt.binaries.dvdr',
        'abmzeromov': 'alt.binaries.movies.zeromovies',
        'abcfaf': 'alt.binaries.cartoons.french.animes-fansub',
        'abcfrench': 'alt.binaries.cartoons.french',
        'abgougouland': 'alt.binaries.gougouland',
        'abroger': 'alt.binaries.roger',
        'abtatu': 'alt.binaries.tatu',
        'abstvf': 'alt.binaries.series.tv.french',
        'abmdfreposts': 'alt.binaries.movies.divx.french.reposts',
        'abmdf': 'alt.binaries.movies.french',
        'ab.aa': 'alt.binaries.aa',
        'abspectdf': 'alt.binaries.spectacles.divx.french'
    }

    qualityCategories = {3: ['24', '7', '56'], 500: ['44', '53']}

    qualityMinSize = {
        (Quality.SDTV, Quality.SDDVD): 130,
        Quality.HDTV: 500,
        (Quality.HDWEBDL, Quality.HDBLURAY, Quality.FULLHDBLURAY, Quality.FULLHDTV, Quality.FULLHDWEBDL): 600
    }

    url = "http://www.binnews.in/"
    supportsBacklog = True
    nzbDownloaders = [BinSearch(), NZBIndex(), NZBClub()]

    def __init__(self):
        generic.NZBProvider.__init__(self, "BinnewZ")

    def isEnabled(self):
        return sickbeard.BINNEWZ

    def _get_season_search_strings(self, show, season, episode=None):
        showNam = show_name_helpers.allPossibleShowNames(show)
        showNames = list(set(showNam))
        result = []
        global globepid
        global searchstringlist
        searchstringlist = []
        globepid = show.tvdbid
        for showName in showNames:
            result.append(showName + ".saison %2d" % season)
        return result

    def _get_episode_search_strings(self, ep_obj, french=None):
        strings = []
        showNam = show_name_helpers.allPossibleShowNames(ep_obj.show)
        showNames = list(set(showNam))
        global globepid
        global searchstringlist
        searchstringlist = []
        myDB = db.DBConnection()
        epidr = myDB.select(
            "SELECT episode_id from tv_episodes where tvdbid=?",
            [ep_obj.tvdbid])
        globepid = epidr[0][0]
        for showName in showNames:
            strings.append("%s S%02dE%02d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%02dE%d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%dE%02d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s %dx%d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%02d E%02d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%02d E%d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%d E%02d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%02dEp%02d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%02dEp%d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%dEp%02d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%02d Ep%02d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%02d Ep%d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%d Ep%02d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%02d Ep %02d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%02d Ep %d" %
                           (showName, ep_obj.season, ep_obj.episode))
            strings.append("%s S%d Ep %02d" %
                           (showName, ep_obj.season, ep_obj.episode))
        return strings

    def _get_title_and_url(self, item):
        cleanTitle = re.sub(r'(\s*\[[\w\s]+\-\w+\])', "", item.title)
        return cleanTitle, item.refererURL

    def getQuality(self, item):
        return item.quality

    def buildUrl(self, searchString, quality):
        if quality in self.qualityCategories:
            data = {
                'chkInit': '1',
                'edTitre': searchString,
                'chkTitre': 'on',
                'chkFichier': 'on',
                'chkCat': 'on',
                'cats[]': self.qualityCategories[quality],
                'edAge': '',
                'edYear': ''
            }
        else:
            data = {
                'b_submit': 'BinnewZ',
                'cats[]': 'all',
                'edSearchAll': searchString,
                'sections[]': 'all'
            }
        return data

    #wtf with the signature change...
    def _doSearch(self,
                  searchString=None,
                  show=None,
                  season=None,
                  french=None):
        if searchString is None:
            return []
        logger.log("BinNewz : Searching for " + searchString, logger.DEBUG)
        data = self.buildUrl(searchString, show.quality)
        try:
            soup = BeautifulSoup(
                urllib2.urlopen("http://www.binnews.in/_bin/search2.php",
                                urllib.urlencode(data, True)))
        except Exception, e:
            logger.log(u"Error trying to load BinNewz response: " + str(e),
                       logger.ERROR)
            return []

        results = []
        tables = soup.findAll("table", id="tabliste")
        for table in tables:
            if len(results) > 5:
                break
            rows = table.findAll("tr")
            for row in rows:

                cells = row.select("> td")
                if len(cells) < 11:
                    continue

                name = cells[2].text.strip()
                language = cells[3].find("img").get("src")

                if show:
                    if show.audio_lang == "fr" or french:
                        if not "_fr" in language:
                            continue
                    elif show.audio_lang == "en":
                        if "_fr" in language:
                            continue

                # blacklist_groups = [ "alt.binaries.multimedia" ]
                blacklist_groups = []

                newgroupLink = cells[4].find("a")
                newsgroup = None
                if newgroupLink.contents:
                    newsgroup = newgroupLink.contents[0]
                    if newsgroup in self.allowedGroups:
                        newsgroup = self.allowedGroups[newsgroup]
                    else:
                        logger.log(u"Unknown binnewz newsgroup: " + newsgroup,
                                   logger.ERROR)
                        continue
                    if newsgroup in blacklist_groups:
                        logger.log(
                            u"Ignoring result, newsgroup is blacklisted: " +
                            newsgroup, logger.WARNING)
                        continue

                filename = cells[5].contents[0]

                acceptedQualities = Quality.splitQuality(show.quality)[0]
                quality = Quality.nameQuality(filename)
                if quality == Quality.UNKNOWN:
                    quality = self.getReleaseQuality(name)
                if quality not in acceptedQualities:
                    continue
                if filename in searchstringlist:
                    continue

                minSize = self.qualityMinSize.get(quality, 100)
                searchItems = []
                #multiEpisodes = False

                rangeMatcher = re.search(
                    "(?i).*(?<![\s\.\-_])[\s\.\-_]+s?(?:aison)?[\s\.\-_]*\d{1,2}[\s\.\-_]?(?:x|dvd|[eéEÉ](?:p|pisodes?)?)[\s\.\-_]*(\d{1,2})(?:(?:[\s\.\-_]*(?:[aàAÀ,/\-\.\s\&_]|et|and|to|x)[\s\.\-_]*(?:x|dvd|[eéEÉ](?:p|pisodes?)?)?[\s\.\-_]*([0-9]{1,2})))+.*",
                    name)
                if rangeMatcher:
                    rangeStart = int(rangeMatcher.group(1))
                    rangeEnd = int(rangeMatcher.group(2))
                    if filename.find("*") != -1:
                        for i in range(rangeStart, rangeEnd + 1):
                            searchItem = filename.replace("**", str(i))
                            searchItem = searchItem.replace("*", str(i))
                            searchItems.append(searchItem)
                    #else:
                    #    multiEpisodes = True

                if len(searchItems) == 0:
                    searchItems.append(filename)

                for searchItem in searchItems:
                    for downloader in self.nzbDownloaders:
                        searchstringlist.append(searchItem)
                        logger.log("Searching for download : " + name +
                                   ", search string = " + searchItem + " on " +
                                   downloader.__class__.__name__)
                        try:
                            binsearch_result = downloader.search(
                                searchItem, minSize, newsgroup)
                            if binsearch_result:
                                links = []
                                if french:
                                    binsearch_result.audio_langs = 'fr'
                                else:
                                    binsearch_result.audio_langs = show.audio_lang
                                binsearch_result.title = searchString
                                binsearch_result.quality = quality
                                myDB = db.DBConnection()
                                listlink = myDB.select(
                                    "SELECT link from episode_links where episode_id =?",
                                    [globepid])
                                for dlink in listlink:
                                    links.append(dlink[0])
                                if binsearch_result.nzburl in links:
                                    continue
                                else:
                                    results.append(binsearch_result)
                                    logger.log("Found : " + searchItem +
                                               " on " +
                                               downloader.__class__.__name__)
                                    break
                        except Exception, e:
                            logger.log(
                                "Searching from " +
                                downloader.__class__.__name__ + " failed : " +
                                str(e), logger.ERROR)
Example #54
            logger.log(
                u"Successful match! Result " + parse_result.original_name +
                " matched to show " + parse_result.show.name, logger.DEBUG)

            # set the indexerid in the db to the show's indexerid
            curProper.indexerid = parse_result.show.indexerid

            # set the indexer in the db to the show's indexer
            curProper.indexer = parse_result.show.indexer

            # populate our Proper instance
            curProper.season = parse_result.season_number if parse_result.season_number is not None else 1
            curProper.episode = parse_result.episode_numbers[0]
            curProper.release_group = parse_result.release_group
            curProper.version = parse_result.version
            curProper.quality = Quality.nameQuality(curProper.name,
                                                    parse_result.is_anime)

            # only get anime proper if it has release group and version
            if parse_result.is_anime:
                if not curProper.release_group and curProper.version == -1:
                    logger.log(
                        u"Proper " + curProper.name +
                        " doesn't have a release group and version, ignoring it",
                        logger.DEBUG)
                    continue

            if not show_name_helpers.filterBadReleases(curProper.name):
                logger.log(
                    u"Proper " + curProper.name +
                    " isn't a valid scene release that we want, ignoring it",
                    logger.DEBUG)
class ProperFinder():
    def __init__(self):
        self.updateInterval = datetime.timedelta(hours=1)

    def run(self):

        if not sickbeard.DOWNLOAD_PROPERS:
            return

        # look for propers every night at 1 AM
        updateTime = datetime.time(hour=1)

        logger.log(u"Checking proper time", logger.DEBUG)

        hourDiff = datetime.datetime.today().time().hour - updateTime.hour

        # if it's less than an interval after the update time then do an update
        if hourDiff >= 0 and hourDiff < self.updateInterval.seconds / 3600:
            logger.log(u"Beginning the search for new propers")
        else:
            return

        propers = self._getProperList()

        self._downloadPropers(propers)

    def _getProperList(self):

        propers = {}

        # for each provider get a list of the propers
        for curProvider in providers.sortedProviderList():

            if not curProvider.isActive():
                continue

            search_date = datetime.datetime.today() - datetime.timedelta(
                days=2)

            logger.log(u"Searching for any new PROPER releases from " +
                       curProvider.name)
            try:
                curPropers = curProvider.findPropers(search_date)
            except exceptions.AuthException, e:
                logger.log(u"Authentication error: " + ex(e), logger.ERROR)
                continue

            # if they haven't been added by a different provider then add the proper to the list
            for x in curPropers:
                name = self._genericName(x.name)

                if not name in propers:
                    logger.log(u"Found new proper: " + x.name, logger.DEBUG)
                    x.provider = curProvider
                    propers[name] = x

        # take the list of unique propers and get it sorted by
        sortedPropers = sorted(propers.values(),
                               key=operator.attrgetter('date'),
                               reverse=True)
        finalPropers = []

        for curProper in sortedPropers:

            # parse the file name
            try:
                myParser = NameParser(False)
                parse_result = myParser.parse(curProper.name)
            except InvalidNameException:
                logger.log(
                    u"Unable to parse the filename " + curProper.name +
                    " into a valid episode", logger.DEBUG)
                continue

            if not parse_result.episode_numbers:
                logger.log(
                    u"Ignoring " + curProper.name +
                    " because it's for a full season rather than specific episode",
                    logger.DEBUG)
                continue

            # populate our Proper instance
            if parse_result.air_by_date:
                curProper.season = -1
                curProper.episode = parse_result.air_date
            else:
                curProper.season = parse_result.season_number if parse_result.season_number != None else 1
                curProper.episode = parse_result.episode_numbers[0]
            curProper.quality = Quality.nameQuality(curProper.name)

            # for each show in our list
            for curShow in sickbeard.showList:

                if not parse_result.series_name:
                    continue

                genericName = self._genericName(parse_result.series_name)

                # get the scene name masks
                sceneNames = set(
                    show_name_helpers.makeSceneShowSearchStrings(curShow))

                # for each scene name mask
                for curSceneName in sceneNames:

                    # if it matches
                    if genericName == self._genericName(curSceneName):
                        logger.log(
                            u"Successful match! Result " +
                            parse_result.series_name + " matched to show " +
                            curShow.name, logger.DEBUG)

                        # set the tvdbid in the db to the show's tvdbid
                        curProper.tvdbid = curShow.tvdbid

                        # since we found it, break out
                        break

                # if we found something in the inner for loop break out of this one
                if curProper.tvdbid != -1:
                    break

            if curProper.tvdbid == -1:
                continue

            if not show_name_helpers.filterBadReleases(curProper.name):
                logger.log(
                    u"Proper " + curProper.name +
                    " isn't a valid scene release that we want, igoring it",
                    logger.DEBUG)
                continue

            # if we have an air-by-date show then get the real season/episode numbers
            if curProper.season == -1 and curProper.tvdbid:
                showObj = helpers.findCertainShow(sickbeard.showList,
                                                  curProper.tvdbid)
                if not showObj:
                    logger.log(
                        u"This should never have happened, post a bug about this!",
                        logger.ERROR)
                    raise Exception("BAD STUFF HAPPENED")

                tvdb_lang = showObj.lang
                # There's gotta be a better way of doing this but we don't wanna
                # change the language value elsewhere
                ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()

                if tvdb_lang and not tvdb_lang == 'en':
                    ltvdb_api_parms['language'] = tvdb_lang

                try:
                    t = tvdb_api.Tvdb(**ltvdb_api_parms)
                    epObj = t[curProper.tvdbid].airedOn(curProper.episode)[0]
                    curProper.season = int(epObj["seasonnumber"])
                    curProper.episodes = [int(epObj["episodenumber"])]
                except tvdb_exceptions.tvdb_episodenotfound:
                    logger.log(
                        u"Unable to find episode with date " +
                        str(curProper.episode) + " for show " +
                        parse_result.series_name + ", skipping",
                        logger.WARNING)
                    continue

            # check if we actually want this proper (if it's the right quality)
            sqlResults = db.DBConnection().select(
                "SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
                [curProper.tvdbid, curProper.season, curProper.episode])
            if not sqlResults:
                continue
            oldStatus, oldQuality = Quality.splitCompositeStatus(
                int(sqlResults[0]["status"]))

            # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
            if oldStatus not in (DOWNLOADED,
                                 SNATCHED) or oldQuality != curProper.quality:
                continue

            # if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
            if curProper.tvdbid != -1 and (curProper.tvdbid, curProper.season,
                                           curProper.episode) not in map(
                                               operator.attrgetter(
                                                   'tvdbid', 'season',
                                                   'episode'), finalPropers):
                logger.log(u"Found a proper that we need: " +
                           str(curProper.name))
                finalPropers.append(curProper)

        return finalPropers
예제 #56
0
    def _getProperList(self):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        """
        Walk providers for propers
        """
        propers = {}

        search_date = datetime.datetime.today() - datetime.timedelta(days=2)

        # for each provider get a list of the
        origThreadName = threading.currentThread().name
        providers = [x for x in sickbeard.providers.sortedProviderList(sickbeard.RANDOMIZE_PROVIDERS) if x.is_active()]
        for curProvider in providers:
            threading.currentThread().name = origThreadName + " :: [" + curProvider.name + "]"

            logger.log(u"Searching for any new PROPER releases from " + curProvider.name)

            try:
                curPropers = curProvider.find_propers(search_date)
            except AuthException as e:
                logger.log(u"Authentication error: " + ex(e), logger.DEBUG)
                continue
            except Exception as e:
                logger.log(u"Exception while searching propers in " + curProvider.name + ", skipping: " + ex(e), logger.ERROR)
                logger.log(traceback.format_exc(), logger.DEBUG)
                continue

            # if they haven't been added by a different provider then add the proper to the list
            for x in curPropers:
                if not re.search(r'\b(proper|repack|real)\b', x.name, re.I):
                    logger.log(u'find_propers returned a non-proper result, skipping it.', logger.DEBUG)
                    continue

                name = self._genericName(x.name)
                if name not in propers:
                    logger.log(u"Found new proper: " + x.name, logger.DEBUG)
                    x.provider = curProvider
                    propers[name] = x

            threading.currentThread().name = origThreadName

        # take the list of unique propers and get it sorted by
        sortedPropers = sorted(propers.values(), key=operator.attrgetter('date'), reverse=True)
        finalPropers = []

        for curProper in sortedPropers:

            try:
                parse_result = NameParser(False).parse(curProper.name)
            except (InvalidNameException, InvalidShowException) as error:
                logger.log(u"{0}".format(error), logger.DEBUG)
                continue

            if not parse_result.series_name:
                continue

            if not parse_result.episode_numbers:
                logger.log(
                    u"Ignoring " + curProper.name + " because it's for a full season rather than specific episode",
                    logger.DEBUG)
                continue

            logger.log(
                u"Successful match! Result " + parse_result.original_name + " matched to show " + parse_result.show.name,
                logger.DEBUG)

            # set the indexerid in the db to the show's indexerid
            curProper.indexerid = parse_result.show.indexerid

            # set the indexer in the db to the show's indexer
            curProper.indexer = parse_result.show.indexer

            # populate our Proper instance
            curProper.show = parse_result.show
            curProper.season = parse_result.season_number if parse_result.season_number is not None else 1
            curProper.episode = parse_result.episode_numbers[0]
            curProper.release_group = parse_result.release_group
            curProper.version = parse_result.version
            curProper.quality = Quality.nameQuality(curProper.name, parse_result.is_anime)
            curProper.content = None

            # filter release
            bestResult = pickBestResult(curProper, parse_result.show)
            if not bestResult:
                logger.log(u"Proper " + curProper.name + " were rejected by our release filters.", logger.DEBUG)
                continue

            # only get anime proper if it has release group and version
            if bestResult.show.is_anime:
                if not bestResult.release_group and bestResult.version == -1:
                    logger.log(u"Proper " + bestResult.name + " doesn't have a release group and version, ignoring it",
                               logger.DEBUG)
                    continue

            # check if we actually want this proper (if it's the right quality)
            main_db_con = db.DBConnection()
            sql_results = main_db_con.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
                                             [bestResult.indexerid, bestResult.season, bestResult.episode])
            if not sql_results:
                continue

            # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
            oldStatus, oldQuality = Quality.splitCompositeStatus(int(sql_results[0]["status"]))
            if oldStatus not in (DOWNLOADED, SNATCHED) or oldQuality != bestResult.quality:
                continue

            # check if we actually want this proper (if it's the right release group and a higher version)
            if bestResult.show.is_anime:
                main_db_con = db.DBConnection()
                sql_results = main_db_con.select(
                    "SELECT release_group, version FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
                    [bestResult.indexerid, bestResult.season, bestResult.episode])

                oldVersion = int(sql_results[0]["version"])
                oldRelease_group = (sql_results[0]["release_group"])

                if -1 < oldVersion < bestResult.version:
                    logger.log(u"Found new anime v" + str(bestResult.version) + " to replace existing v" + str(oldVersion))
                else:
                    continue

                if oldRelease_group != bestResult.release_group:
                    logger.log(u"Skipping proper from release group: " + bestResult.release_group + ", does not match existing release group: " + oldRelease_group)
                    continue

            # if the show is in our list and there hasn't been a proper already added for that particular episode then add it to our list of propers
            if bestResult.indexerid != -1 and (bestResult.indexerid, bestResult.season, bestResult.episode) not in {(p.indexerid, p.season, p.episode) for p in finalPropers}:
                logger.log(u"Found a proper that we need: " + str(bestResult.name))
                finalPropers.append(bestResult)

        return finalPropers
예제 #57
0
def _get_proper_list(aired_since_shows,
                     recent_shows,
                     recent_anime,
                     proper_list=None):
    propers = {}

    # for each provider get a list of the
    orig_thread_name = threading.currentThread().name
    providers = [
        x for x in sickbeard.providers.sortedProviderList() if x.is_active()
    ]
    for cur_provider in providers:
        if not recent_anime and cur_provider.anime_only:
            continue

        if None is not proper_list:
            found_propers = proper_list.get(cur_provider.get_id(), [])
            if not found_propers:
                continue
        else:
            threading.currentThread(
            ).name = orig_thread_name + ' :: [' + cur_provider.name + ']'

            logger.log(u'Searching for new PROPER releases')

            try:
                found_propers = cur_provider.find_propers(
                    search_date=aired_since_shows,
                    shows=recent_shows,
                    anime=recent_anime)
            except exceptions.AuthException as e:
                logger.log(u'Authentication error: ' + ex(e), logger.ERROR)
                continue
            except Exception as e:
                logger.log(
                    u'Error while searching ' + cur_provider.name +
                    ', skipping: ' + ex(e), logger.ERROR)
                logger.log(traceback.format_exc(), logger.ERROR)
                continue
            finally:
                threading.currentThread().name = orig_thread_name

        # if they haven't been added by a different provider then add the proper to the list
        count = 0
        for x in found_propers:
            name = _generic_name(x.name)
            if name not in propers:
                try:
                    np = NameParser(False,
                                    try_scene_exceptions=True,
                                    showObj=x.parsed_show,
                                    indexer_lookup=False)
                    parse_result = np.parse(x.name)
                    if parse_result.series_name and parse_result.episode_numbers and \
                            (parse_result.show.indexer, parse_result.show.indexerid) in recent_shows + recent_anime:
                        cur_size = getattr(x, 'size', None)
                        if failed_history.has_failed(x.name, cur_size,
                                                     cur_provider.name):
                            continue
                        logger.log(u'Found new proper: ' + x.name,
                                   logger.DEBUG)
                        x.show = parse_result.show.indexerid
                        x.provider = cur_provider
                        x.is_repack, x.properlevel = Quality.get_proper_level(
                            parse_result.extra_info_no_name(),
                            parse_result.version,
                            parse_result.is_anime,
                            check_is_repack=True)
                        x.is_internal = parse_result.extra_info_no_name() and \
                            re.search(r'\binternal\b', parse_result.extra_info_no_name(), flags=re.I)
                        x.codec = _get_codec(parse_result.extra_info_no_name())
                        propers[name] = x
                        count += 1
                except (InvalidNameException, InvalidShowException):
                    continue
                except (StandardError, Exception):
                    continue

        cur_provider.log_result('Propers', count, '%s' % cur_provider.name)

    # take the list of unique propers and get it sorted by
    sorted_propers = sorted(propers.values(),
                            key=operator.attrgetter('properlevel', 'date'),
                            reverse=True)
    verified_propers = set()

    for cur_proper in sorted_propers:

        np = NameParser(False,
                        try_scene_exceptions=True,
                        showObj=cur_proper.parsed_show,
                        indexer_lookup=False)
        try:
            parse_result = np.parse(cur_proper.name)
        except (StandardError, Exception):
            continue

        # set the indexerid in the db to the show's indexerid
        cur_proper.indexerid = parse_result.show.indexerid

        # set the indexer in the db to the show's indexer
        cur_proper.indexer = parse_result.show.indexer

        # populate our Proper instance
        cur_proper.season = parse_result.season_number if None is not parse_result.season_number else 1
        cur_proper.episode = parse_result.episode_numbers[0]
        cur_proper.release_group = parse_result.release_group
        cur_proper.version = parse_result.version
        cur_proper.extra_info = parse_result.extra_info
        cur_proper.extra_info_no_name = parse_result.extra_info_no_name
        cur_proper.quality = Quality.nameQuality(cur_proper.name,
                                                 parse_result.is_anime)
        cur_proper.is_anime = parse_result.is_anime

        # only get anime proper if it has release group and version
        if parse_result.is_anime:
            if not cur_proper.release_group and -1 == cur_proper.version:
                logger.log(
                    u'Proper %s doesn\'t have a release group and version, ignoring it'
                    % cur_proper.name, logger.DEBUG)
                continue

        if not show_name_helpers.pass_wordlist_checks(
                cur_proper.name, parse=False, indexer_lookup=False):
            logger.log(
                u'Proper %s isn\'t a valid scene release that we want, ignoring it'
                % cur_proper.name, logger.DEBUG)
            continue

        re_extras = dict(re_prefix='.*', re_suffix='.*')
        result = show_name_helpers.contains_any(
            cur_proper.name, parse_result.show.rls_ignore_words, **re_extras)
        if None is not result and result:
            logger.log(u'Ignored: %s for containing ignore word' %
                       cur_proper.name)
            continue

        result = show_name_helpers.contains_any(
            cur_proper.name, parse_result.show.rls_require_words, **re_extras)
        if None is not result and not result:
            logger.log(
                u'Ignored: %s for not containing any required word match' %
                cur_proper.name)
            continue

        # check if we actually want this proper (if it's the right quality)
        my_db = db.DBConnection()
        sql_results = my_db.select(
            'SELECT release_group, status, version, release_name FROM tv_episodes WHERE showid = ? AND indexer = ? '
            + 'AND season = ? AND episode = ?', [
                cur_proper.indexerid, cur_proper.indexer, cur_proper.season,
                cur_proper.episode
            ])
        if not sql_results:
            continue

        # only keep the proper if we have already retrieved the same quality ep (don't get better/worse ones)
        # don't take a proper of the same level we already downloaded
        old_status, old_quality = Quality.splitCompositeStatus(
            int(sql_results[0]['status']))
        cur_proper.is_repack, cur_proper.proper_level = Quality.get_proper_level(
            cur_proper.extra_info_no_name(),
            cur_proper.version,
            cur_proper.is_anime,
            check_is_repack=True)

        old_release_group = sql_results[0]['release_group']
        # check if we want this release: same quality as current, current has correct status
        # restrict other release group releases to proper's
        if old_status not in SNATCHED_ANY + [DOWNLOADED, ARCHIVED] \
                or cur_proper.quality != old_quality \
                or (cur_proper.is_repack and cur_proper.release_group != old_release_group):
            continue

        np = NameParser(False,
                        try_scene_exceptions=True,
                        showObj=parse_result.show,
                        indexer_lookup=False)
        try:
            extra_info = np.parse(
                sql_results[0]['release_name']).extra_info_no_name()
        except (StandardError, Exception):
            extra_info = None

        old_proper_level, old_is_internal, old_codec, old_extra_no_name, old_name = \
            get_old_proper_level(parse_result.show, cur_proper.indexer, cur_proper.indexerid, cur_proper.season,
                                 parse_result.episode_numbers, old_status, cur_proper.quality, extra_info,
                                 cur_proper.version, cur_proper.is_anime)

        old_name = (old_name,
                    sql_results[0]['release_name'])[old_name in ('', None)]
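        # fall back to the DB release name when get_old_proper_level() did
        # not return one, then only accept a higher proper level; at an
        # equal level, prefer an x264 re-encode over xvid and an external
        # release over an internal one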
        if cur_proper.proper_level < old_proper_level:
            continue
        elif cur_proper.proper_level == old_proper_level:
            if '264' == cur_proper.codec and 'xvid' == old_codec:
                pass
            elif old_is_internal and not cur_proper.is_internal:
                pass
            else:
                continue

        log_same_grp = 'Skipping proper from release group: [%s], does not match existing release group: [%s] for [%s]'\
                       % (cur_proper.release_group, old_release_group, cur_proper.name)

        is_web = (old_quality in (Quality.HDWEBDL, Quality.FULLHDWEBDL,
                                  Quality.UHD4KWEB) or
                  (old_quality == Quality.SDTV
                   and re.search(r'\Wweb.?(dl|rip|.[hx]26[45])\W',
                                 str(sql_results[0]['release_name']), re.I)))

        if is_web:
            old_webdl_type = get_webdl_type(old_extra_no_name, old_name)
            new_webdl_type = get_webdl_type(cur_proper.extra_info_no_name(),
                                            cur_proper.name)
            if old_webdl_type != new_webdl_type:
                logger.log(
                    'Skipping proper webdl source: [%s], does not match existing webdl source: [%s] for [%s]'
                    % (old_webdl_type, new_webdl_type, cur_proper.name),
                    logger.DEBUG)
                continue

        # for webldls, prevent propers from different groups
        if sickbeard.PROPERS_WEBDL_ONEGRP and is_web and cur_proper.release_group != old_release_group:
            logger.log(log_same_grp, logger.DEBUG)
            continue

        # check if we actually want this proper (if it's the right release group and a higher version)
        if parse_result.is_anime:

            old_version = int(sql_results[0]['version'])
            if -1 < old_version < cur_proper.version:
                logger.log(u'Found new anime v%s to replace existing v%s' %
                           (cur_proper.version, old_version))
            else:
                continue

            if cur_proper.release_group != old_release_group:
                logger.log(log_same_grp, logger.DEBUG)
                continue

        # if the show is in our list and there hasn't been a proper already added for that particular episode
        # then add it to our list of propers
        if cur_proper.indexerid != -1:
            if (cur_proper.indexerid, cur_proper.indexer, cur_proper.season,
                    cur_proper.episode) not in map(
                        operator.attrgetter('indexerid', 'indexer', 'season',
                                            'episode'), verified_propers):
                logger.log(u'Found a proper that may be useful: %s' %
                           cur_proper.name)
                verified_propers.add(cur_proper)
            else:
                rp = set()
                for vp in verified_propers:
                    if vp.indexer == cur_proper.indexer and vp.indexerid == cur_proper.indexerid and \
                                    vp.season == cur_proper.season and vp.episode == cur_proper.episode and \
                                    vp.proper_level < cur_proper.proper_level:
                        rp.add(vp)
                if rp:
                    verified_propers = verified_propers - rp
                    logger.log(u'Found a proper that may be useful: %s' %
                               cur_proper.name)
                    verified_propers.add(cur_proper)

    return list(verified_propers)