Example #1
    def getShowInfo(self, identifier = None):
        if self.isDisabled():
            return {}

        cache_key = 'xem.cache.%s' % identifier
        log.debug('Getting showInfo: %s', cache_key)
        result = self.getCache(cache_key) or {}
        if result:
            return result

        # Create season/episode and absolute mappings
        url = self.config['url_all'] + "id=%s&origin=tvdb" % tryUrlencode(identifier)
        response = self.getJsonData(url)
        if response:
            if response.get('result') == 'success':
                data = response.get('data', None)
                result = self._parse(data)

        # Create name alias mappings
        url = self.config['url_names'] + "id=%s&origin=tvdb" % tryUrlencode(identifier)
        response = self.getJsonData(url)
        if response:
            if response.get('result') == 'success':
                data = response.get('data', None)
                result.update({'map_names': data})

        self.setCache(cache_key, result)
        return result
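For reference, a minimal sketch of how the URL above is assembled, using urllib.quote as a stand-in for tryUrlencode (an assumption; the real helper may behave differently) and placeholder values for the base URL and identifier:

    # Illustrative only: the base URL and identifier are placeholders, not values from self.config.
    from urllib import quote  # Python 2; use urllib.parse.quote on Python 3

    url_all = 'http://example.invalid/map/all?'
    identifier = '12345'
    print url_all + 'id=%s&origin=tvdb' % quote(identifier)
    # -> http://example.invalid/map/all?id=12345&origin=tvdb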
Example #2
    def _search(self, media, quality, results):

        search_url = self.urls["verified_search"] if self.conf("verified_only") else self.urls["search"]

        # Create search parameters
        search_params = self.buildUrl(media)

        smin = quality.get("size_min")
        smax = quality.get("size_max")
        if smin and smax:
            search_params += " size %sm - %sm" % (smin, smax)

        min_seeds = tryInt(self.conf("minimal_seeds"))
        if min_seeds:
            search_params += " seed > %s" % (min_seeds - 1)

        rss_data = self.getRSSData(search_url % search_params)

        if rss_data:
            try:

                for result in rss_data:

                    name = self.getTextElement(result, "title")
                    detail_url = self.getTextElement(result, "link")
                    description = self.getTextElement(result, "description")

                    magnet = splitString(detail_url, "/")[-1]
                    magnet_url = "magnet:?xt=urn:btih:%s&dn=%s&tr=%s" % (
                        magnet.upper(),
                        tryUrlencode(name),
                        tryUrlencode("udp://tracker.openbittorrent.com/announce"),
                    )

                    reg = re.search(
                        r"Size: (?P<size>\d+) MB Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)",
                        six.text_type(description),
                    )
                    # Skip results whose description lacks size/seed/peer info
                    if not reg:
                        continue

                    size = reg.group("size")
                    seeds = reg.group("seeds").replace(",", "")
                    peers = reg.group("peers").replace(",", "")

                    results.append(
                        {
                            "id": magnet,
                            "name": six.text_type(name),
                            "url": magnet_url,
                            "detail_url": detail_url,
                            "size": tryInt(size),
                            "seeders": tryInt(seeds),
                            "leechers": tryInt(peers),
                        }
                    )

            except:
                log.error("Failed getting results from %s: %s", (self.getName(), traceback.format_exc()))
Example #3
    def _searchOnHost(self, host, movie, quality, results):

        arguments = tryUrlencode({
            'imdbid': movie['library']['identifier'].replace('tt', ''),
            'apikey': host['api_key'],
            'extended': 1
        })
        url = '%s&%s' % (self.getUrl(host['host'], self.urls['search']), arguments)

        nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})

        for nzb in nzbs:

            date = None
            spotter = None
            for item in nzb:
                if date and spotter:
                    break
                if item.attrib.get('name') == 'usenetdate':
                    date = item.attrib.get('value')
                    break

                # Get the name of the person who posts the spot
                if item.attrib.get('name') == 'poster':
                    if "@spot.net" in item.attrib.get('value'):
                        spotter = item.attrib.get('value').split("@")[0]
                        continue

            if not date:
                date = self.getTextElement(nzb, 'pubDate')

            nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
            name = self.getTextElement(nzb, 'title')

            if not name:
                continue

            name_extra = ''
            if spotter:
                name_extra = spotter

            results.append({
                'id': nzb_id,
                'provider_extra': urlparse(host['host']).hostname or host['host'],
                'name': name,
                'name_extra': name_extra,
                'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
                'url': (self.getUrl(host['host'], self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
                'detail_url': '%sdetails/%s' % (cleanHost(host['host']), tryUrlencode(nzb_id)),
                'content': self.getTextElement(nzb, 'description'),
                'score': host['extra_score'],
            })
Example #4
    def buildUrl(self, media):

        q = tryUrlencode({
            'q': '%s' % fireEvent('library.query', media, single = True),
        })

        query = tryUrlencode({
            'ig': 1,
            'rpp': 200,
            'st': 5,
            'sp': 1,
            'ns': 1,
        })
        return '%s&%s' % (q, query)
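A rough sketch of the string buildUrl() returns, with urllib's urlencode standing in for tryUrlencode (an assumption) and a made-up title; with a multi-key dict the parameter order is not guaranteed:

    # Illustrative only: the title is a placeholder and urlencode stands in for tryUrlencode.
    from urllib import urlencode  # Python 2; use urllib.parse.urlencode on Python 3

    q = urlencode({'q': 'Example Movie 2014'})
    query = urlencode({'ig': 1, 'rpp': 200, 'st': 5, 'sp': 1, 'ns': 1})
    print '%s&%s' % (q, query)
    # e.g. q=Example+Movie+2014&ig=1&rpp=200&st=5&sp=1&ns=1 (key order may vary)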
Example #5
    def getLoginParams(self):
        return tryUrlencode({
            'username': self.conf('username'),
            'password': self.conf('password'),
            'keeplogged': '1',
            'login': '******'
        })
    def login(self):

        cookieprocessor = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
        opener = urllib2.build_opener(cookieprocessor, Base.PTPHTTPRedirectHandler())
        opener.addheaders = [
            ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko)'),
            ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),
            ('Accept-Language', 'fr-fr,fr;q=0.5'),
            ('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'),
            ('Keep-Alive', '115'),
            ('Connection', 'keep-alive'),
            ('Cache-Control', 'max-age=0'),
        ]

        try:
            response = opener.open('http://www.cpasbien.io', tryUrlencode({'url': '/'}))
        except urllib2.URLError as e:
            log.error('Login to cPASbien failed: %s' % e)
            return False

        if response.getcode() == 200:
            log.debug('Login HTTP cPASbien status 200; seems successful')
            self.last_login_check = opener
            return True
        else:
            log.error('Login to cPASbien failed: returned code %d' % response.getcode())
            return False
Example #7
    def _searchOnTitle(self, title, movie, quality, results):

        movieTitle = tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year']))
        url = self.urls['search'] % (self.getSceneOnly(), movieTitle)
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id': 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr', attrs = {'class': 'torrent'})
                for result in entries:

                    link = result.find('a', attrs = {'dir': 'ltr'})
                    url = result.find('span', attrs = {'title': 'Download'}).parent
                    tds = result.find_all('td')
                    size = tds[5].contents[0].strip('\n ')

                    results.append({
                        'id': link['href'].replace('torrents.php?id=', '').split('&')[0],
                        'name': link.contents[0],
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % link['href'],
                        'size': self.parseSize(size),
                        'seeders': tryInt(tds[len(tds)-2].string),
                        'leechers': tryInt(tds[len(tds)-1].string),
                    })
            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #8
    def getAuthorizationUrl(self):

        referer = getParam('host')
        callback_url = cleanHost(referer) + '%snotify.%s.credentials/' % (url_for('api.index').lstrip('/'), self.getName().lower())

        oauth_consumer = oauth2.Consumer(self.consumer_key, self.consumer_secret)
        oauth_client = oauth2.Client(oauth_consumer)

        resp, content = oauth_client.request(self.urls['request'], 'POST', body = tryUrlencode({'oauth_callback': callback_url}))

        if resp['status'] != '200':
            log.error('Invalid response from Twitter requesting temp token: %s' % resp['status'])
            return jsonified({
                'success': False,
            })
        else:
            self.request_token = dict(parse_qsl(content))

            auth_url = self.urls['authorize'] + ("?oauth_token=%s" % self.request_token['oauth_token'])

            log.info('Redirecting to "%s"' % auth_url)
            return jsonified({
                'success': True,
                'url': auth_url,
            })
    def _searchOnTitle(self, title, movie, quality, results):

        q = '%s %s' % (title, movie['info']['year'])
        params = tryUrlencode({
            'search': q,
            'catid': ','.join([str(x) for x in self.getCatId(quality)]),
            'user': self.conf('username', default = ''),
            'api': self.conf('api_key', default = ''),
        })
        
        if self.conf('custom_tag'):
            params = '%s&%s' % (params, self.conf('custom_tag'))

        nzbs = self.getJsonData(self.urls['search'] % params)

        if isinstance(nzbs, list):
            for nzb in nzbs:

                results.append({
                    'id': nzb.get('nzbid'),
                    'name': toUnicode(nzb.get('release')),
                    'age': self.calculateAge(tryInt(nzb.get('usenetage'))),
                    'size': tryInt(nzb.get('sizebytes')) / 1024 / 1024,
                    'url': nzb.get('getnzb'),
                    'detail_url': nzb.get('details'),
                    'description': nzb.get('weblink')
                })
Example #10
	def _searchOnTitle(self, title, movie, quality, results):
		log.debug("Searching for %s (%s) on %s" % (title, quality['label'], self.urls['base_url']))

		# remove accents 
		simpletitle = simplifyString(title)
		cat = self.getCatId(quality)

		log.debug("Searching in CorSaRoNero category: %s" % cat)
		data = self.getHTMLData(self.urls['search'] % (cat, tryUrlencode(simpletitle)))

		if data and 'Nessus torrent trovato!!!!' in data:
			log.info("No torrents found for %s on ilCorsaroNero.info.", title)
			return
		
		if data:
			try:
				html = BeautifulSoup(data)
				entries_1 = html.findAll('tr', attrs={'class':'odd'})
				entries_2 = html.findAll('tr', attrs={'class':'odd2'})
			
				try:
					self.parseResults(results, entries_1, movie, title)
					self.parseResults(results, entries_2, movie, title)
				except:
					log.error('Failed parsing ilCorsaroNero: %s', traceback.format_exc())
						
			except AttributeError:
				log.debug('No search results found.')
    def buildUrl(self, media, host):
        arguments = tryUrlencode({
            'user': host['name'],
            'passkey': host['pass_key'],
            'imdbid': getIdentifier(media),
        })
        return '%s?%s' % (host['host'], arguments)
Example #12
    def _searchOnTitle(self, title, movie, quality, results):

        scene_only = '1' if self.conf('scene_only') else ''

        url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), scene_only, self.getCatId(quality['identifier'])[0])
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id' : 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr', attrs = {'class' : 'torrent'})

                for result in entries:

                    link = result.find('span', attrs = {'class' : 'torrent_name_link'}).parent
                    url = result.find('td', attrs = {'class' : 'torrent_td'}).find('a')

                    results.append({
                        'id': link['href'].replace('torrents.php?torrentid=', ''),
                        'name': unicode(link.span.string).translate({ord(u'\xad'): None}),
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % link['href'],
                        'size': self.parseSize(result.find_all('td')[4].string),
                        'seeders': tryInt(result.find_all('td')[6].string),
                        'leechers': tryInt(result.find_all('td')[7].string),
                    })

            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #13
    def _searchOnTitle(self, title, movie, quality, results):

        page = 0
        total_pages = 1
        cats = self.getCatId(quality['identifier'])

        while page < total_pages:

            search_url = self.urls['search'] % (self.getDomain(), tryUrlencode('"%s" %s' % (title, movie['library']['year'])), page, ','.join(str(x) for x in cats))
            page += 1

            data = self.getHTMLData(search_url)

            if data:
                try:
                    soup = BeautifulSoup(data)
                    results_table = soup.find('table', attrs = {'id': 'searchResult'})

                    if not results_table:
                        return

                    try:
                        total_pages = len(soup.find('div', attrs = {'align': 'center'}).find_all('a'))
                    except:
                        pass

                    entries = results_table.find_all('tr')
                    for result in entries[2:]:
                        link = result.find(href = re.compile(r'torrent/\d+/'))
                        download = result.find(href = re.compile('magnet:'))

                        try:
                            size = re.search('Size (?P<size>.+),', six.text_type(result.select('font.detDesc')[0])).group('size')
                        except:
                            continue

                        if link and download:

                            def extra_score(item, result = result):
                                # Bind the current row: without the default argument, every
                                # closure would score against the last result of the loop
                                trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None]
                                vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None]
                                confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None]
                                moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None]

                                return confirmed + trusted + vip + moderated

                            results.append({
                                'id': re.search(r'/(?P<id>\d+)/', link['href']).group('id'),
                                'name': link.string,
                                'url': download['href'],
                                'detail_url': self.getDomain(link['href']),
                                'size': self.parseSize(size),
                                'seeders': tryInt(result.find_all('td')[2].string),
                                'leechers': tryInt(result.find_all('td')[3].string),
                                'extra_score': extra_score,
                                'get_more_info': self.getMoreInfo
                            })

                except:
                    log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #14
    def call(self, call, parameters = None, is_repeat = False, auth = True, *args, **kwargs):

        # Login first
        if not parameters: parameters = {}
        if not self.session_id and auth:
            self.login()

        # Always add session id to request
        if self.session_id:
            parameters['sessionid'] = self.session_id

        params = tryUrlencode(parameters)

        url = cleanHost(self.conf('host')) + 'api/' + call

        try:
            data = self.getJsonData('%s%s' % (url, '?' + params if params else ''), *args, cache_timeout = 0, show_error = False, **kwargs)

            if data:
                return data
        except HTTPError as e:
            sc = e.response.status_code
            if sc == 403:
                # Try login and do again
                if not is_repeat:
                    self.login()
                    return self.call(call, parameters = parameters, is_repeat = True, **kwargs)

            log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
        except:
            log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))

        return {}
Example #15
    def notify(self, message = '', data = {}):
        if self.isDisabled(): return

        http_handler = HTTPSConnection('api.prowlapp.com')

        data = {
            'apikey': self.conf('api_key'),
            'application': self.default_title,
            'description': toUnicode(message),
            'priority': self.conf('priority'),
        }

        http_handler.request('POST',
            '/publicapi/add',
            headers = {'Content-type': 'application/x-www-form-urlencoded'},
            body = tryUrlencode(data)
        )
        response = http_handler.getresponse()
        request_status = response.status

        if request_status == 200:
            log.info('Prowl notifications sent.')
            return True
        elif request_status == 401:
            log.error('Prowl auth failed: %s' % response.reason)
            return False
        else:
            log.error('Prowl notification failed.')
            return False
Example #16
    def login(self):

        cookieprocessor = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
        opener = urllib2.build_opener(cookieprocessor, cpasbien.PTPHTTPRedirectHandler())
        opener.addheaders = [
            ("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko)"),
            ("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
            ("Accept-Language", "fr-fr,fr;q=0.5"),
            ("Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7"),
            ("Keep-Alive", "115"),
            ("Connection", "keep-alive"),
            ("Cache-Control", "max-age=0"),
        ]

        try:
            response = opener.open("http://www.cpasbien.me", tryUrlencode({"url": "/"}))
        except urllib2.URLError as e:
            log.error("Login to cPASbien failed: %s" % e)
            return False

        if response.getcode() == 200:
            log.debug("Login HTTP cPASbien status 200; seems successful")
            self.login_opener = opener
            return True
        else:
            log.error("Login to cPASbien failed: returned code %d" % response.getcode())
            return False
Example #17
    def _searchOnTitle(self, title, movie, quality, results):

        q = '%s %s' % (title, movie['library']['year'])
        params = tryUrlencode({
            'search': q,
            'catid': ','.join([str(x) for x in self.getCatId(quality['identifier'])]),
            'user': self.conf('username', default = ''),
            'api': self.conf('api_key', default = ''),
        })

        nzbs = self.getRSSData(self.urls['search'] % params)

        for nzb in nzbs:

            enclosure = self.getElement(nzb, 'enclosure').attrib
            nzb_id = parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0]

            results.append({
                'id': nzb_id,
                'name': toUnicode(self.getTextElement(nzb, 'title')),
                'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, 'pubDate')).timetuple()))),
                'size': tryInt(enclosure['length']) / 1024 / 1024,
                'url': enclosure['url'],
                'detail_url': self.urls['detail_url'] % nzb_id,
                'description': self.getTextElement(nzb, 'description')
            })
Example #18
    def call(self, call, parameters = None, repeat = False, auth = True, *args, **kwargs):

        # Avoid a shared mutable default argument
        if not parameters: parameters = {}

        # Login first
        if not self.session_id and auth:
            self.login()

        # Always add session id to request
        if self.session_id:
            parameters['sessionid'] = self.session_id

        params = tryUrlencode(parameters)

        url = cleanHost(self.conf('host')) + 'api/' + call
        url_opener = urllib2.build_opener(HTTPSHandler())

        try:
            data = self.urlopen('%s?%s' % (url, params), opener = url_opener, *args, **kwargs)

            if data:
                return json.loads(data)
        except URLError as e:
            if hasattr(e, 'code') and e.code == 403:
                # Try login and do again
                if not repeat:
                    self.login()
                    return self.call(call, parameters = parameters, repeat = True, *args, **kwargs)

            log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #19
    def _searchOnTitle(self, title, movie, quality, results):

        url = self.urls['search'] % (tryUrlencode(title.replace(':', '') + ' ' + quality['identifier']), self.getCatId(quality['identifier'])[0])
        data = self.getHTMLData(url, opener = self.login_opener)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id' : 'torrenttable'})
                if not result_table:
                    return

                entries = result_table.find_all('tr')

                for result in entries[1:]:

                    link = result.find('td', attrs = {'class' : 'name'}).find('a')
                    url = result.find('td', attrs = {'class' : 'quickdownload'}).find('a')
                    details = result.find('td', attrs = {'class' : 'name'}).find('a')

                    results.append({
                        'id': link['href'].replace('/torrent/', ''),
                        'name': link.string,
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % details['href'],
                        'download': self.loginDownload,
                        'size': self.parseSize(result.find_all('td')[4].string),
                        'seeders': tryInt(result.find('td', attrs = {'class' : 'seeders'}).string),
                        'leechers': tryInt(result.find('td', attrs = {'class' : 'leechers'}).string),
                    })

            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #20
    def search(self, filename, minSize, newsgroup=None):
        
        q = filename
        arguments = tryUrlencode({
            'q': q,
            'age': Env.setting('retention', 'nzb'),
            'sort': 'agedesc',
            'minsize': minSize,
            'rating': 1,
            'max': 250,
            'more': 1,
            'complete': 1,
        })
        nzbs = self.getRSSData(self.urls['search'] % arguments)
        nzbid = None
        for nzb in nzbs:

            enclosure = self.getElement(nzb, 'enclosure').attrib
            nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4])

            nzbid = nzbindex_id
            age = self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple())))
            sizeInMegs = tryInt(enclosure['length']) / 1024 / 1024
            downloadUrl = enclosure['url']
            detailURL = enclosure['url'].replace('/download/', '/release/')

        if nzbid:
            return NZBGetURLSearchResult(self, downloadUrl, sizeInMegs, detailURL, age, nzbid)
Example #21
    def findViaAlternative(self, group):
        results = {'480p':[], '720p':[], '1080p':[]}

        movie_name = getTitle(group['library'])

        url = "%s?%s" % (self.urls['backup'], tryUrlencode({'s':movie_name}))
        try:
            data = self.getCache('hdtrailers.alt.%s' % group['library']['identifier'], url, show_error = False)
        except HTTPError:
            log.debug('No alternative page found for: %s', movie_name)
            data = None

        if not data:
            return results

        try:
            tables = SoupStrainer('div')
            html = BeautifulSoup(data, parse_only = tables)
            result_table = html.find_all('h2', text = re.compile(movie_name))

            for h2 in result_table:
                if 'trailer' in h2.text.lower():
                    parent = h2.parent.parent.parent
                    trailerLinks = parent.find_all('a', text = re.compile('480p|720p|1080p'))
                    try:
                        for trailer in trailerLinks:
                            results[trailer.string].insert(0, trailer.parent['href'])
                    except:
                        pass

        except AttributeError:
            log.debug('No trailers found in via alternative.')

        return results
Example #22
    def _searchOnTitle(self, title, movie, quality, results):

        url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), self.getCatId(quality['identifier'])[0])
        data = self.getHTMLData(url, opener = self.login_opener)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'border' : '1'})
                if not result_table:
                    return

                entries = result_table.find_all('tr')

                for result in entries[1:]:
                    cells = result.find_all('td')

                    link = cells[1].find('a', attrs = {'class' : 'index'})

                    full_id = link['href'].replace('details.php?id=', '')
                    torrent_id = full_id[:6]

                    results.append({
                        'id': torrent_id,
                        'name': link.contents[0],
                        'url': self.urls['download'] % (torrent_id, link.contents[0]),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(cells[6].contents[0] + cells[6].contents[2]),
                        'seeders': tryInt(cells[8].find('span').contents[0]),
                        'leechers': tryInt(cells[9].find('span').contents[0]),
                    })

            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #23
    def buildDetailsUrl(self, nzb_id, api_key):
        query = tryUrlencode({
            't': 'details',
            'id': nzb_id,
            'apikey': api_key,
        })
        return query
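Similarly, a hedged sketch of the details query this helper produces (it resembles a Newznab-style 't=details' call), again with urlencode standing in for tryUrlencode and placeholder values:

    # Illustrative only: the id and apikey values are placeholders.
    from urllib import urlencode  # Python 2; use urllib.parse.urlencode on Python 3

    print urlencode({'t': 'details', 'id': 'abc123', 'apikey': 'your-api-key'})
    # e.g. t=details&id=abc123&apikey=your-api-key (key order may vary)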
Example #24
    def addToLibrary(self, group = {}):
        if self.isDisabled(): return

        host = self.conf('host')
        mount = self.conf('mount')
        database = self.conf('database')

        if mount:
            log.debug('Trying to mount network drive via url: %s' % mount)
            try:
                data = self.urlopen(mount)
            except:
                return False

        params = {
            'arg0': 'scanner_start',
            'arg1': database,
            'arg2': 'background',
            'arg3': '',
        }
        params = tryUrlencode(params)
        UPDATE_URL = 'http://%(host)s:8008/metadata_database?%(params)s'
        updateUrl = UPDATE_URL % {'host': host, 'params': params}

        try:
            response = self.urlopen(updateUrl)
        except:
            return False

        try:
            et = etree.fromstring(response)
            result = et.findtext('returnValue')
        except SyntaxError as e:
            log.error('Unable to parse XML returned from the Popcorn Hour: %s' % (e))
            return False
Example #25
    def notify(self, message = '', data = {}, listener = None):

        http_handler = HTTPSConnection("api.pushover.net:443")

        data = {
            'user': self.conf('user_key'),
            'token': self.app_token,
            'message': toUnicode(message),
            'priority': self.conf('priority')
        }

        http_handler.request('POST',
            "/1/messages.json",
            headers = {'Content-type': 'application/x-www-form-urlencoded'},
            body = tryUrlencode(data)
        )

        response = http_handler.getresponse()
        request_status = response.status

        if request_status == 200:
            log.info('Pushover notifications sent.')
            return True
        elif request_status == 401:
            log.error('Pushover auth failed: %s', response.reason)
            return False
        else:
            log.error('Pushover notification failed.')
            return False
Example #26
    def notify(self, message="", data={}, listener=None):
        if self.isDisabled():
            return

        http_handler = HTTPSConnection("api.prowlapp.com")

        data = {
            "apikey": self.conf("api_key"),
            "application": self.default_title,
            "description": toUnicode(message),
            "priority": self.conf("priority"),
        }

        http_handler.request(
            "POST",
            "/publicapi/add",
            headers={"Content-type": "application/x-www-form-urlencoded"},
            body=tryUrlencode(data),
        )
        response = http_handler.getresponse()
        request_status = response.status

        if request_status == 200:
            log.info("Prowl notifications sent.")
            return True
        elif request_status == 401:
            log.error("Prowl auth failed: %s", response.reason)
            return False
        else:
            log.error("Prowl notification failed.")
            return False
Example #27
    def getLoginParams(self):
        return tryUrlencode({
            'login': self.conf('username'),
            'password': self.conf('password'),
            'remember': '1',
            'url': '/'
        })
Example #28
    def getLoginParams(self):
        return tryUrlencode({
            'username': self.conf('username'),
            'password': self.conf('password'),
            'remember_me': 'on',
            'login': '******',
        })
Example #29
    def _searchOnHost(self, host, movie, quality, results):

        arguments = tryUrlencode({
            'user': host['name'],
            'passkey': host['pass_key'],
            'imdbid': movie['library']['identifier']
        })
        url = '%s?%s' % (host['host'], arguments)

        torrents = self.getJsonData(url, cache_timeout = 1800)

        if torrents:
            try:
                if torrents.get('error'):
                    log.error('%s: %s', (torrents.get('error'), host['host']))
                elif torrents.get('results'):
                    for torrent in torrents.get('results', []):
                        results.append({
                            'id': torrent.get('torrent_id'),
                            'protocol': 'torrent' if re.match('^(http|https|ftp)://.*$', torrent.get('download_url')) else 'torrent_magnet',
                            'provider_extra': urlparse(host['host']).hostname or host['host'],
                            'name': toUnicode(torrent.get('release_name')),
                            'url': torrent.get('download_url'),
                            'detail_url': torrent.get('details_url'),
                            'size': torrent.get('size'),
                            'score': host['extra_score'],
                            'seeders': torrent.get('seeders'),
                            'leechers': torrent.get('leechers'),
                            'seed_ratio': host['seed_ratio'],
                            'seed_time': host['seed_time'],
                        })

            except:
                log.error('Failed getting results from %s: %s', (host['host'], traceback.format_exc()))
Example #30
    def getMessages(self, last_check = 0):

        data = self.getJsonData(self.urls['messages'] % tryUrlencode({
            'last_check': last_check,
        }), headers = self.getRequestHeaders(), cache_timeout = 10)

        return data
Example #31
    def _searchOnTitle(self, title, media, quality, results, offset=0):
        """
        Do a search based on possible titles. This function doesn't check
        the quality because CouchPotato do the job when parsing results.
        Furthermore the URL must stay generic to use native CouchPotato
        caching feature.

        .. seealso:: YarrProvider.search
        """
        try:
            params = {
                'category': 2145,  # Film/Vidéo
                'description': '',
                'do': 'search',
                'file': '',
                'name': simplifyString(title),
                'sub_category': 'all',
                'uploader': ''
            }
            if offset > 0:
                params['page'] = offset * YGG.limit
            url = self.urls['search'].format(tryUrlencode(params))
            data = self.getHTMLData(url)
            soup = BeautifulSoup(data, 'html.parser')
            filter_ = '^{0}'.format(self.urls['torrent'])
            for link in soup.find_all(href=re.compile(filter_)):
                detail_url = link['href']
                if re.search(u'/filmvidéo/(film|animation|documentaire)/',
                             detail_url):
                    name = self.parseText(link)
                    id_ = tryInt(re.search(r'/(\d+)-[^/\s]+$', link['href']).group(1))
                    columns = link.parent.parent.find_all('td')
                    size = self.parseSize(self.parseText(columns[5]))
                    seeders = tryInt(self.parseText(columns[7]))
                    leechers = tryInt(self.parseText(columns[8]))
                    result = {
                        'id': id_,
                        'name': name,
                        'seeders': seeders,
                        'leechers': leechers,
                        'size': size,
                        'url': self.urls['url'].format(id_),
                        'detail_url': detail_url,
                        'verified': True,
                        'get_more_info': self.getMoreInfo,
                        'extra_check': self.extraCheck
                    }
                    results.append(result)
                    YGG.log.debug(result)
            # Get next page if we don't have all results
            pagination = soup.find('ul', class_='pagination')
            if pagination:
                for page in pagination.find_all('li'):
                    next_ = tryInt(self.parseText(page.find('a')))
                    if next_ > offset + 1:
                        self._searchOnTitle(title, media, quality, results,
                                            offset + 1)
                        break
        except:
            YGG.log.error('Failed searching release from {0}: {1}'.
                          format(self.getName(), traceback.format_exc()))
Example #32
    def _searchOnHost(self, host, media, quality, results):

        query = self.buildUrl(media, host)
        url = '%s&%s' % (self.getUrl(host['host']), query)
        nzbs = self.getRSSData(url,
                               cache_timeout=1800,
                               headers={'User-Agent': Env.getIdentifier()})

        for nzb in nzbs:

            date = None
            spotter = None
            for item in nzb:
                if date and spotter:
                    break
                if item.attrib.get('name') == 'usenetdate':
                    date = item.attrib.get('value')
                    break

                # Get the name of the person who posts the spot
                if item.attrib.get('name') == 'poster':
                    if "@spot.net" in item.attrib.get('value'):
                        spotter = item.attrib.get('value').split("@")[0]
                        continue

            if not date:
                date = self.getTextElement(nzb, 'pubDate')

            nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop()
            name = self.getTextElement(nzb, 'title')

            if not name:
                continue

            name_extra = ''
            if spotter:
                name_extra = spotter

            description = ''
            if "@spot.net" in nzb_id:
                try:
                    # Get details for extended description to retrieve passwords
                    query = self.buildDetailsUrl(nzb_id, host['api_key'])
                    url = '%s&%s' % (self.getUrl(host['host']), query)
                    nzb_details = self.getRSSData(
                        url,
                        cache_timeout=1800,
                        headers={'User-Agent': Env.getIdentifier()})[0]

                    description = self.getTextElement(nzb_details,
                                                      'description')

                    # Extract a password from the description
                    password = re.search(
                        r'(?:' + self.passwords_regex +
                        r')(?: *)(?:\:|\=)(?: *)(.*?)\<br\>|\n|$',
                        description,
                        flags=re.I).group(1)
                    if password:
                        name += ' {{%s}}' % password.strip()
                except:
                    log.debug('Error getting details of "%s": %s',
                              (name, traceback.format_exc()))

            results.append({
                'id': nzb_id,
                'provider_extra': urlparse(host['host']).hostname or host['host'],
                'name': toUnicode(name),
                'name_extra': name_extra,
                'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
                'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
                'detail_url': (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id),
                'content': self.getTextElement(nzb, 'description'),
                'description': description,
                'score': host['extra_score'],
            })
    def buildUrl(self, media):
        query = tryUrlencode(fireEvent('library.query', media, single=True))
        return query
Example #34
    def _search(self, media, quality, results):

        search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search']

        # Create search parameters
        search_params = self.buildUrl(media)

        smin = quality.get('size_min')
        smax = quality.get('size_max')
        if smin and smax:
            search_params += ' size %sm - %sm' % (smin, smax)

        min_seeds = tryInt(self.conf('minimal_seeds'))
        if min_seeds:
            search_params += ' seed > %s' % (min_seeds - 1)

        rss_data = self.getRSSData(search_url % search_params)

        if rss_data:
            try:

                for result in rss_data:

                    name = self.getTextElement(result, 'title')
                    detail_url = self.getTextElement(result, 'link')
                    description = self.getTextElement(result, 'description')

                    magnet = splitString(detail_url, '/')[-1]
                    magnet_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (magnet.upper(), tryUrlencode(name), tryUrlencode('udp://tracker.openbittorrent.com/announce'))

                    reg = re.search(r'Size: (?P<size>\d+) MB Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)', six.text_type(description))
                    # Skip results whose description lacks size/seed/peer info
                    if not reg:
                        continue

                    size = reg.group('size')
                    seeds = reg.group('seeds').replace(',', '')
                    peers = reg.group('peers').replace(',', '')

                    results.append({
                        'id': magnet,
                        'name': six.text_type(name),
                        'url': magnet_url,
                        'detail_url': detail_url,
                        'size': tryInt(size),
                        'seeders': tryInt(seeds),
                        'leechers': tryInt(peers),
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
    def _searchOnTitle(self, title, movie, quality, results):

        torrentlist = []

        onlyFreelech = bool(self.conf('only_freeleech'))
        onlyVerified = bool(self.conf('only_verified'))

        if '/logout.php' not in self.urlopen(self.urls['login'], data = self.getLoginParams()).lower():
            log.info('Problems logging into tehconnection.eu')
            return []

        data = self.getHTMLData(self.urls['search'] % tryUrlencode(getIdentifier(movie)))
        if data:
            try:
                resultsTable = BeautifulSoup(data).find('table', attrs = {'id' : 'browse_torrent_table'})
                if resultsTable is None:
                    log.info('movie not found on TehConnection')
                    return []

                pagelinkdata = resultsTable.find("a", { "title" : "View Torrent" })
                torrentpage = (pagelinkdata.attrs['href']).strip()
                indivTorrData = self.getHTMLData(self.urls['baseurl'] + (torrentpage))

                soup = BeautifulSoup(indivTorrData)
                items = soup.findAll("div", { "class" : "torrent_widget box pad" })
                for item in items:

                    torrentData = TorrentDetails(0, 0, '', '', 0, '', '', False, False)


                    detailstats = item.find("div", { "class" : "details_stats" })

                    #seeders
                    seed = detailstats.find("img", { "title" : "Seeders" }).parent
                    torrentData.seeders = ((seed.text).strip())

                    #leechers
                    leech = detailstats.find("img", { "title" : "Leechers" }).parent
                    torrentData.leechers = ((leech.text).strip())

                    #permalink
                    perma = detailstats.find("a", { "title" : "Permalink" })
                    torrentData.permalink = self.urls['baseurl'] + perma.attrs['href']

                    #download link
                    downlo = detailstats.find("a", { "title" : "Download" })
                    torrentData.downlink = self.urls['baseurl'] + downlo.attrs['href']

                    #Torrent ID
                    m = re.search(r'\d+$', torrentData.permalink)
                    torrentData.torrentID = (int(m.group()) if m else None)

                    #TorrentName
                    namedata = item.find("div", { "id" : "desc_%s" % torrentData.torrentID })
                    torrentData.torrentName = ((namedata.text).splitlines()[1]).strip()

                    #FileSize
                    sizedata = item.find("div", { "class" : "details_title" })
                    sizefile = ((sizedata.text).splitlines()[3]).replace("(","").replace(")","").strip()
                    torrentData.filesize = sizefile

                    #FreeLeech
                    freeleechdata = item.find("span", { "class" : "freeleech" })
                    if freeleechdata is None:
                        torrentData.freeleech = False
                    else:
                        torrentData.freeleech = True

                    #QualityEncode
                    qualityenc = detailstats.find("img", { "class" : "approved" })
                    if qualityenc is None:
                        torrentData.qualityEncode = False
                    else:
                        torrentData.torrentName += " HQ"
                        torrentData.qualityEncode = True

                    #Test if the Freelech or Verified boxes have been checked & add depending
                    if (onlyFreelech == False) or (onlyFreelech == True and torrentData.freeleech == True):
                        #Only Freelech is switched off OR only Freelech is ON and the torrent is a freelech, so safe to add to results
                        if (onlyVerified == False) or (onlyVerified == True and torrentData.qualityEncode == True):
                            #Only Verified is switched off OR only Verified is ON and the torrent is verified, so safe to add to results
                            torrentlist.append(torrentData)


                for torrentFind in torrentlist:
                    log.info('TehConnection found ' + torrentFind.torrentName)
                    results.append({
                        'leechers': torrentFind.leechers,
                        'seeders': torrentFind.seeders,
                        'name': torrentFind.torrentName,
                        'url': torrentFind.downlink,
                        'detail_url': torrentFind.permalink,
                        'id': torrentFind.torrentID,
                        'size': self.parseSize(torrentFind.filesize)
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #36
    def _searchOnTitle(self, title, movie, quality, results):

        page = 0
        total_pages = 1
        cats = self.getCatId(quality['identifier'])

        while page < total_pages:

            search_url = self.urls['search'] % (
                self.getDomain(),
                tryUrlencode('"%s" %s' % (title, movie['library']['year'])),
                page, ','.join(str(x) for x in cats))
            page += 1

            data = self.getHTMLData(search_url)

            if data:
                try:
                    soup = BeautifulSoup(data)
                    results_table = soup.find('table',
                                              attrs={'id': 'searchResult'})

                    if not results_table:
                        return

                    try:
                        total_pages = len(
                            soup.find('div', attrs={
                                'align': 'center'
                            }).find_all('a'))
                    except:
                        pass

                    entries = results_table.find_all('tr')
                    for result in entries[2:]:
                        link = result.find(href=re.compile(r'torrent/\d+/'))
                        download = result.find(href=re.compile('magnet:'))

                        try:
                            size = re.search('Size (?P<size>.+),', six.text_type(result.select('font.detDesc')[0])).group('size')
                        except:
                            continue

                        if link and download:

                            def extra_score(item, result=result):
                                # Bind the current row: without the default argument, every
                                # closure would score against the last result of the loop
                                trusted = (0, 10)[result.find('img', alt=re.compile('Trusted')) is not None]
                                vip = (0, 20)[result.find('img', alt=re.compile('VIP')) is not None]
                                confirmed = (0, 30)[result.find('img', alt=re.compile('Helpers')) is not None]
                                moderated = (0, 50)[result.find('img', alt=re.compile('Moderator')) is not None]

                                return confirmed + trusted + vip + moderated

                            results.append({
                                'id': re.search(r'/(?P<id>\d+)/', link['href']).group('id'),
                                'name': link.string,
                                'url': download['href'],
                                'detail_url': self.getDomain(link['href']),
                                'size': self.parseSize(size),
                                'seeders': tryInt(result.find_all('td')[2].string),
                                'leechers': tryInt(result.find_all('td')[3].string),
                                'extra_score': extra_score,
                                'get_more_info': self.getMoreInfo
                            })

                except:
                    log.error('Failed getting results from %s: %s',
                              (self.getName(), traceback.format_exc()))