Example #1
    def buildUrl(self, media):

        q = tryUrlencode({
            'q': '"%s"' % fireEvent('library.query', media, single = True),
        })

        query = tryUrlencode({
            'ig': 1,
            'rpp': 200,
            'st': 5,
            'sp': 1,
            'ns': 1,
        })
        return '%s&%s' % (q, query)
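
All of these examples lean on CouchPotato's `tryUrlencode` helper, whose implementation isn't shown here. As a rough mental model, it behaves like a unicode-safe `urlencode` that accepts either a dict or a bare string. A minimal stand-in sketch (not the actual helper):

    from urllib.parse import quote_plus, urlencode

    def try_urlencode_sketch(data):
        # rough stand-in for CouchPotato's tryUrlencode (an assumption, not its code):
        # dicts become query strings, anything else is percent-encoded as one value
        if isinstance(data, dict):
            return urlencode({k: str(v) for k, v in data.items()})
        return quote_plus(str(data))

    print(try_urlencode_sketch({'rpp': 200, 'st': 5}))   # rpp=200&st=5
    print(try_urlencode_sketch('"some movie"'))          # %22some+movie%22
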
Example #2
 def buildDetailsUrl(self, nzb_id, api_key):
     query = tryUrlencode({
         't': 'details',
         'id': nzb_id,
         'apikey': api_key,
     })
     return query
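
The method returns only the encoded query portion; the caller presumably prepends the indexer's API endpoint. The parameter names (`t`, `id`, `apikey`) follow the usual Newznab details call. For illustration (the host is hypothetical):

    from urllib.parse import urlencode

    # stand-in for tryUrlencode on a dict of Newznab details parameters
    query = urlencode({'t': 'details', 'id': '12345', 'apikey': 'secret-key'})
    details_url = 'https://indexer.example/api?%s' % query
    # -> https://indexer.example/api?t=details&id=12345&apikey=secret-key
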
Example #3
    def call(self, call, parameters = None, is_repeat = False, auth = True, *args, **kwargs):

        # Login first
        if not parameters: parameters = {}
        if not self.session_id and auth:
            self.login()

        # Always add session id to request
        if self.session_id:
            parameters['sessionid'] = self.session_id

        params = tryUrlencode(parameters)

        url = cleanHost(self.conf('host')) + 'api/' + call

        try:
            data = self.getJsonData('%s%s' % (url, '?' + params if params else ''), *args, cache_timeout = 0, show_error = False, **kwargs)

            if data:
                return data
        except HTTPError as e:
            sc = e.response.status_code
            if sc == 403:
                # Try login and do again
                if not is_repeat:
                    self.login()
                    return self.call(call, parameters = parameters, is_repeat = True, **kwargs)

            log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
        except:
            log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))

        return {}
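
The `HTTPError` branch is a login-and-retry-once pattern: on a 403 the client re-authenticates and repeats the call, with `is_repeat` guarding against endless recursion when the credentials are simply wrong. A stripped-down sketch of the same idea, assuming a hypothetical client object with `login()` and requests-style `get()`:

    def call_api(client, endpoint, is_repeat=False):
        resp = client.get(endpoint)
        if resp.status_code == 403 and not is_repeat:
            client.login()                                   # refresh the session once
            return call_api(client, endpoint, is_repeat=True)
        resp.raise_for_status()                              # any other error propagates
        return resp.json()
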
Example #4
    def findViaAlternative(self, group):
        results = {"480p": [], "720p": [], "1080p": []}

        movie_name = getTitle(group)

        url = "%s?%s" % (self.urls["backup"], tryUrlencode({"s": movie_name}))
        try:
            data = self.getCache("hdtrailers.alt.%s" % getIdentifier(group), url, show_error=False)
        except HTTPError:
            log.debug("No alternative page found for: %s", movie_name)
            data = None

        if not data:
            return results

        try:
            html = BeautifulSoup(data, parse_only=self.only_tables_tags)
            result_table = html.find_all("h2", text=re.compile(movie_name))

            for h2 in result_table:
                # .lower() assumes find_all(text=...) yielded the matching
                # strings; some BeautifulSoup versions return Tags here instead
                if "trailer" in h2.lower():
                    parent = h2.parent.parent.parent
                    trailerLinks = parent.find_all("a", text=re.compile("480p|720p|1080p"))
                    try:
                        for trailer in trailerLinks:
                            results[trailer].insert(0, trailer.parent["href"])
                    except:
                        pass

        except AttributeError:
            log.debug("No trailers found in via alternative.")

        return results
Example #5
 def buildUrl(self, media, host):
     arguments = tryUrlencode({
         'user': host['name'],
         'passkey': host['pass_key'],
         'imdbid': getIdentifier(media),
     })
     return '%s?%s' % (host['host'], arguments)
Example #6
    def _searchOnTitle(self, title, movie, quality, results):

        q = '%s %s' % (title, movie['info']['year'])
        params = tryUrlencode({
            'search': q,
            'catid': ','.join([str(x) for x in self.getCatId(quality)]),
            'user': self.conf('username', default = ''),
            'api': self.conf('api_key', default = ''),
        })

        if len(self.conf('custom_tag')) > 0:
            params = '%s&%s' % (params, self.conf('custom_tag'))

        nzbs = self.getJsonData(self.urls['search'] % params)

        if isinstance(nzbs, list):
            for nzb in nzbs:

                results.append({
                    'id': nzb.get('nzbid'),
                    'name': toUnicode(nzb.get('release')),
                    'age': self.calculateAge(tryInt(nzb.get('usenetage'))),
                    'size': tryInt(nzb.get('sizebytes')) / 1024 / 1024,
                    'url': nzb.get('getnzb'),
                    'detail_url': nzb.get('details'),
                    'description': nzb.get('weblink')
                })
Example #7
    def getMessages(self, last_check = 0):

        data = self.getJsonData(self.urls['messages'] % tryUrlencode({
            'last_check': last_check,
        }), headers = self.getRequestHeaders(), cache_timeout = 10)

        return data
Example #8
    def _buildUrl(self, query, quality):

        cat_ids = self.getCatId(quality)

        if not cat_ids:
            log.warning('Unable to find category ids for identifier "%s"', quality.get("identifier"))
            return None

        return self.urls["search"] % ("&".join(("l%d=" % x) for x in cat_ids), tryUrlencode(query).replace("%", "%%"))
Example #9
    def buildUrl(self, title, media, quality):
        cat_id = self.getCatId(quality)[0]
        url = self.urls['search'] % (cat_id, cat_id)

        arguments = tryUrlencode({
            'search': '%s %s' % (title, media['info']['year']),
            'method': 2,
        })
        query = "%s&%s" % (url, arguments)

        return query
Example #10
 def buildUrl(self, media, quality):
     query = tryUrlencode(
         {
             "q": getIdentifier(media),
             "m": "n",
             "max": 400,
             "adv_age": Env.setting("retention", "nzb"),
             "adv_sort": "date",
             "adv_col": "on",
             "adv_nfo": "on",
             "minsize": quality.get("size_min"),
             "maxsize": quality.get("size_max"),
         }
     )
     return query
Example #11
    def request(self, command, client):
        url = 'http://%s:%s/xbmcCmds/xbmcHttp/?%s' % (
            client['address'],
            client['port'],
            tryUrlencode(command)
        )

        headers = {}

        try:
            self.plex.urlopen(url, headers = headers, timeout = 3, show_error = False)
        except Exception as err:
            log.error("Couldn't sent command to Plex: %s", err)
            return False

        return True
Example #12
    def request(self, call = '', params = {}, return_key = None):

        params = dict((k, v) for k, v in params.items() if v)
        params = tryUrlencode(params)

        try:
            url = 'https://api.themoviedb.org/3/%s?api_key=%s%s' % (call, self.conf('api_key'), '&%s' % params if params else '')
            data = self.getJsonData(url, show_error = False)
        except:
            log.debug('Movie not found: %s, %s', (call, params))
            data = None

        if data and return_key and return_key in data:
            data = data.get(return_key)

        return data
Example #13
    def notify(self, message = '', data = None, listener = None):
        # note: the incoming data argument is unused; the payload is rebuilt here
        data = {
            'title': self.default_title,
            'text': toUnicode(message),
            'sender': toUnicode("CouchPotato"),
            'image': 'https://raw.github.com/RuudBurger/CouchPotatoServer/master/whatpotato/static/images/homescreen.png',
        }

        try:
            self.urlopen(self.urls['api'] % (self.conf('api_key'), tryUrlencode(data)), show_error = False)
            return True
        except:
            log.error('Toasty failed: %s', traceback.format_exc())

        return False
Example #14
    def call(self, request_params, use_json = True, **kwargs):

        url = cleanHost(self.conf('host'), ssl = self.conf('ssl')) + 'api?' + tryUrlencode(mergeDicts(request_params, {
            'apikey': self.conf('api_key'),
            'output': 'json'
        }))

        data = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()}, **kwargs)
        if use_json:
            d = json.loads(data)
            if d.get('error'):
                log.error('Error getting data from SABNZBd: %s', d.get('error'))
                return {}

            return d.get(request_params['mode']) or d
        else:
            return data
Example #15
    def addToLibrary(self, message = None, group = None):
        if self.isDisabled(): return
        if not group: group = {}

        host = self.conf('host')
        mount = self.conf('mount')
        database = self.conf('database')

        if mount:
            log.debug('Try to mount network drive via url: %s', mount)
            try:
                self.urlopen(mount)
            except:
                return False

        params = {
            'arg0': 'scanner_start',
            'arg1': database,
            'arg2': 'background',
            'arg3': '',
        }
        params = tryUrlencode(params)
        update_url = 'http://%(host)s:8008/metadata_database?%(params)s' % {'host': host, 'params': params}

        try:
            response = self.urlopen(update_url)
        except:
            return False

        try:
            et = etree.fromstring(response)
            result = et.findtext('returnValue')
        except SyntaxError as e:
            log.error('Unable to parse XML returned from the Popcorn Hour: %s', e)
            return False

        if int(result) > 0:
            log.error('Popcorn Hour returned an errorcode: %s', result)
            return False
        else:
            log.info('NMJ started background scan')
            return True
Example #16
    def _searchOnTitle(self, title, movie, quality, results):

        url = self.urls["search"] % (
            tryUrlencode("%s %s" % (title.replace(":", ""), movie["info"]["year"])),
            self.getCatId(quality)[0],
        )
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find("table", attrs={"border": "1"})
                if not result_table:
                    return

                entries = result_table.find_all("tr")

                for result in entries[1:]:
                    cells = result.find_all("td")

                    link = cells[1].find("a", attrs={"class": "index"})

                    full_id = link["href"].replace("details.php?id=", "")
                    torrent_id = full_id[:6]
                    name = toUnicode(link.get("title", link.contents[0]).encode("ISO-8859-1")).strip()

                    results.append(
                        {
                            "id": torrent_id,
                            "name": name,
                            "url": self.urls["download"] % (torrent_id, name),
                            "detail_url": self.urls["detail"] % torrent_id,
                            "size": self.parseSize(cells[6].contents[0] + cells[6].contents[2]),
                            "seeders": tryInt(cells[8].find("span").contents[0]),
                            "leechers": tryInt(cells[9].find("span").contents[0]),
                        }
                    )

            except:
                log.error("Failed to parsing %s: %s", (self.getName(), traceback.format_exc()))
Example #17
    def _searchOnTitle(self, title, movie, quality, results):

        url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year'])), self.getCatId(quality)[0])
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find(attrs = {'id': 'torrenttable'})

                if not result_table:
                    log.error('Failed to generate result_table')
                    return

                entries = result_table.find_all('tr')

                for result in entries[1:]:
                    cells = result.find_all('td')
                    link = result.find('a', attrs = {'class': 'index'})
                    torrent_id = link['href'].replace('download.php/','').split('/')[0]
                    torrent_file = link['href'].replace('download.php/','').split('/')[1]
                    size = self.parseSize(cells[5].contents[0] + cells[5].contents[2])
                    name_row = cells[1].contents[0]
                    name = name_row.getText()
                    seeders_row = cells[6].contents[0]
                    seeders = seeders_row.getText()

                    results.append({
                        'id': torrent_id,
                        'name': name,
                        'url': self.urls['download'] % (torrent_id, torrent_file),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': size,
                        'seeders': seeders,
                    })

            except:
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
Example #18
    def search(self, q, limit = 12):

        name_year = fireEvent('scanner.name_year', q, single = True)

        if not name_year or (name_year and not name_year.get('name')):
            name_year = {
                'name': q
            }

        cache_key = 'omdbapi.cache.%s' % q
        url = self.urls['search'] % tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')})
        cached = self.getCache(cache_key, url, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})

        if cached:
            result = self.parseMovie(cached)
            if result.get('titles') and len(result.get('titles')) > 0:
                log.info('Found: %s', result['titles'][0] + ' (' + str(result.get('year')) + ')')
                return [result]

            return []

        return []
Example #19
    def getAuthorizationUrl(self, host = None, **kwargs):

        callback_url = cleanHost(host) + '%snotify.%s.credentials/' % (Env.get('api_base').lstrip('/'), self.getName().lower())

        oauth_consumer = oauth2.Consumer(self.consumer_key, self.consumer_secret)
        oauth_client = oauth2.Client(oauth_consumer)

        resp, content = oauth_client.request(self.urls['request'], 'POST', body = tryUrlencode({'oauth_callback': callback_url}))

        if resp['status'] != '200':
            log.error('Invalid response from Twitter requesting temp token: %s', resp['status'])
            return {
                'success': False,
            }
        else:
            self.request_token = dict(parse_qsl(content))

            auth_url = self.urls['authorize'] + ("?oauth_token=%s" % self.request_token['oauth_token'])

            log.info('Redirecting to "%s"', auth_url)
            return {
                'success': True,
                'url': auth_url,
            }
Example #20
    def _search(self, media, quality, results):

        movie_title = getTitle(media)
        quality_id = quality['identifier']

        params = mergeDicts(self.quality_search_params[quality_id].copy(), {
            'order_by': 'relevance',
            'order_way': 'descending',
            'searchstr': getIdentifier(media)
        })

        url = '%s?json=noredirect&%s' % (self.urls['torrent'], tryUrlencode(params))
        res = self.getJsonData(url)

        try:
            if 'Movies' not in res:
                return

            authkey = res['AuthKey']
            passkey = res['PassKey']

            for ptpmovie in res['Movies']:
                if 'Torrents' not in ptpmovie:
                    log.debug('Movie %s (%s) has NO torrents', (ptpmovie['Title'], ptpmovie['Year']))
                    continue

                log.debug('Movie %s (%s) has %d torrents', (ptpmovie['Title'], ptpmovie['Year'], len(ptpmovie['Torrents'])))
                for torrent in ptpmovie['Torrents']:
                    torrent_id = tryInt(torrent['Id'])
                    torrentdesc = '%s %s %s' % (torrent['Resolution'], torrent['Source'], torrent['Codec'])
                    torrentscore = 0

                    if 'GoldenPopcorn' in torrent and torrent['GoldenPopcorn']:
                        torrentdesc += ' HQ'
                        if self.conf('prefer_golden'):
                            torrentscore += 5000
                    if 'FreeleechType' in torrent:
                        torrentdesc += ' Freeleech'
                        if self.conf('prefer_freeleech'):
                            torrentscore += 7000
                    if 'Scene' in torrent and torrent['Scene']:
                        torrentdesc += ' Scene'
                        if self.conf('prefer_scene'):
                            torrentscore += 2000
                    if 'RemasterTitle' in torrent and torrent['RemasterTitle']:
                        torrentdesc += self.htmlToASCII(' %s' % torrent['RemasterTitle'])

                    torrentdesc += ' (%s)' % quality_id
                    torrent_name = re.sub(r'[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) - %s' % (movie_title, ptpmovie['Year'], torrentdesc))

                    def extra_check(item):
                        return self.torrentMeetsQualitySpec(item, quality_id)

                    results.append({
                        'id': torrent_id,
                        'name': torrent_name,
                        'Source': torrent['Source'],
                        'Checked': 'true' if torrent['Checked'] else 'false',
                        'Resolution': torrent['Resolution'],
                        'url': '%s?action=download&id=%d&authkey=%s&torrent_pass=%s' % (self.urls['torrent'], torrent_id, authkey, passkey),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'date': tryInt(time.mktime(parse(torrent['UploadTime']).timetuple())),
                        'size': tryInt(torrent['Size']) / (1024 * 1024),
                        'seeders': tryInt(torrent['Seeders']),
                        'leechers': tryInt(torrent['Leechers']),
                        'score': torrentscore,
                        'extra_check': extra_check,
                    })

        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #21
 def buildUrl(self, title, media, quality):
     return (
         tryUrlencode(title.replace(':', '')),
         ','.join([str(x) for x in self.getCatId(quality)])
     )
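
Here `buildUrl` returns a tuple rather than a finished URL; the provider framework presumably interpolates it into a `%`-style search template later. Roughly (the template is hypothetical):

    url_template = 'https://tracker.example/search/%s/?cats=%s'  # hypothetical template
    parts = ('Movie+Title+2020', '8,9')                          # what buildUrl would return
    print(url_template % parts)
    # https://tracker.example/search/Movie+Title+2020/?cats=8,9
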
Example #22
 def buildUrl(self, title, media, quality):
     query = tryUrlencode({
         'search': '"%s" %s' % (title, media['info']['year']),
         'cat': self.getCatId(quality)[0],
     })
     return query
Example #23
 def buildUrl(self, media, quality):
     query = (tryUrlencode(fireEvent('library.query', media, single = True)),
              self.getSceneOnly(),
              self.getCatId(quality)[0])
     return query
Example #24
    def _searchOnTitle(self, title, media, quality, results):

        search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search']

        # Create search parameters
        search_params = self.buildUrl(title, media, quality)

        smin = quality.get('size_min')
        smax = quality.get('size_max')
        if smin and smax:
            search_params += ' size %sm - %sm' % (smin, smax)

        min_seeds = tryInt(self.conf('minimal_seeds'))
        if min_seeds:
            search_params += ' seed > %s' % (min_seeds - 1)

        rss_data = self.getRSSData(search_url % search_params)

        if rss_data:
            try:

                for result in rss_data:

                    name = self.getTextElement(result, 'title')
                    detail_url = self.getTextElement(result, 'link')
                    description = self.getTextElement(result, 'description')

                    magnet = splitString(detail_url, '/')[-1]
                    magnet_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (magnet.upper(), tryUrlencode(name), tryUrlencode('udp://tracker.openbittorrent.com/announce'))

                    reg = re.search(r'Size: (?P<size>\d+) MB Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)', six.text_type(description))
                    size = reg.group('size')
                    seeds = reg.group('seeds').replace(',', '')
                    peers = reg.group('peers').replace(',', '')

                    results.append({
                        'id': magnet,
                        'name': six.text_type(name),
                        'url': magnet_url,
                        'detail_url': detail_url,
                        'size': tryInt(size),
                        'seeders': tryInt(seeds),
                        'leechers': tryInt(peers),
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
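
The magnet link above is assembled from the info-hash scraped off the detail URL: `xt=urn:btih:` carries the hash, `dn` a display name, and `tr` the tracker, with name and tracker percent-encoded. In isolation (the hash is a placeholder):

    from urllib.parse import quote_plus

    info_hash = 'ABCDEF0123456789ABCDEF0123456789ABCDEF01'  # placeholder 40-hex btih
    name = 'Movie Title 2020'
    tracker = 'udp://tracker.openbittorrent.com/announce'
    magnet = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (info_hash, quote_plus(name), quote_plus(tracker))
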
Example #25
 def buildUrl(self, media, page, cats):
     return (
         tryUrlencode('"%s"' % fireEvent('library.query', media, single = True)),
         page,
         ','.join(str(x) for x in cats)
     )
Example #26
    def _searchOnTitle(self, title, movie, quality, results):

        page = 0
        total_pages = 1
        cats = self.getCatId(quality)

        while page < total_pages:

            movieTitle = tryUrlencode('"%s" %s' % (title, movie['info']['year']))
            search_url = self.urls['search'] % (movieTitle, page, cats[0])
            page += 1

            data = self.getHTMLData(search_url)
            if data:
                try:

                    results_table = None

                    data_split = splitString(data, '<table')
                    soup = None
                    for x in data_split:
                        soup = BeautifulSoup(x)
                        results_table = soup.find('table', attrs = {'class': 'koptekst'})
                        if results_table:
                            break

                    if not results_table:
                        return

                    try:
                        pagelinks = soup.findAll(href = re.compile('page'))
                        page_numbers = [int(re.search('page=(?P<page_number>.+)', i['href']).group('page_number')) for i in pagelinks]
                        total_pages = max(page_numbers)
                    except:
                        pass

                    entries = results_table.find_all('tr')

                    for result in entries[1:]:
                        prelink = result.find(href = re.compile('details.php'))
                        link = prelink['href']
                        download = result.find('a', href = re.compile('download.php'))['href']

                        if link and download:

                            # bind the current row via a default argument; a plain
                            # closure over 'result' would evaluate it late and see
                            # only the last row of the loop
                            def extra_score(item, result = result):
                                trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None]
                                vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None]
                                confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None]
                                moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None]

                                return confirmed + trusted + vip + moderated

                            id = re.search(r'id=(?P<id>\d+)&', link).group('id')
                            url = self.urls['download'] % download

                            fileSize = self.parseSize(result.select('td.rowhead')[8].text)
                            results.append({
                                'id': id,
                                'name': toUnicode(prelink.find('b').text),
                                'url': url,
                                'detail_url': self.urls['detail'] % link,
                                'size': fileSize,
                                'seeders': tryInt(result.find_all('td')[2].string),
                                'leechers': tryInt(result.find_all('td')[3].string),
                                'extra_score': extra_score,
                                'get_more_info': self.getMoreInfo
                            })

                except:
                    log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
Example #27
 def buildUrl(self, title, media, quality):
     return tryUrlencode('"%s %s"' % (title, media["info"]["year"]))
Example #28
 def getSourceUrl(self, repo = None, repo_name = None, branch = None):
     return self.getJsonData(self.urls['updater'] % tryUrlencode({
         'repo': repo,
         'name': repo_name,
         'branch': branch,
     }), headers = self.getRequestHeaders())
Example #29
    def _searchOnHost(self, host, media, quality, results):

        query = self.buildUrl(media, host)
        url = '%s%s' % (self.getUrl(host['host']), query)
        nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})

        for nzb in nzbs:

            date = None
            spotter = None
            for item in nzb:
                if date and spotter:
                    break
                if item.attrib.get('name') == 'usenetdate':
                    date = item.attrib.get('value')
                    continue  # keep scanning so the poster attribute can still be found

                # Get the name of the person who posts the spot
                if item.attrib.get('name') == 'poster':
                    if "@spot.net" in item.attrib.get('value'):
                        spotter = item.attrib.get('value').split("@")[0]
                        continue

            if not date:
                date = self.getTextElement(nzb, 'pubDate')

            name = self.getTextElement(nzb, 'title')
            detail_url = self.getTextElement(nzb, 'guid')
            nzb_id = detail_url.split('/')[-1]

            if '://' not in detail_url:
                detail_url = (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id)

            if not name:
                continue

            name_extra = ''
            if spotter:
                name_extra = spotter

            description = ''
            if "@spot.net" in nzb_id:
                try:
                    # Get details for extended description to retrieve passwords
                    query = self.buildDetailsUrl(nzb_id, host['api_key'])
                    url = '%s%s' % (self.getUrl(host['host']), query)
                    nzb_details = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})[0]

                    description = self.getTextElement(nzb_details, 'description')

                    # Extract a password from the description
                    # the terminators are grouped so the alternation doesn't split the pattern
                    password = re.search('(?:' + self.passwords_regex + r')(?: *)(?:\:|\=)(?: *)(.*?)(?:\<br\>|\n|$)', description, flags = re.I).group(1)
                    if password:
                        name += ' {{%s}}' % password.strip()
                except:
                    log.debug('Error getting details of "%s": %s', (name, traceback.format_exc()))

            results.append({
                'id': nzb_id,
                'provider_extra': urlparse(host['host']).hostname or host['host'],
                'name': toUnicode(name),
                'name_extra': name_extra,
                'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024,
                'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host),
                'detail_url': detail_url,
                'content': self.getTextElement(nzb, 'description'),
                'description': description,
                'score': host['extra_score'],
            })
Example #30
 def search(self, q, limit = 5):
     return self.getJsonData(self.urls['search'] % tryUrlencode(q) + ('?limit=%s' % limit), headers = self.getRequestHeaders())