示例#1
0
    def sources(self, url, hostDict, hostprDict):
        """Collect playable links for a movie or series page.

        url: site-relative page path (falsy yields no sources).
        hostDict / hostprDict: known free / premium hoster names.
        Returns a list of source dicts (possibly empty); never raises.
        """
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)

            if 'tv-series' not in query:
                # Movie page: links live in the results table body.
                links = client.parseDOM(r, 'tbody')
                links = client.parseDOM(links, 'a', ret='href')
            else:
                # Series page: isolate the collapsible block for episode self.ep.
                links = client.parseDOM(r,
                                        'ul',
                                        attrs={'class': 'collapsible'})[0]
                pattern = 'href="#">.+?%d\s*<span class="right lmn-num-of-ep">(.+?)</table></div>' % self.ep
                links = re.findall(pattern, links)
                links = client.parseDOM(links, 'a', ret='href')

            for url in links:
                try:
                    if 'liomenoi' in url:
                        # Outbound redirector: real target is base64 in the query string.
                        url = re.findall('liomenoi.+?link=(.+?)&title', url)[0]
                        url = base64.b64decode(url)
                    if 'target' in url:
                        continue

                    if 'redvid' in url:
                        # Resolve the intermediate player page to its iframe source.
                        data = client.request(url)
                        url = client.parseDOM(data, 'iframe', ret='src')[0]

                    # Hosts known to be unusable for playback.
                    if any(x in url for x in [
                            '.online', 'xrysoi.se', 'filmer', '.bp',
                            '.blogger', 'youtu'
                    ]):
                        continue

                    if 'crypt' in url:
                        # Encrypted embeds carry the host name in the path.
                        host = re.findall('embed\/(.+?)\/', url)[0]
                    else:
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid:
                            continue

                    sources.append({
                        'source': host,
                        'quality': 'SD',   # site offers SD only
                        'language': 'gr',
                        'url': url,
                        'info': 'SUB',
                        'direct': False,
                        'debridonly': False
                    })
                except Exception:
                    # One bad link must not abort the whole scrape.
                    pass
            return sources
        except Exception:
            return sources
示例#2
0
    def __search(self, titles, year, content):
        """Search the site for any of *titles* and return the matching
        item's domain-stripped path, or None when nothing matches.

        titles: candidate title strings (titles[0] is the primary one).
        year: release year as a string, used for the fallback check.
        content: 'movies' selects the movies tab, anything else the series tab.
        """
        try:

            # One search URL per candidate title.
            query = [self.search_link % (urllib.quote_plus(cleantitle.getsearch(i))) for i in titles]

            query = [urlparse.urljoin(self.base_link, i) for i in query]

            # Normalised titles used for comparison against result captions.
            t = [cleantitle.get(i) for i in set(titles) if i] #cleantitle.get(titles[0])

            for u in query:
                try:
                    r = client.request(u)

                    r = client.parseDOM(r, 'div', attrs={'class': 'tab-content clearfix'})

                    # Restrict the results to the requested content tab.
                    if content == 'movies':
                        r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
                    else:
                        r = client.parseDOM(r, 'div', attrs={'id': 'series'})

                    # Each result <figcaption> carries a title attr and a link.
                    r = [dom_parser2.parse_dom(i, 'figcaption') for i in r]
                    data = [(i[0].attrs['title'], dom_parser2.parse_dom(i[0].content, 'a', req='href')) for i in r if i]
                    data = [i[1][0].attrs['href'] for i in data if cleantitle.get(i[0]) in t]
                    if data: return source_utils.strip_domain(data[0])
                    else:
                        # No caption match: follow the first result and verify
                        # its <h1> title/year against the request.
                        url = [dom_parser2.parse_dom(i[0].content, 'a', req='href') for i in r]
                        data = client.request(url[0][0]['href'])
                        data = re.findall('<h1><a.+?">(.+?)\((\d{4})\).*?</a></h1>', data, re.DOTALL)[0]
                        if titles[0] in data[0] and year == data[1]: return source_utils.strip_domain(url[0][0]['href'])
                except:pass

            return
        except:
            return
示例#3
0
def yandex(url):
    # Resolve a yadi.sk share page to its direct file URL via the
    # "do-get-resource-url" model endpoint.  Returns None on any failure.
    try:
        cookie = client.request(url, output='cookie')

        page = client.request(url, cookie=cookie)
        page = re.sub(r'[^\x00-\x7F]+', ' ', page)  # strip non-ASCII noise

        # Session key and resource id embedded in the page JSON.
        sk = re.findall('"sk"\s*:\s*"([^"]+)', page)[0]
        resource_id = re.findall('"id"\s*:\s*"([^"]+)', page)[0]

        # Random per-request client id, hex encoded.
        client_id = binascii.b2a_hex(os.urandom(16))

        payload = urllib_parse.urlencode({
            'idClient': client_id,
            'version': '3.9.2',
            'sk': sk,
            '_model.0': 'do-get-resource-url',
            'id.0': resource_id
        })

        response = client.request(
            'https://yadi.sk/models/?_m=do-get-resource-url',
            post=payload,
            cookie=cookie)

        return json.loads(response)['models'][0]['data']['file']
    except:
        return
def __getTrakt(url, post=None):
    """Perform an authenticated Trakt API call.

    url: path relative to BASE_URL.
    post: optional dict, JSON-encoded and sent as the request body.
    Returns (body, response_headers) on success, None on hard errors
    (423/5xx, 429 rate limit, 404).  On a 401/405 the stored refresh
    token is exchanged for a new access token and the call is retried once.
    """
    try:
        url = urllib_parse.urljoin(BASE_URL, url)
        post = json.dumps(post) if post else None
        headers = {'Content-Type': 'application/json', 'trakt-api-key': V2_API_KEY, 'trakt-api-version': 2}

        # Attach the bearer token only when an account is linked.
        if getTraktCredentialsInfo():
            headers.update({'Authorization': 'Bearer %s' % control.addon('plugin.video.koditvr').getSetting('trakt.token')})

        result = client.request(url, post=post, headers=headers, output='extended', error=True)
        result = utils.byteify(result)

        # 'extended' output: index 0 body, 1 status code (string), 2 headers.
        resp_code = result[1]
        resp_header = result[2]
        result = result[0]

        if resp_code in ['423', '500', '502', '503', '504', '520', '521', '522', '524']:
            log_utils.log('Trakt Error: %s' % str(resp_code))
            control.infoDialog('Trakt Error: ' + str(resp_code), sound=True)
            return
        elif resp_code in ['429']:
            log_utils.log('Trakt Rate Limit Reached: %s' % str(resp_code))
            control.infoDialog('Trakt Rate Limit Reached: ' + str(resp_code), sound=True)
            return
        elif resp_code in ['404']:
            log_utils.log('Object Not Found : %s' % str(resp_code))
            return

        # Anything other than an auth failure is final - return it.
        if resp_code not in ['401', '405']:
            return result, resp_header

        # Auth failed: exchange the refresh token for a new access token.
        koditvr = urllib_parse.urljoin(BASE_URL, '/koditvr/token')
        opost = {'client_id': V2_API_KEY, 'client_secret': CLIENT_SECRET, 'redirect_uri': REDIRECT_URI, 'grant_type': 'refresh_token', 'refresh_token': control.addon('plugin.video.koditvr').getSetting('trakt.refresh')}

        result = client.request(koditvr, post=json.dumps(opost), headers=headers)
        result = utils.json_loads_as_str(result)

        # Persist the new token pair before retrying.
        token, refresh = result['access_token'], result['refresh_token']
        print('Info - ' + str(token))
        control.addon('plugin.video.koditvr').setSetting(id='trakt.token', value=token)
        control.addon('plugin.video.koditvr').setSetting(id='trakt.refresh', value=refresh)

        headers['Authorization'] = 'Bearer %s' % token

        # Retry the original call once with the fresh token.
        result = client.request(url, post=post, headers=headers, output='extended', error=True)
        result = utils.byteify(result)
        return result[0], result[2]
    except:
        log_utils.log('getTrakt Error', 1)
        pass
示例#5
0
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     # Locate the show's index page: search the site, then match the
     # cleaned title with year/season/3D tags stripped from each result.
     try:
         q = self.tvsearch_link % quote_plus(cleantitle.query(tvshowtitle))
         q = urljoin(self.base_link, q.lower())
         html = client.request(q, referer=self.base_link)
         items = client.parseDOM(html,
                                 'div',
                                 attrs={'class': 'index_item.+?'})
         anchors = [dom.parse_dom(i, 'a', req=['href', 'title'])[0]
                    for i in items if i]
         if not anchors:
             return
         wanted = cleantitle.get(tvshowtitle)
         matches = []
         for a in anchors:
             stripped = re.sub(
                 '(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                 '',
                 a.attrs['title'],
                 flags=re.I)
             if wanted == cleantitle.get(stripped):
                 matches.append(a.attrs['href'])
         if not matches:
             return
         url = client.replaceHTMLCodes(matches[0])
         try:
             url = url.encode('utf-8')
         except:
             pass
         return url
     except:
         source_utils.scraper_error('PRIMEWIRE')
         return
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     # Resolve a show page to the page of one specific episode by scanning
     # the season table rows for matching season/episode cells.
     try:
         if not url:
             return
         html = client.request(url)
         rows = client.parseDOM(html,
                                'div',
                                attrs={'class': 'season-table-row'})
         for row in rows:
             try:
                 url = re.compile('<a href="(.+?)">',
                                  re.DOTALL).findall(row)[0]
             except:
                 pass
             sea = client.parseDOM(row,
                                   'div',
                                   attrs={'class':
                                          'season-table-season'})[0]
             epi = client.parseDOM(row,
                                   'div',
                                   attrs={'class': 'season-table-ep'})[0]
             season_hit = cleantitle.get(season) in cleantitle.get(sea)
             episode_hit = cleantitle.get(episode) in cleantitle.get(epi)
             if season_hit and episode_hit:
                 return self.base_link + url
         return
     except:
         failure = traceback.format_exc()
         log_utils.log('watchseriestv - Exception: \n' + str(failure))
         return
示例#7
0
 def searchMovie(self, title, year, aliases, headers):
     """Probe /movie/ URLs for each alias, first without and then with the
     release year appended; return the first URL resolving to something
     other than the homepage, else None (including for an empty alias
     list, which previously raised a masked NameError on `url`)."""
     try:
         url = None  # guard against an empty aliases list
         for alias in aliases:
             url = '%s/movie/%s' % (self.base_link, cleantitle.geturl(alias['title']))
             url = client.request(url, headers=headers, output='geturl', timeout='10')
             if url is not None and url != self.base_link:
                 break
         if url is None:
             for alias in aliases:
                 url = '%s/movie/%s-%s' % (self.base_link, cleantitle.geturl(alias['title']), year)
                 url = client.request(url, headers=headers, output='geturl', timeout='10')
                 if url is not None and url != self.base_link:
                     break
         return url
     except:
         return
示例#8
0
    def __search(self, titles, year):
        # Query the site's JSON search endpoint with each candidate title
        # (text before any ':') and return the domain-stripped path of the
        # best match: unique year hit, else first cleaned-title hit.
        try:
            stems = [t.split(':')[0] for t in titles]
            urls = [
                urlparse.urljoin(
                    self.base_link,
                    self.search_link % (urllib.quote_plus(cleantitle.getsearch(s))))
                for s in stems
            ]
            wanted = [cleantitle.get(t) for t in set(titles) if t]
            for u in urls:
                try:
                    payload = json.loads(client.request(u))
                    entries = [(payload[k]['url'], payload[k]['title'],
                                payload[k]['extra']) for k in payload if k]
                    hits = [(e[0], e[1]) for e in entries
                            if e[2]['date'] == year]
                    if len(hits) == 1:
                        return source_utils.strip_domain(hits[0][0])
                    titled = [h[0] for h in hits
                              if cleantitle.get(h[1]) in wanted]
                    return source_utils.strip_domain(titled[0])

                except BaseException:
                    pass

            return
        except BaseException:
            return
示例#9
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Map a show URL to the Ororo episode link: match primarily on
        # season/episode numbers, fall back to the premiere date.
        try:
            if self.user == '' or self.password == '':
                raise Exception()

            if url is None:
                return

            full_url = urljoin(self.base_link, url)

            resp = client.request(full_url, headers=self.headers)
            episodes = [(str(e['id']), str(e['season']), str(e['number']),
                         str(e['airdate']))
                        for e in json.loads(resp)['episodes']]

            matches = [e for e in episodes
                       if season == '%01d' % int(e[1])
                       and episode == '%01d' % int(e[2])]
            matches += [e for e in episodes if premiered == e[3]]

            return self.episode_link % matches[0][0]
        except Exception as e:
            log_utils.log('Ororo: ' + str(e))
            return
示例#10
0
 def sources(self, url, hostDict, hostprDict):
     # Scrape every "watch-button" link from the page and keep those whose
     # host is recognised, skipping URLs already collected.
     try:
         sources = []
         if url is None:
             return sources
         hosts = hostprDict + hostDict
         page = client.request(url)
         links = re.compile(
             'class="watch-button" data-actuallink="(.+?)"').findall(page)
         for link in links:
             if link in str(sources):
                 continue
             quality, info = source_utils.get_release_quality(link, link)
             valid, host = source_utils.is_host_valid(link, hosts)
             if not valid:
                 continue
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': 'en',
                 'url': link,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except:
         log_utils.log('Watchepisodes4 Exception', 1)
         return sources
示例#11
0
 def sources(self, url, hostDict, hostprDict):
     """Extract hoster links (the page's var link_* variables).

     The advertised quality label is mapped through source_utils.check_url.
     If the page carries no quality tag we now fall back to 'SD' with empty
     info; previously `quality`/`info` were left undefined, so building the
     source dict raised NameError and the whole scrape silently returned [].
     """
     try:
         sources = []
         if url is None:
             return sources
         html = client.request(url)
         quality, info = 'SD', ''  # defaults when no quality tag is present
         tags = re.compile(
             '<div>Quanlity: <span class="quanlity">(.+?)</span></div>',
             re.DOTALL).findall(html)
         for qual in tags:
             quality = source_utils.check_url(qual)
             info = qual
         links = re.compile('var link_.+? = "(.+?)"',
                            re.DOTALL).findall(html)
         for url in links:
             if not url.startswith('http'):
                 url = "https:" + url
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'info': info,
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         return sources
示例#12
0
    def __search(self, titles, year):
        # Search for "<title stem> <year>" and return the domain-stripped
        # link of the best card-content match: unique year hit, else first
        # cleaned-title hit.
        try:
            stems = [t.split(':')[0] for t in titles]
            urls = [
                urlparse.urljoin(
                    self.base_link,
                    self.search_link %
                    (urllib.quote_plus(cleantitle.getsearch(s + ' ' + year))))
                for s in stems
            ]
            wanted = [cleantitle.get(t) for t in set(titles) if t]
            for u in urls:
                try:
                    html = client.request(u)
                    cards = client.parseDOM(html,
                                            'div',
                                            attrs={'class': 'card-content'})
                    anchors = dom_parser2.parse_dom(cards, 'a')
                    pairs = [(a.attrs['href'], a.content)
                             for a in anchors if a]
                    pairs = [p for p in pairs
                             if year == re.findall('(\d{4})', p[1],
                                                   re.DOTALL)[0]]
                    if len(pairs) == 1:
                        return source_utils.strip_domain(pairs[0][0])
                    hits = [p[0] for p in pairs
                            if cleantitle.get(p[1]) in wanted]
                    return source_utils.strip_domain(hits[0])

                except:
                    pass

            return
        except:
            return
 def sources(self, url, hostDict, hostprDict):
     # Gather the page's outbound "nofollow" links and keep the ones
     # pointing at a recognised file host.
     try:
         hosts = hostprDict + hostDict
         sources = []
         if url is None:
             return sources
         page = client.request(url)
         found = re.compile('<a rel="nofollow" target="blank" href="(.+?)"',
                            re.DOTALL).findall(page)
         for link in found:
             if not link.startswith('http'):
                 link = "https:" + link
             valid, host = source_utils.is_host_valid(link, hosts)
             if not valid:
                 continue
             quality, info = source_utils.get_release_quality(link, link)
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': 'en',
                 'url': link,
                 'info': info,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('watchseriestv - Exception: \n' + str(failure))
         return sources
示例#14
0
    def sources(self, url, hostDict, hostprDict):
        """Build self._sources from an RSS torrent search feed.

        url: urlencoded query dict (title/year or tvshowtitle/season/episode).
        Requires an active debrid account.  Spawns one worker thread per
        <item> in the feed, each running self._get_items on the item body.
        Returns the accumulated self._sources list; never raises.
        """
        self._sources = []
        try:
            if url is None:
                return self._sources

            # Torrent results are debrid-only; bail out without an account.
            if debrid.status() is False:
                return self._sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
            # hdlr: the tag _get_items filters on - SxxEyy for shows, year for movies.
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            # '8' vs '4' look like site category ids (tv vs movies) - TODO confirm.
            if 'tvshowtitle' in data:
                url = self.search.format('8', quote(query))
            else:
                url = self.search.format('4', quote(query))

            self.hostDict = hostDict + hostprDict
            headers = {'User-Agent': client.agent()}
            _html = client.request(url, headers=headers)
            threads = []
            for i in re.findall(r'<item>(.+?)</item>', _html, re.DOTALL):
                threads.append(workers.Thread(self._get_items, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except BaseException:
            return self._sources
    def _get_items(self, url):
        """Parse a results page into (name, magnet, size-label, size-GB)
        tuples, keeping only rows whose cleaned title matches self.title
        and whose SxxEyy/year tag matches self.hdlr."""
        items = []
        try:
            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = client.parseDOM(r, 'tr', attrs={'class': 't-row'})
            posts = [i for i in posts if not 'racker:' in i]
            for post in posts:
                data = client.parseDOM(post, 'a', ret='href')
                url = [i for i in data if 'magnet:' in i][0]
                name = client.parseDOM(post, 'a', ret='title')[0]
                t = name.split(self.hdlr)[0]

                # Strip parentheses before comparing titles.  The previous
                # pattern '(|)' matched only the empty string and was a no-op.
                if not cleantitle.get(re.sub(r'[()]', '', t)) == cleantitle.get(self.title): continue

                try:
                    y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                if not y == self.hdlr: continue

                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''

                items.append((name, url, isize, dsize))
            return items
        except:
            log_utils.log('glodls2_exc', 1)
            return items
示例#16
0
 def _get_items(self, url):
     """Parse a listing page into (name, link, size-label, size-GB) tuples
     appended to self.items, keeping rows whose cleaned title matches
     self.title and whose SxxEyy/year tag matches self.hdlr."""
     try:
         headers = {'User-Agent': client.agent()}
         r = client.request(url, headers=headers)
         posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
         posts = client.parseDOM(posts, 'tr')
         for post in posts:
             data = dom.parse_dom(post, 'a', req='href')[1]
             link = urlparse.urljoin(self.base_link, data.attrs['href'])
             name = data.content
             t = name.split(self.hdlr)[0]
             # Strip parentheses before the title comparison.  The previous
             # pattern '(|)' matched only the empty string and was a no-op.
             if not cleantitle.get(re.sub(r'[()]', '', t)) == cleantitle.get(
                     self.title):
                 continue
             try:
                 y = re.findall(
                     '[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                     name, re.I)[-1].upper()
             except BaseException:
                 y = re.findall(
                     '[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                     re.I)[-1].upper()
             if not y == self.hdlr: continue
             try:
                 size = re.findall(
                     '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                     post)[0]
                 dsize, isize = utils._size(size)
             except BaseException:
                 dsize, isize = 0, ''
             self.items.append((name, link, isize, dsize))
         return self.items
     except BaseException:
         return self.items
示例#17
0
    def sources(self, url, hostDict, hostprDict):
        # Ororo serves one direct HD stream per item; the API response
        # carries the file URL under 'url'.  Requires stored credentials.
        try:
            sources = []

            if url is None:
                return sources

            if self.user == '' or self.password == '':
                raise Exception()

            api_url = urljoin(self.base_link, url)
            resp = client.request(api_url, headers=self.headers)
            stream = json.loads(resp)['url']

            sources.append({
                'source': 'direct',
                'quality': 'HD',
                'language': 'en',
                'url': stream,
                'direct': True,
                'debridonly': False
            })

            return sources
        except Exception as e:
            log_utils.log('Ororo: ' + str(e))
            return sources
示例#18
0
 def sources(self, url, hostDict, hostprDict):
     # Query the torrent API (episode search by title, movie search by
     # imdb id) and emit debrid-only torrent sources with size info.
     try:
         sources = []
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         params = urlparse.parse_qs(url)
         params = dict([(k, params[k][0]) if params[k] else (k, '')
                        for k in params])
         if 'tvshowtitle' in params:
             query = '%s S%02dE%02d' % (params['tvshowtitle'],
                                        int(params['season']),
                                        int(params['episode']))
         else:
             query = '%s' % params['imdb']
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         token = json.loads(client.request(self.token))["token"]
         if 'tvshowtitle' in params:
             search_link = self.tvsearch.format(token,
                                                urllib.quote_plus(query),
                                                'format=json_extended')
         else:
             search_link = self.msearch.format(token, params['imdb'],
                                               'format=json_extended')
         results = json.loads(client.request(search_link))['torrent_results']
         for entry in results:
             name = entry["title"]
             link = entry["download"].split('&tr')[0]
             quality, info = source_utils.get_release_quality(name, link)
             dsize = float(entry["size"]) / 1073741824  # bytes -> GiB
             info.insert(0, '%.2f GB' % dsize)
             info = ' | '.join(info)
             sources.append({
                 'source': 'Torrent',
                 'quality': quality,
                 'language': 'en',
                 'url': link,
                 'info': info,
                 'direct': False,
                 'debridonly': True,
                 'size': dsize
             })
         return sources
     except BaseException:
         return sources
示例#19
0
    def resolve(self, url):
        # 'crypt' links point at an interstitial page: pull the real target
        # out of its onclick redirect and strip the playvideo-/playvid noise.
        if 'crypt' not in url:
            return url
        data = client.request(url)
        target = re.findall('''onclick="location.href=['"]([^"']+)["']''',
                            data, re.DOTALL)[0]
        return re.sub('(?:playvideo-|\?playvid)', '', target)
    def sources(self, url, hostDict, hostprDict):
        # Direct-link scraper: search the blog, open posts whose URL and
        # <title> match the movie, and emit every <source src> found.
        sources = []
        try:
            if url is None: return sources

            params = parse_qs(url)
            params = dict([(k, params[k][0]) if params[k] else (k, '')
                           for k in params])

            title = params['title']
            year = params['year']
            # NOTE(review): only years <= 1970 pass - presumably a
            # classics-only site; preserved as-is.
            if int(year) > 1970:
                return sources

            search_url = self.base_link + self.search_link % title.lower().replace(' ', '+')

            html = client.request(search_url)
            for post in client.parseDOM(html, 'div', attrs={'class': 'post'}):
                link = client.parseDOM(post, 'a', ret='href')[0]
                if self.base_link not in link:
                    continue
                if 'webcache' in link:
                    continue
                if cleantitle.geturl(title) not in link:
                    continue
                page = client.request(link)
                page_title = re.compile('<title.+?>(.+?)</title>',
                                        re.DOTALL).findall(page)[0]
                if title not in page_title or year not in page_title:
                    continue
                for stream in client.parseDOM(page, 'source', ret='src'):
                    sources.append({
                        'source': 'direct',
                        'quality': 'SD',
                        'language': 'en',
                        'url': stream,
                        'info': '',
                        'direct': True,
                        'debridonly': False
                    })
            return sources
        except:
            log_utils.log('BNWM1 - Exception', 1)
            return sources
示例#21
0
	def get_sources(self, url):
		"""Scrape one results page and append magnet sources to self.sources.

		Rows are filtered by cleaned title, alias/year check, optional
		single-episode filter and self.min_seeders.  Errors are logged via
		source_utils.scraper_error; the method never raises.
		"""
		try:
			r = client.request(url)
			if not r:
				return
			div = client.parseDOM(r, 'div', attrs={'id': 'div2child'})

			for row in div:
				# NOTE(review): this re-parses the whole page `r` on every
				# iteration (shadowing `row`); duplicate results are later
				# skipped by the `url in str(self.sources)` check.  Confirm
				# whether parsing `row` was intended.
				row = client.parseDOM(r, 'div', attrs={'class': 'resultdivbotton'})
				if not row:
					return

				for post in row:
					# Info-hash and display name sit in hidden divs per row.
					hash = re.findall('<div id="hideinfohash.+?" class="hideinfohash">(.+?)<', post, re.DOTALL)[0]
					name = re.findall('<div id="hidename.+?" class="hideinfohash">(.+?)<', post, re.DOTALL)[0]
					name = unquote_plus(name)
					name = source_utils.clean_name(self.title, name)
					if source_utils.remove_lang(name, self.episode_title):
						continue

					url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)

					# Skip magnets already collected.
					if url in str(self.sources):
						continue

					if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
						continue

					# filter for episode multi packs (ex. S01E01-E17 is also returned in query)
					if self.episode_title:
						if not source_utils.filter_single_episodes(self.hdlr, name):
							continue

					# Seeder count gates the result; missing count defaults to 0.
					try:
						seeders = int(re.findall('<div class="resultdivbottonseed">([0-9]+|[0-9]+,[0-9]+)<', post, re.DOTALL)[0].replace(',', ''))
						if self.min_seeders > seeders:
							continue
					except:
						seeders = 0
						pass

					quality, info = source_utils.get_release_quality(name, url)

					# Size is informational only; absence is not fatal.
					try:
						size = re.findall('<div class="resultdivbottonlength">(.+?)<', post)[0]
						dsize, isize = source_utils._size(size)
						info.insert(0, isize)
					except:
						dsize = 0
						pass

					info = ' | '.join(info)

					self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
													'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
		except:
			source_utils.scraper_error('IDOPE')
			pass
示例#22
0
    def searchMovie(self, title, year, aliases, headers):
        """Probe /full-movie/ URLs for each alias, first without and then
        with the release year appended; return the first URL resolving to
        something other than the homepage, else None.

        Previously an empty alias list left `url` unbound, raising a
        NameError that then hit `log_utils` inside the handler; `url` is
        now initialised so the empty case returns None cleanly."""
        try:
            url = None  # guard against an empty aliases list
            for alias in aliases:
                url = '%s/full-movie/%s' % (self.base_link, cleantitle.geturl(alias['title']))
                url = client.request(url, headers=headers, output='geturl', timeout='10')
                if url is not None and url != self.base_link:
                    break
            if url is None:
                for alias in aliases:
                    url = '%s/full-movie/%s-%s' % (self.base_link, cleantitle.geturl(alias['title']), year)
                    url = client.request(url, headers=headers, output='geturl', timeout='10')
                    if url is not None and url != self.base_link:
                        break

            return url
        except:
            log_utils.log('cartoonhd - Exception', 1)
            return
示例#23
0
 def resolve(self, url):
     # Google links need the googlepass token dance; vidcloud pages embed
     # the file in a player config; anything else is already playable.
     if 'google' in url:
         return directstream.googlepass(url)
     if 'vidcloud' in url:
         page = client.request(url)
         return re.findall("file: '(.+?)'", page)[0]
     return url
示例#24
0
 def searchShow(self, title, season, episode, aliases, headers):
     """Resolve the site URL for a specific episode, or None.

     Previously an empty alias list left `url` unbound (NameError masked
     by the bare except); `url` is now initialised.  NOTE(review): the
     probe URL is built from `title`, not each `alias`, so every loop
     iteration requests the same path - preserved as-is, confirm intent.
     """
     try:
         url = None  # guard against an empty aliases list
         for alias in aliases:
             url = '%s/show/%s/season/%01d/episode/%01d' % (self.base_link, cleantitle.geturl(title), int(season), int(episode))
             url = client.request(url, headers=headers, output='geturl', timeout='10')
             if url is not None and url != self.base_link:
                 break
         return url
     except:
         return
示例#25
0
def geturl(url):
    # Follow the URL to its final location.  If the redirect left the
    # original second-level domain, retry through up to three randomly
    # chosen proxies.  Returns None when nothing resolves.
    try:
        resolved = client.request(url, output='geturl')
        if resolved is None: return resolved

        def domain(u):
            return re.findall('([\w]+)[.][\w]+$', urllib_parse.urlparse(u.strip().lower()).netloc)[0]

        if domain(url) == domain(resolved): return resolved

        # Double shuffle, then take three candidates.
        pool = sorted(get(), key=lambda x: random.random())
        pool = sorted(pool, key=lambda x: random.random())

        for proxy in pool[:3]:
            candidate = client.request(proxy + urllib_parse.quote_plus(url), output='geturl')
            if candidate is not None: return parse(candidate)
    except:
        pass
示例#26
0
 def resolve(self, url):
     """Resolve a page URL to a playable/embeddable link.

     /stream/ and /watch/ pages expose the player link via the
     iframe_play anchor; other pages carry packed JS that is unpacked
     (twice if needed) to recover the hosted location.  On unpack failure
     the redirect target is returned instead, or None if it loops back.
     """
     try:
         if '/stream/' in url or '/watch/' in url:
             r = client.request(url, referer=self.base_link)
             link = client.parseDOM(r,
                                    'a',
                                    ret='data-href',
                                    attrs={'id': 'iframe_play'})[0]
         else:
             try:
                 data = client.request(url, referer=self.base_link)
                 data = re.findall(r'\s*(eval.+?)\s*</script', data,
                                   re.DOTALL)[0]
                 link = jsunpack.unpack(data)
                 link = link.replace('\\', '')
                 # Payloads are sometimes double-packed.
                 if 'eval' in link:
                     link = jsunpack.unpack(link)
                 link = link.replace('\\', '')
                 host = re.findall('hosted=\'(.+?)\';var', link,
                                   re.DOTALL)[0]
                 if 'streamango' in host:
                     loc = re.findall('''loc\s*=\s*['"](.+?)['"]''', link,
                                      re.DOTALL)[0]
                     link = 'https://streamango.com/embed/{0}'.format(loc)
                 elif 'openload' in host:
                     loc = re.findall('''loc\s*=\s*['"](.+?)['"]''', link,
                                      re.DOTALL)[0]
                     link = 'https://openload.co/embed/{0}'.format(loc)
                 else:
                     # BUG FIX: the original call omitted the subject string,
                     # passing re.DOTALL as the text to search.
                     link = re.findall('''loc\s*=\s*['"](.+?)['"]\;''',
                                       link, re.DOTALL)[0]
             except:
                 source_utils.scraper_error('PRIMEWIRE')
                 link = client.request(url, output='geturl', timeout=10)
                 if link == url:
                     return
                 else:
                     return link
         return link
     except:
         source_utils.scraper_error('PRIMEWIRE')
         return
def authTrakt():
    """Run the Trakt OAuth device-code flow and persist the credentials.

    If credentials already exist, optionally clears them (after a yes/no
    dialog) and bails out.  Otherwise requests a device code, polls the
    token endpoint while showing a progress dialog, then stores the
    username, token and refresh token in the addon settings.

    NOTE: ``raise Exception()`` is used deliberately as control flow —
    every path funnels into the ``except`` handler, which reopens the
    addon settings page.
    """
    try:
        if getTraktCredentialsInfo() == True:
            # Already authorized: offer to de-authorize (wipe stored creds).
            if control.yesnoDialog(control.lang(32511) + '[CR]' + control.lang(32512), heading='Trakt'):
                control.addon('plugin.video.koditvr').setSetting(id='trakt.user', value='')
                control.addon('plugin.video.koditvr').setSetting(id='trakt.authed', value='')
                control.addon('plugin.video.koditvr').setSetting(id='trakt.token', value='')
                control.addon('plugin.video.koditvr').setSetting(id='trakt.refresh', value='')
            # Intentional jump to the except handler (reopen settings).
            raise Exception()

        # Request a device code the user must enter at the verification URL.
        result = getTraktAsJson('/koditvr/device/code', {'client_id': V2_API_KEY})
        verification_url = control.lang(32513) % result['verification_url']
        user_code = six.ensure_text(control.lang(32514) % result['user_code'])
        expires_in = int(result['expires_in'])
        device_code = result['device_code']
        interval = result['interval']

        progressDialog = control.progressDialog
        progressDialog.create('Trakt')

        # Poll once per `interval` seconds until the code expires, the user
        # cancels, or a token is granted.
        for i in list(range(0, expires_in)):
            try:
                percent = int(100 * float(i) / int(expires_in))
                progressDialog.update(max(1, percent), verification_url + '[CR]' + user_code)
                if progressDialog.iscanceled(): break
                time.sleep(1)
                # Only hit the token endpoint every `interval` ticks; the
                # raised Exception is swallowed by the inner except.
                if not float(i) % interval == 0: raise Exception()
                r = getTraktAsJson('/koditvr/device/token', {'client_id': V2_API_KEY, 'client_secret': CLIENT_SECRET, 'code': device_code})
                if 'access_token' in r: break
            except:
                pass

        try: progressDialog.close()
        except: pass

        # NOTE(review): if the loop finished without ever assigning `r`
        # (cancel/timeout), this raises NameError, which the outer except
        # absorbs — presumably intentional, but confirm.
        token, refresh = r['access_token'], r['refresh_token']

        headers = {'Content-Type': 'application/json', 'trakt-api-key': V2_API_KEY, 'trakt-api-version': 2, 'Authorization': 'Bearer %s' % token}


        # Fetch the profile to learn the username for display/settings.
        result = client.request(urllib_parse.urljoin(BASE_URL, '/users/me'), headers=headers)
        result = utils.json_loads_as_str(result)

        user = result['username']
        authed = '' if user == '' else str('yes')

        # NOTE(review): this prints the access token to the log — a
        # credential leak; consider removing or masking.
        print('info - ' + token)
        control.addon('plugin.video.koditvr').setSetting(id='trakt.user', value=user)
        control.addon('plugin.video.koditvr').setSetting(id='trakt.authed', value=authed)
        control.addon('plugin.video.koditvr').setSetting(id='trakt.token', value=token)
        control.addon('plugin.video.koditvr').setSetting(id='trakt.refresh', value=refresh)
        # Intentional jump to the except handler (reopen settings).
        raise Exception()
    except:
        control.openSettings('4.1')
示例#28
0
 def searchShow(self, title, season, episode, aliases, headers):
     """Probe the site for a show's episode URL, trying once per alias.

     Returns the resolved episode URL, or None when nothing matched or
     `aliases` is empty.
     """
     try:
         # BUG FIX: `url` was unbound when `aliases` was empty, raising
         # UnboundLocalError (silently swallowed below).  Initialize it so
         # the empty case cleanly returns None.
         url = None
         for alias in aliases:
             # NOTE(review): `alias` is never used — every iteration builds
             # the identical URL from `title`.  Presumably the intent was to
             # substitute each alias; confirm before changing behavior.
             url = '%s/tv-show/%s/season/%01d/episode/%01d' % (self.base_link, cleantitle.geturl(title), int(season), int(episode))
             url = client.request(url, headers=headers, output='geturl', timeout='10')
             if url is not None and url != self.base_link:
                 break
         return url
     except:
         log_utils.log('cartoonhd - Exception', 1)
         return
示例#29
0
    def ororo_moviecache(self, user):
        """Download the Ororo movie catalogue.

        Returns a list of ``(movie_id, imdb_id)`` string pairs, where the
        IMDB id is normalized to ``tt`` + digits only; None on any failure.
        """
        try:
            search_url = urlparse.urljoin(self.base_link, self.moviesearch_link)

            response = client.request(search_url, headers=self.headers)
            movies = json.loads(response)['movies']

            pairs = []
            for movie in movies:
                movie_id = str(movie['id'])
                # Normalize to 'tt' followed by the numeric part only.
                imdb = 'tt' + re.sub('[^0-9]', '', str(movie['imdb_id']))
                pairs.append((movie_id, imdb))
            return pairs
        except:
            return
示例#30
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape the site's search results for stream sources.

        Builds a search query from the title plus either the season tag
        (``S%02d``) or the year, filters result posts down to exact title
        matches, then resolves each matching post's link on a worker
        thread via ``self._get_sources``.  Returns ``self._sources``
        (possibly empty) in every case.
        """
        try:
            self._sources = []

            if url is None:
                return self._sources

            # `url` arrives as a urlencoded query string of metadata.
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            # Site-specific title quirks (e.g. Law & Order: SVU).
            title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')

            # Episodes match on 'SNN'; movies match on the release year.
            hdlr = 'S%02d' % (int(data['season'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            # Strip punctuation the site's search chokes on.
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = client.request(url)

            # Each result is rendered as a <figure> element.
            posts = client.parseDOM(r, 'figure')

            items = []
            for post in posts:
                try:
                    # The post's <img title> carries the full release name.
                    tit = client.parseDOM(post, 'img', ret='title')[0]
                    tit = client.replaceHTMLCodes(tit)
                    # Isolate the bare title before the S/year marker, then
                    # compare normalized forms to reject near-misses.
                    t = tit.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and')
                    if cleantitle.get(t) != cleantitle.get(title):
                        continue

                    if hdlr not in tit:
                        continue

                    url = client.parseDOM(post, 'a', ret='href')[0]

                    items.append((url, tit))

                except:
                    # Malformed post markup — skip it.
                    pass

            # Resolve every candidate concurrently; _get_sources appends
            # into self._sources.
            threads = []
            for i in items:
                threads.append(workers.Thread(self._get_sources, i[0], i[1], hostDict, hostprDict))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self._sources

        except:
            return self._sources