コード例 #1
0
ファイル: watch1080p_mv.py プロジェクト: Rockystar2/filmkodi
    def resolve(self, url):
        """Resolve a watch1080p page URL to a playable media URL.

        Follows the embedded player iframe, strips one base64 layer per
        window.atob() wrapper found in the page, and hands the final link
        to resolvers.request.  Falls back to resolving the raw iframe src
        when no atob-encoded source is present.  Returns the resolved URL
        (or the last intermediate value when resolving failed).
        """
        link = client.source(url)
        # The page embeds the real player behind a src="..." style attribute.
        url = re.compile('src="(.+?)" style').findall(link)[0]
        link = client.source(url)
        try:
            url = re.compile("window.atob\('(.+?)'\)\)").findall(link)[0]
            # One base64 decode per window.atob occurrence in the page.
            func_count = len(re.findall('window\.atob', link))
            for _i in xrange(func_count):
                url = base64.decodestring(url)
            url = re.compile("<source src='(.+?)'").findall(url)[0]
            # Resolve once and reuse: the original resolved twice (once just
            # for the log call), doubling the network round-trips, and passed
            # extra positional args to control.log, which the rest of this
            # file calls with a single pre-formatted string.
            resolved = resolvers.request(url)
            control.log(">> u2 %s |ENcoded %s" % (url, resolved))
            url = resolved

        except:
            try:
                url = re.compile('src="(.+?)"').findall(link)[0]

                # NOTE(review): host is computed but never used; kept because
                # an exception here deliberately skips resolvers.request.
                host = urlparse.urlparse(url).netloc
                host = host.replace('www.', '').replace('embed.', '')
                host = host.rsplit('.', 1)[0]
                host = host.lower()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                url = resolvers.request(url)

            except:
                pass
        return url
コード例 #2
0
ファイル: afdah_mv.py プロジェクト: AMObox/teammaniac
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Build Google Video source dicts for an Afdah title.

        Posts the scraped video_id to the site's info endpoint, extracts
        the google stream URLs from the response and keeps only 1080p/HD
        entries.  Returns an empty list on any failure (broad except by
        design for best-effort scraping).
        """
        try:
            sources = []

            if url == None: return sources

            # The site is served from two mirrors; pick one at random.
            self.base_link = random.choice([self.base_link_1, self.base_link_2])

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            video_id = re.compile('video_id *= *[\'|\"](.+?)[\'|\"]').findall(result)[0]
            post = {'video_id': video_id}

            result = client.source(urlparse.urljoin(self.base_link, self.info_link), post=post)

            # Response is an &-separated blob; the 'google' entry holds
            # comma-separated, url-encoded stream URLs.
            u = [i for i in result.split('&') if 'google' in i][0]
            u = urllib.unquote_plus(u)
            u = [urllib.unquote_plus(i.split('|')[-1]) for i in u.split(',')]
            u = [googleplus.tag(i)[0] for i in u]
            u = [i for i in u if i['quality'] in ['1080p', 'HD']]

            for i in u: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'Afdah', 'url': i['url']})

            return sources
        except:
            return sources
コード例 #3
0
    def get_show(self, imdb, tvdb, tvshowtitle, year):
        """Look up a show id by title, disambiguating by air year.

        Queries the site's JSON search API, keeps at most two exact
        (cleaned) title matches, fetches each candidate's detail JSON and
        picks the one whose 'state' mentions the given year (+/- 1).
        Returns the id as a utf-8 string, or None on failure.
        """
        try:
            query = self.search_link % (urllib.quote_plus(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = json.loads(result)
            result = result['data']['films']

            tvshowtitle = cleantitle.tv(tvshowtitle)
            # Accept adjacent years to absorb premiere-date drift between
            # metadata databases.
            years = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1)
            ]
            result = [(i['id'], i['title'].encode('utf-8')) for i in result]
            result = [i for i in result
                      if tvshowtitle == cleantitle.tv(i[1])][:2]
            result = [(i[0], self.base_link + self.detail_link % i[0])
                      for i in result]
            result = [(i[0], client.source(i[1])) for i in result]
            result = [(i[0], json.loads(i[1])['data']['state'])
                      for i in result]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = str(result)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #4
0
ファイル: alltube_mv_tv.py プロジェクト: borys76/filmkodi
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """List Alltube watch links from the episode table.

        Each row's data-iframe attribute is base64-encoded; it is decoded,
        fetched, and the inner iframe src becomes the source URL.  Rows
        that fail to parse are skipped; returns what was collected so far
        on outer failure.
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            links = client.parseDOM(result, 'tr')
            # Per row: (base64 iframe target, host label from img alt, type cell).
            links = [(client.parseDOM(i, 'a', attrs = {'class': 'watch'}, ret='data-iframe')[0],
                    client.parseDOM(i, 'img', ret='alt')[0],
                    client.parseDOM(i, 'td', attrs={'class':'text-center'})[0]) for i in links]

            for i in links:
                try:
                    result = client.source(i[0].decode('base64'))
                    url= client.parseDOM(result, 'iframe', ret='src')[0]
                    url = url.encode('utf-8')
                    print ("Q",videoquality.solvequality(url),url)
                    sources.append({'source': i[1], 'quality': 'SD', 'provider': 'Alltube', 'url': url, 'vtype':i[2]})
                except:
                    pass

            return sources
        except:
            return sources
コード例 #5
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Build Muchmovies sources for a movie or a single episode.

        A trailing '?episode=N' in the URL switches to episode mode.
        Server ids 2..11 are treated as gvideo streams, id 14 as openload
        embeds; page quality falls back to 'hd' when not advertised.
        Returns what was collected so far on failure.
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            # Episode mode is signalled by a '?episode=N' suffix.
            content = re.compile('(.+?)\?episode=\d*$').findall(url)
            content = 'movie' if len(content) == 0 else 'episode'

            try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
            except: pass

            url = urlparse.urljoin(self.base_link, url) + '/watching.html'

            result = client.source(url)
            movie = client.parseDOM(result, 'div', ret='movie-id', attrs = {'id': 'media-player'})[0]
            control.log('####### %s MOVIE' % movie)

            try:
                quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower()
                control.log('####### %s MOVIE quality ' % quality)

            except: quality = 'hd'
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd': quality = 'HD'
            else: quality = 'SD'

            url = '/movie/loadepisodes/%s' % movie
            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            # Each anchor yields: (server id, link id, episode-id attr,
            # leading digits of the label) parsed from its onclick args.
            result = client.parseDOM(result, 'div', attrs = {'class': 'les-content'})
            result = zip(client.parseDOM(result, 'a', ret='onclick'), client.parseDOM(result, 'a', ret='episode-id'), client.parseDOM(result, 'a'))
            result = [(re.sub('[^0-9]', '', i[0].split(',')[0]), re.sub('[^0-9]', '', i[0].split(',')[-1]), i[1], ''.join(re.findall('(\d+)', i[2])[:1])) for i in result]
            result = [(i[0], i[1], i[2], i[3]) for i in result]

            if content == 'episode': result = [i for i in result if i[3] == '%01d' % int(episode)]

            # Server ids 2..11 host gvideo streams.
            links = [('movie/load_episode/%s/%s' % (i[2], i[1]), 'gvideo') for i in result if 2 <= int(i[0]) <= 11]

            for i in links: sources.append({'source': i[1], 'quality': quality, 'provider': 'Muchmoviesv2', 'url': i[0]})

            # Server id 14 hosts openload embeds (other hosters disabled).
            links = []
            links += [('movie/loadEmbed/%s/%s' % (i[2], i[1]), 'openload') for i in result if i[0] == '14']
            #links += [('movie/loadEmbed/%s/%s' % (i[2], i[1]), 'videomega.tv') for i in result if i[0] == '13']
            #links += [('movie/loadEmbed/%s/%s' % (i[2], i[1]), 'videowood.tv') for i in result if i[0] == '12']

            #for i in links: sources.append({'source': i[1], 'quality': quality, 'provider': 'Onemovies', 'url': i[0], 'direct': False, 'debridonly': False})
            for i in links: sources.append({'source': i[1], 'quality': quality, 'provider': 'Muchmoviesv2', 'url': i[0]})
            control.log('####### MOVIE sources %s' % sources)

            return sources

            #for u in url: sources.append({'source': 'Muchmovies', 'quality': quality, 'provider': 'Muchmoviesv2', 'url': u})

        except:
            return sources
コード例 #6
0
ファイル: animeultima_tv.py プロジェクト: PsychoMC/PsychoMC
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        """Resolve a show URL + (season, episode) to an episode page URL.

        Uses the TVDB API to translate season/episode into the show's
        absolute episode number, then matches that number against the
        'epnum' column of the show's episode table.  Returns a utf-8 path
        string, or None on failure.
        """
        try:
            if url == None: return

            tvdb_link = self.tvdb_link % (self.tvdb_key, tvdb, int(season),
                                          int(episode))
            result = client.source(tvdb_link)

            # Absolute episode number as listed by TVDB.
            num = client.parseDOM(result, 'absolute_number')[0]
            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = result.decode('iso-8859-1').encode('utf-8')
            result = client.parseDOM(result, 'tr', attrs={'class': ''})
            result = [(client.parseDOM(i, 'a', ret='href')[0],
                       client.parseDOM(i, 'td', attrs={'class': 'epnum'})[0])
                      for i in result]
            result = [i[0] for i in result if num == i[1]][0]

            # Keep only the path component when a scheme/host is present.
            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #7
0
ファイル: 9movies_mv_tv.py プロジェクト: AMObox/teammaniac
    def resolve(self, url):
        """Resolve a 9movies hash_id querystring into a direct stream URL.

        Calls the site's ajax episode endpoint, then the returned "grabber"
        endpoint, and picks the file with the highest numeric label
        (resolution).  Finally follows redirects and normalizes the scheme
        from the CDN's requiressl flag.  Returns None on failure.
        """
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, "") for i in data])

            headers = {"X-Requested-With": "XMLHttpRequest"}

            # 'p' is a simple time-derived token expected by the endpoint.
            now = time.localtime()
            url = "/ajax/film/episode?hash_id=%s&f=&p=%s" % (data["hash_id"], now.tm_hour + now.tm_min)
            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url, headers=headers, referer=data["referer"])
            result = json.loads(result)

            grabber = {"flash": 1, "json": 1, "s": now.tm_min, "link": result["videoUrlHash"], "_": int(time.time())}
            grabber = result["grabber"] + "?" + urllib.urlencode(grabber)

            result = client.source(grabber, headers=headers, referer=url)
            result = json.loads(result)

            # Sort by the numeric part of each label and keep the best file.
            url = [(re.findall("(\d+)", i["label"]), i["file"]) for i in result if "label" in i and "file" in i]
            url = [(int(i[0][0]), i[1]) for i in url if len(i[0]) > 0]
            url = sorted(url, key=lambda k: k[0])
            url = url[-1][1]

            url = client.request(url, output="geturl")
            if "requiressl=yes" in url:
                url = url.replace("http://", "https://")
            else:
                url = url.replace("https://", "http://")
            return url
        except:
            return
コード例 #8
0
ファイル: ororo_tv_null.py プロジェクト: mpie/repo
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        """Find the data-href for a given season/episode on an Ororo page.

        Retries once with a login cookie when the season tabs are missing
        and credentials are configured.  Returns a utf-8 path, or None on
        failure.
        """
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            # A page without 'menu season-tabs' means we are not logged in;
            # fetch a session cookie and retry when credentials are set.
            if not 'menu season-tabs' in str(result) and not (
                    self.user == '' or self.password == ''):
                cookie = client.source(self.sign_link,
                                       post=self.post,
                                       output='cookie')
                result = client.source(url, cookie=cookie)

            result = client.parseDOM(
                result,
                'a',
                ret='data-href',
                attrs={'href': '#%01d-%01d' % (int(season), int(episode))})[0]

            # Keep only the path component when a scheme/host is present.
            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #9
0
ファイル: animeultima_tv.py プロジェクト: AMObox/teammaniac
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        """Resolve (season, episode) to an episode page URL via TVDB's
        absolute episode number, matched against the table's 'epnum'
        column.  Returns a utf-8 path string, or None on failure.
        """
        try:
            if url == None:
                return

            tvdb_link = self.tvdb_link % (self.tvdb_key, tvdb, int(season), int(episode))
            result = client.source(tvdb_link)

            # Absolute episode number as listed by TVDB.
            num = client.parseDOM(result, "absolute_number")[0]
            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)
            result = result.decode("iso-8859-1").encode("utf-8")
            result = client.parseDOM(result, "tr", attrs={"class": ""})
            result = [
                (client.parseDOM(i, "a", ret="href")[0], client.parseDOM(i, "td", attrs={"class": "epnum"})[0])
                for i in result
            ]
            result = [i[0] for i in result if num == i[1]][0]

            # Keep only the path component when a scheme/host is present.
            try:
                url = re.compile("//.+?(/.+)").findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode("utf-8")
            return url
        except:
            return
コード例 #10
0
    def get_sources(self, url):
        """Extract Hotstar HLS variants from the playback JSON.

        Rewrites the advertised src to the master.m3u8 form, fetches the
        playlist and emits one source per resolution found, attaching the
        session cookie to each stream URL.  Returns what was collected so
        far on failure.
        """
        logger.debug('%s SOURCES URL %s' % (self.__class__, url))
        try:
            quality = ''
            sources = []

            if url == None: return sources

            try: result = client.source(url, headers=self.headers)
            except: result = ''

            result = json.loads(result)

            try :
                url = result['resultObj']['src']
                # Force https, switch Akamai /z/ (HDS) to /i/ (HLS), request
                # the master playlist, and widen the bitrate list in the path.
                url = url.replace('http://','https://').replace('/z/','/i/').replace('manifest.f4m', 'master.m3u8').replace('2000,_STAR.','2000,3000,4500,_STAR.')
                cookie = client.source(url, headers=self.headers, output='cookie')
                result = client.source(url, headers=self.headers)

                # Pair each variant's vertical resolution with its URI line.
                match = re.compile("BANDWIDTH=[0-9]+,RESOLUTION=[0-9]+x(.+?),[^\n]*\n([^\n]*)\n").findall(result)
                if match:
                    for (res, url) in match:
                        try :
                            host = 'hotstar'
                            quality = self.res_map[res]
                            # Append the cookie after '|' — presumably so the
                            # downstream player sends it with segment requests.
                            url = '%s|Cookie=%s' % (url, cookie)
                            sources.append({'source': host, 'parts': '1', 'quality': quality, 'provider': 'Hotstar', 'url': url, 'direct':True})
                        except:
                            pass
            except:
                pass
            logger.debug('%s SOURCES [%s]' % (__name__,sources))
            return sources
        except:
            return sources
コード例 #11
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect Dayt forum sources: direct mail.ru / yadi.sk links from
        the '5throw' block, plus gvideo links behind nested 'pasep'
        iframes.  Returns what was collected so far on failure.
        """
        try:
            sources = []
            if url == None: return sources
            myurl = urlparse.urljoin(self.base_link, '/forum/' + url)
            result = client2.http_get(myurl)
            result10 = result
            result10 = client.parseDOM(result10, 'div', attrs={'id': '5throw'})[0]
            result10 = client.parseDOM(result10, 'a', attrs={'rel': 'nofollow'}, ret='href')
            # Quality is inferred solely from the URL text.
            mquality = 'HD'
            if '1080'in url: mquality = '1080p'
            for i in result10:
                if 'mail.ru' in i:
                    myresolve = resolvers.request(i)
                    sources.append({'source': 'MAIL.RU', 'quality': mquality, 'provider': 'Dayt', 'url': myresolve})
                if 'yadi.sk' in i:
                    myresolve = resolvers.request(i)
                    sources.append({'source': 'YADISK', 'quality': mquality, 'provider': 'Dayt', 'url': myresolve})

            # Follow the chain of nested iframes starting at the 'pasep' embed.
            result = client.parseDOM(result, 'iframe', ret='src')
            result = [i for i in result if 'pasep' in i][0]
            result = client.source(result)
            result = client.parseDOM(result, 'iframe', ret='src')[0]
            result = client.source(result)
            result = client.parseDOM(result, 'iframe', ret='src')[0]
            links = resolvers.request(result)
            for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Dayt', 'url': i[0]})
            return sources
        except:
            return sources
コード例 #12
0
ファイル: 9movies_mv_tv.py プロジェクト: c0ns0le/YCBuilds
    def resolve(self, url):
        """Resolve a 9movies hash_id querystring into a direct stream URL
        (ajax episode endpoint -> grabber endpoint -> best-labelled file).
        Returns None on failure.
        """
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            headers = {'X-Requested-With': 'XMLHttpRequest'}

            # 'p' is a simple time-derived token expected by the endpoint.
            now = time.localtime()
            url = '/ajax/film/episode?hash_id=%s&f=&p=%s' % (data['hash_id'], now.tm_hour + now.tm_min)
            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url, headers=headers, referer=data['referer'])
            result = json.loads(result)

            grabber = {'flash': 1, 'json': 1, 's': now.tm_min, 'link': result['videoUrlHash'], '_': int(time.time())}
            grabber = result['grabber'] + '?' + urllib.urlencode(grabber)

            result = client.source(grabber, headers=headers, referer=url)
            result = json.loads(result)

            # Sort by the numeric part of each label and keep the best file.
            url = [(re.findall('(\d+)', i['label']), i['file']) for i in result if 'label' in i and 'file' in i]
            url = [(int(i[0][0]), i[1]) for i in url if len(i[0]) > 0]
            url = sorted(url, key=lambda k: k[0])
            url = url[-1][1]

            # Normalize the scheme according to the CDN's requiressl flag.
            url = client.request(url, output='geturl')
            if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
            else: url = url.replace('https://', 'http://')
            return url
        except:
            return
コード例 #13
0
    def get_movie(self, imdb, title, year):
        """Search the site's ajax endpoint for a movie by exact (cleaned)
        title and verify the candidate page mentions the expected IMDB id.
        Returns a utf-8 path string, or None on failure.
        """
        try:
            query = urlparse.urljoin(self.base_link, self.search_link)
            post = urllib.urlencode({'action': 'ajaxy_sf', 'sf_value': title})

            result = client.source(query, post=post)
            # Normalize the HTML entities the search API emits.
            result = result.replace('&#8211;', '-').replace('&#8217;', '\'')
            result = json.loads(result)
            result = result['post']['all']

            title = cleantitle.movie(title)
            result = [
                i['post_link'] for i in result
                if title == cleantitle.movie(i['post_title'])
            ][0]

            # Reject matches whose page does not reference the IMDB id.
            check = client.source(result)
            if not str(imdb) in check: raise Exception()

            # Keep only the path component when a scheme/host is present.
            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #14
0
ファイル: apnaview_mv.py プロジェクト: kevintone/tdbaddon
    def get_source(self, item):
        """Extract player URLs from an ApnaView listing row and append one
        (possibly multi-part) source entry to self.sources.

        Parts are joined with '##' when a row links to several pages.
        Failures are swallowed (best-effort scraping).
        """
        quality = ''
        try:
            urls = client.parseDOM(item, "td")[1]
            urls = client.parseDOM(urls, "a", ret="href")
            # Replace each page link in-place with its embedded player URL;
            # index loop is intentional because urls[i] is reassigned.
            for i in range(0, len(urls)):
                uResult = client.source(urls[i], mobile=False)
                uResult = uResult.replace('\n', '').replace('\t', '')
                # The desktop view sometimes serves a DB error page; retry
                # with the mobile variant.
                if 'Could not connect to mysql! Please check your database' in uResult:
                    uResult = client.source(urls[i], mobile=True)

                item = client.parseDOM(uResult,
                                       "div",
                                       attrs={"class": "videoplayer"})[0]
                item = re.compile('(SRC|src|data-config)=[\'|\"](.+?)[\'|\"]'
                                  ).findall(item)[0][1]
                urls[i] = item
            host = client.host(urls[0])
            if len(urls) > 1:
                url = "##".join(urls)
            else:
                url = urls[0]
            self.sources.append({
                'source': host,
                'parts': str(len(urls)),
                'quality': quality,
                'provider': 'ApnaView',
                'url': url,
                'direct': False
            })
        except:
            pass
コード例 #15
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Resolve an MVsnap page to Google Video sources.

        Optionally narrows to an episode parsed from a trailing 'E<num>'
        query, follows the dropdown entry, and requires the final redirect
        to land on a google host.  Returns what was collected so far on
        failure.
        """
        try:
            sources = []

            if url == None: return sources

            # An 'E12'-style suffix in the query selects an episode.
            query = urlparse.urlparse(url).query
            try: query = '%02d' % int(re.compile('E(\d*)$').findall(query)[0])
            except: query = ''

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            result = client.parseDOM(result, 'select', attrs = {'id': 'myDropdown'})[0]
            result = zip(client.parseDOM(result, 'option', ret='value'), client.parseDOM(result, 'option'))
            result = [i[0] for i in result if query.endswith(i[1]) or query == ''][0]

            url = urlparse.urljoin(self.base_link, result)

            # Follow redirects; only google-hosted targets are accepted.
            url = client.source(url, output='geturl')
            if not 'google' in url: raise Exception()

            url = url.split('get_video_info')[0]
            url = resolvers.request(url)

            for i in url: sources.append({'source': 'GVideo', 'quality': i['quality'], 'provider': 'MVsnap', 'url': i['url']})

            return sources
        except:
            return sources
コード例 #16
0
ファイル: tvrelease_tv.py プロジェクト: soltje/filmkodi
    def check(self, i):
        """Scan a TVrelease post for supported file hosts and append live
        links to self.sources.

        go4up.com links are first expanded to their first mirror.  Only
        uptobox / hugefiles / uploadrocket links that pass the host's
        availability check are kept; everything else is silently skipped.
        """
        try:
            result = client.source(i['url'])
            result = client.parseDOM(result, 'td', attrs = {'class': 'td_cols'})[0]
            result = result.split('"td_heads"')
            result = client.parseDOM(result, 'a', ret='href')

            for url in result:
                try:
                    if 'go4up.com' in url:
                        # Expand a go4up mirror page to its first real link.
                        url = re.compile('//.+?/.+?/([\w]+)').findall(url)[0]
                        url = client.source(self.go4up_link_2 % url)
                        url = client.parseDOM(url, 'div', attrs = {'id': 'linklist'})[0]
                        url = client.parseDOM(url, 'a', ret='href')[0]

                    # Reduce 'www.host.com'-style netlocs to the bare name.
                    host = urlparse.urlparse(url).netloc
                    host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                    host = host.strip().lower()

                    if not host in ['uptobox', 'hugefiles', 'uploadrocket']: raise Exception()

                    if host == 'hugefiles': check = hugefiles.check(url)
                    elif host == 'uploadrocket': check = uploadrocket.check(url)
                    elif host == 'uptobox': check = uptobox.check(url)

                    if check == False: raise Exception()

                    self.sources.append({'source': host, 'quality': 'HD', 'provider': 'TVrelease', 'url': url, 'info': i['info']})
                except:
                    pass
        except:
            pass
コード例 #17
0
    def get_show(self, imdb, tvdb, tvshowtitle, year):
        """Find a show URL by exact (cleaned) title match on the TV listing
        page, verified against the IMDB id found on the show page.

        NOTE(review): tvsearch_link is joined without substituting the
        title — presumably it returns a full index that is filtered
        client-side; confirm against the provider class.
        """
        try:
            query = urlparse.urljoin(self.base_link, self.tvsearch_link)

            result = client.source(query)

            tvshowtitle = cleantitle.tv(tvshowtitle)

            result = zip(
                client.parseDOM(result, 'a', {'class': 'underilne'}, 'href'),
                client.parseDOM(result, 'a', {'class': 'underilne'}))
            result = [
                i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])
            ][0]
            # Verify the candidate page actually references the IMDB id.
            check = urlparse.urljoin(self.base_link, result)
            check = client.source(check)
            if not str(imdb) in check: raise Exception()

            # Keep only the path component when a scheme/host is present.
            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #18
0
ファイル: yadisk.py プロジェクト: anopid/filmkodi
def resolve(url):
    """Resolve a Yandex.Disk (yadi.sk) share URL to a direct file URL.

    Scrapes the share page for the session key ('sk') and resource id,
    then asks the yadi.sk models endpoint for the download link.
    Returns None on any failure.
    """
    try:
        cookie = client.source(url, output='cookie')
        page = client.source(url, cookie=cookie)

        sk = re.compile('"sk":"([^"]+)",').findall(page)[0]
        resource_id = re.compile('"id":"([^"]+)",').findall(page)[0]
        idclient = binascii.b2a_hex(os.urandom(16))

        # Guard clause: bail out when either scraped token is empty.
        if not sk or not resource_id:
            return

        post = {
            'idClient': idclient,
            'version': '3.9.2',
            'sk': sk,
            '_model.0': 'do-get-resource-url',
            'id.0': resource_id
        }
        result = client.source(
            'https://yadi.sk/models/?_m=do-get-resource-url',
            post=post,
            cookie=cookie)
        control.log("-----------------------------YANDEX RES %s" % result)
        result = json.loads(result)
        print('res', result)
        return result['models'][0]['data']['file']
    except:
        return
コード例 #19
0
ファイル: ororo_tv_null.py プロジェクト: mpie/repo
    def get_show(self, imdb, tvdb, tvshowtitle, year):
        """Locate a show on the Ororo index by cleaned title and air year.

        Logs in and refetches when the index is not visible and
        credentials are configured.  Returns a utf-8 path, or None.
        """
        try:
            result = client.source(self.base_link)
            # Missing 'index show' markup means we are logged out; retry
            # with a session cookie when user/password are set.
            if not "'index show'" in str(result) and not (self.user == '' or
                                                          self.password == ''):
                cookie = client.source(self.sign_link,
                                       post=self.post,
                                       output='cookie')
                result = client.source(self.base_link, cookie=cookie)

            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'index show'})
            # Per entry: (name, year value, href).
            result = [(client.parseDOM(i, 'a', attrs={'class': 'name'})[0],
                       client.parseDOM(i, 'span', attrs={'class': 'value'})[0],
                       client.parseDOM(i, 'a', ret='href')[0]) for i in result]

            tvshowtitle = cleantitle.tv(tvshowtitle)
            # Tolerate +/- 1 year drift between metadata sources.
            years = [str(year), str(int(year) + 1), str(int(year) - 1)]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [
                i[2] for i in result if tvshowtitle == cleantitle.tv(i[0])
            ][0]

            # Keep only the path component when a scheme/host is present.
            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #20
0
ファイル: watch1080p_mv.py プロジェクト: Azzudare/filmkodi
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect watch links from a Watch1080p detail page.

        Follows the 'watch detail' button to the server list and emits one
        source per anchor, inferring quality from the link label.
        Returns what was collected so far on failure.
        """
        try:
            sources = []
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            result = client.source(url).decode('utf-8').encode('utf-8')

            result = client.parseDOM(result, 'a', attrs = {'class': 'icons btn_watch_detail'},ret='href')
            result = client.source(result[0])
            result = client.parseDOM(result,'div',attrs= {'class':'server'})
            result = re.compile('(<a.*?</a>)', re.DOTALL).findall(result[0])
            # Per anchor: (href list, label list).
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in result]

            # Iterate the pairs directly instead of indexing by range(len()).
            for hrefs, labels in result:
                url = hrefs[0]
                label = labels[0]
                # '480' and anything unrecognized both map to SD.
                if '1080' in label: quality = '1080p'
                elif '720' in label: quality = 'HD'
                else: quality = 'SD'
                sources.append({'source': 'watch1080p', 'quality': quality, 'provider': 'Watch1080p', 'url': url})

            return sources
        except:
            return sources
コード例 #21
0
ファイル: ororo_tv.py プロジェクト: JRepoInd/lambda-addons
    def get_show(self, imdb, tvdb, tvshowtitle, year):
        """Locate a show on the Ororo index by cleaned title and air year,
        logging in via the key link when the index is not visible.
        Returns a utf-8 path, or None on failure.
        """
        try:
            result = client.source(self.base_link, headers=self.headers)
            # Missing 'index show' markup means we are logged out; retry
            # with a session cookie.
            if not "'index show'" in result:
                cookie = client.source(self.sign_link, headers=self.headers, post=self.key_link, output="cookie")
                result = client.source(self.base_link, headers=self.headers, cookie=cookie)

            result = client.parseDOM(result, "div", attrs={"class": "index show"})
            # Per entry: (name, year value, href).
            result = [
                (
                    client.parseDOM(i, "a", attrs={"class": "name"})[0],
                    client.parseDOM(i, "span", attrs={"class": "value"})[0],
                    client.parseDOM(i, "a", ret="href")[0],
                )
                for i in result
            ]

            tvshowtitle = cleantitle.tv(tvshowtitle)
            # Tolerate +/- 1 year drift between metadata sources.
            years = [str(year), str(int(year) + 1), str(int(year) - 1)]
            result = [i for i in result if any(x in i[1] for x in years)]
            result = [i[2] for i in result if tvshowtitle == cleantitle.tv(i[0])][0]

            # Keep only the path component when a scheme/host is present.
            try:
                url = re.compile("//.+?(/.+)").findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode("utf-8")
            return url
        except:
            return
コード例 #22
0
ファイル: wsonline_tv.py プロジェクト: karnage11/filmkodi
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        """Probe the site's known episode URL layouts, newest first.

        Tries /episode/<cat>-sXXeYY, then the dated sXXeYY and NxM
        layouts, giving up (None) when all return 404 or the air date
        predates 2009.
        """
        try:
            if url == None: return

            year, month = re.compile('(\d{4})-(\d{2})').findall(date)[-1]
            # Site carries no content older than 2009.
            if int(year) <= 2008: raise Exception()

            cat = urlparse.urljoin(self.base_link, url)
            cat = cat.split('category/', 1)[-1].rsplit('/')[0]


            url = urlparse.urljoin(self.base_link, '/episode/%s-s%02de%02d' % (cat, int(season), int(episode)))
            result = client.source(url, output='response', error=True)

            if '404' in result[0]:
                url = urlparse.urljoin(self.base_link, '/%s/%s/%s-s%02de%02d' % (year, month, cat, int(season), int(episode)))
                result = client.source(url, output='response', error=True)

            if '404' in result[0]:
                url = urlparse.urljoin(self.base_link, '/%s/%s/%s-%01dx%01d' % (year, month, cat, int(season), int(episode)))
                result = client.source(url, output='response', error=True)

            if '404' in result[0]: raise Exception()

            # NOTE(review): on regex failure this assigns the whole response
            # object to url, while sibling providers fall back to a string —
            # likely meant to leave url unchanged.  Confirm before changing.
            try: url = re.compile('//.+?(/.+)').findall(url)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #23
0
ファイル: wsonline_tv.py プロジェクト: gitrazr/filmkodi
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        """Probe the site's known episode URL layouts, newest first.

        Tries /episode/<cat>-sXXeYY, then the dated sXXeYY and NxM
        layouts, giving up (None) when all return 404 or the air date
        predates 2009.
        """
        try:
            if url == None: return

            year, month = re.compile('(\d{4})-(\d{2})').findall(date)[-1]
            # Site carries no content older than 2009.
            if int(year) <= 2008: raise Exception()

            cat = urlparse.urljoin(self.base_link, url)
            cat = cat.split('category/', 1)[-1].rsplit('/')[0]


            url = urlparse.urljoin(self.base_link, '/episode/%s-s%02de%02d' % (cat, int(season), int(episode)))
            result = client.source(url, output='response', error=True)

            if '404' in result[0]:
                url = urlparse.urljoin(self.base_link, '/%s/%s/%s-s%02de%02d' % (year, month, cat, int(season), int(episode)))
                result = client.source(url, output='response', error=True)

            if '404' in result[0]:
                url = urlparse.urljoin(self.base_link, '/%s/%s/%s-%01dx%01d' % (year, month, cat, int(season), int(episode)))
                result = client.source(url, output='response', error=True)

            if '404' in result[0]: raise Exception()

            # NOTE(review): on regex failure this assigns the whole response
            # object to url, while sibling providers fall back to a string —
            # likely meant to leave url unchanged.  Confirm before changing.
            try: url = re.compile('//.+?(/.+)').findall(url)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #24
0
ファイル: watch1080p_mv.py プロジェクト: Azzudare/filmkodi
    def resolve(self, url):
        """Resolve a watch1080p page URL to a playable media URL.

        Follows the embedded player iframe, strips one base64 layer per
        window.atob() wrapper found in the page, and hands the final link
        to resolvers.request.  Falls back to resolving the raw iframe src
        when no atob-encoded source is present.  Returns the resolved URL
        (or the last intermediate value when resolving failed).
        """
        link = client.source(url)
        # The page embeds the real player behind a src="..." style attribute.
        url = re.compile('src="(.+?)" style').findall(link)[0]
        link = client.source(url)
        try:
            url = re.compile("window.atob\('(.+?)'\)\)").findall(link)[0]
            # One base64 decode per window.atob occurrence in the page.
            func_count = len(re.findall('window\.atob', link))
            for _i in xrange(func_count):
                url = base64.decodestring(url)
            url = re.compile("<source src='(.+?)'").findall(url)[0]
            # Resolve once and reuse: the original resolved twice (once just
            # for the log call), doubling the network round-trips, and passed
            # extra positional args to control.log, which the rest of this
            # file calls with a single pre-formatted string.
            resolved = resolvers.request(url)
            control.log(">> u2 %s |ENcoded %s" % (url, resolved))
            url = resolved

        except:
            try:
                url = re.compile('src="(.+?)"').findall(link)[0]

                # NOTE(review): host is computed but never used; kept because
                # an exception here deliberately skips resolvers.request.
                host = urlparse.urlparse(url).netloc
                host = host.replace('www.', '').replace('embed.', '')
                host = host.rsplit('.', 1)[0]
                host = host.lower()
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                url = resolvers.request(url)

            except:
                pass
        return url
コード例 #25
0
ファイル: dayt_mv_tv.py プロジェクト: gitrazr/filmkodi
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        # Scrape a Dayt forum post: direct Mail.ru / Yandex-disk links live in
        # the "5throw" div, while further streams sit behind a chain of nested
        # iframes.  The network calls are order-dependent, so the flow is kept
        # strictly sequential.
        try:
            sources = []
            if url == None: return sources
            myurl = urlparse.urljoin(self.base_link, '/forum/' + url)
            result = client2.http_get(myurl)
            result10 = result
            result10 = client.parseDOM(result10, 'div', attrs={'id': '5throw'})[0]
            result10 = client.parseDOM(result10, 'a', attrs={'rel': 'nofollow'}, ret='href')
            # Quality is inferred from the post URL, not from the stream itself.
            mquality = 'HD'
            if '1080'in url: mquality = '1080p'
            for i in result10:
                if 'mail.ru' in i:
                    myresolve = resolvers.request(i)
                    sources.append({'source': 'MAIL.RU', 'quality': mquality, 'provider': 'Dayt', 'url': myresolve})
                if 'yadi.sk' in i:
                    myresolve = resolvers.request(i)
                    sources.append({'source': 'YADISK', 'quality': mquality, 'provider': 'Dayt', 'url': myresolve})

            # Follow the 'pasep' iframe two levels deep to reach the player,
            # then let the resolver expand it into (url, quality) pairs.
            result = client.parseDOM(result, 'iframe', ret='src')
            result = [i for i in result if 'pasep' in i][0]
            result = client.source(result)
            result = client.parseDOM(result, 'iframe', ret='src')[0]
            result = client.source(result)
            result = client.parseDOM(result, 'iframe', ret='src')[0]
            links = resolvers.request(result)
            for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Dayt', 'url': i[0]})
            return sources
        except:
            # Best-effort: return whatever was collected before the failure.
            return sources
コード例 #26
0
ファイル: ororo_tv.py プロジェクト: JRepoInd/lambda-addons
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        """Find the episode anchor on an Ororo show page and return its path."""
        try:
            if url == None:
                return

            page_url = urlparse.urljoin(self.base_link, url)

            html = client.source(page_url, headers=self.headers)
            # Missing season-tab markup means we are not signed in: sign in
            # and fetch the page again with the session cookie.
            if not "menu season-tabs" in html:
                cookie = client.source(self.sign_link, headers=self.headers, post=self.key_link, output="cookie")
                html = client.source(page_url, headers=self.headers, cookie=cookie)

            anchor = "#%01d-%01d" % (int(season), int(episode))
            match = client.parseDOM(html, "a", ret="data-href", attrs={"href": anchor})[0]

            # Strip scheme/host when the href is absolute.
            found = re.compile("//.+?(/.+)").findall(match)
            ep_url = found[0] if found else match
            ep_url = client.replaceHTMLCodes(ep_url)
            return ep_url.encode("utf-8")
        except:
            return
コード例 #27
0
    def get_movie(self, imdb, title, year):
        """Search for a movie, verify the page mentions the IMDB id, return its path."""
        try:
            query = self.moviesearch_link % (urllib.quote_plus(title))

            html = client.source(query)

            wanted = cleantitle.movie(title)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            # Gather (href, "Title (year)") candidates from the result headings.
            candidates = []
            for block in client.parseDOM(html, 'h3', attrs = {'class': '.+?'}):
                hrefs = client.parseDOM(block, 'a', ret='href')
                labels = client.parseDOM(block, 'a')
                if not hrefs or not labels: continue
                m = re.compile('(^Watch Full "|^Watch |)(.+? [(]\d{4}[)])').findall(labels[-1])
                if not m: continue
                candidates.append((hrefs[0], m[0][-1]))

            matches = [c for c in candidates if wanted == cleantitle.movie(c[1])]
            result = [c[0] for c in matches if any(x in c[1] for x in years)][0]

            # Confirm the detail page really is this IMDB title.
            check = urlparse.urljoin(self.base_link, result)
            check = client.source(check)
            if not str(imdb) in check: raise Exception()

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #28
0
ファイル: einthusan_mv.py プロジェクト: AMObox/teammaniac
    def get_movie(self, imdb, title, year):
        """Find an Indian movie on Einthusan by title and language; return its path."""
        try:
            # OMDb supplies country and language, letting us skip non-Indian
            # titles and pick the site's language section.
            meta = client.source('http://www.omdbapi.com/?i=%s' % imdb)
            meta = json.loads(meta)
            countries = [c.strip() for c in meta['Country'].split(',')]
            if not 'India' in countries: return

            supported = ['hindi', 'tamil', 'telugu', 'malayalam']
            spoken = [l.strip().lower() for l in meta['Language'].split(',')]
            language = [l for l in spoken if any(x == l for x in supported)][0]

            query = self.search_link % (urllib.quote_plus(title), language)
            query = urlparse.urljoin(self.base_link, query)

            html = client.source(query)
            sections = client.parseDOM(html, 'div', attrs = {'class': 'search-category'})
            movies = [s for s in sections if 'Movies' in client.parseDOM(s, 'p')[0]][0]
            items = client.parseDOM(movies, 'li')

            wanted = cleantitle.movie(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            pairs = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a')[0]) for i in items]
            # Prefer entries carrying a matching "(year)"; fall back to all.
            by_year = [p for p in pairs if any(x in p[1] for x in years)]
            if not len(by_year) == 0: pairs = by_year
            result = [p[0] for p in pairs if wanted == cleantitle.movie(p[1])][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = url.replace('../', '/')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #29
0
ファイル: moviestorm_tv.py プロジェクト: bialagary/mw
    def get_show(self, imdb, tvdb, tvshowtitle, year):
        """Look up a TV show in the full site index; return its IMDB-verified path."""
        try:
            query = urlparse.urljoin(self.base_link, self.tvsearch_link)

            html = client.source(query)
            listing = client.parseDOM(html, 'div', attrs = {'class': 'movies_content'})[0]

            wanted = cleantitle.tv(tvshowtitle)

            # Each index entry is an <li> holding one href/title anchor.
            items = re.compile('(<li>.+?</li>)').findall(listing)
            pairs = [re.compile('href="(.+?)">(.+?)<').findall(i) for i in items]
            pairs = [p[0] for p in pairs if len(p) > 0]
            result = [p[0] for p in pairs if wanted == cleantitle.tv(p[1])][0]

            # Confirm the candidate page actually references the IMDB id.
            check = urlparse.urljoin(self.base_link, result)
            check = client.source(check)
            if not str(imdb) in check: raise Exception()

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #30
0
ファイル: watch1080p_mv.py プロジェクト: karnage11/filmkodi
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect server links for a watch1080p title.

        Opens the detail page, follows the "watch" button, reads the server
        list and maps each label to a quality tag.  Malformed server entries
        are skipped instead of aborting the whole list (the previous
        range(len(...)) loop raised IndexError on the first bad entry and
        returned a truncated result via the bare except).
        """
        try:
            sources = []
            if url == None: return sources
            url = urlparse.urljoin(self.base_link, url)
            result = client.source(url)
            result = client.parseDOM(result,
                                     'a',
                                     attrs={'class': 'icons btn_watch_detail'},
                                     ret='href')
            result = client.source(result[0])
            result = client.parseDOM(result, 'div', attrs={'class': 'server'})
            anchors = re.compile('(<a.*?</a>)', re.DOTALL).findall(result[0])
            for a in anchors:
                hrefs = client.parseDOM(a, 'a', ret='href')
                labels = client.parseDOM(a, 'a')
                if not hrefs or not labels: continue  # skip malformed entries
                link, label = hrefs[0], labels[0]
                # Default is SD (covers explicit '480' labels as well).
                quality = 'SD'
                if '1080' in label: quality = '1080p'
                elif '720' in label: quality = 'HD'
                sources.append({
                    'source': 'watch1080p',
                    'quality': quality,
                    'provider': 'Watch1080p',
                    'url': link
                })

            return sources
        except:
            return sources
コード例 #31
0
    def get_movie(self, imdb, title, year):
        # Search the site for a movie by title and return its encoded
        # detail-link URL.
        try:
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, 'li', attrs = {'class': '[^"]*movie[^"]*'})

            title = cleantitle.movie(title)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            # Keep (href, heading) pairs whose normalised heading matches.
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h4')) for i in result]
            result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result][0]

            url = re.compile('movie-detail/(.+?)/').findall(result)[0]
            url = self.detail_link % url
            url = url.encode('utf-8')

            # NOTE(review): `result` is now a single href *string*, so
            # len(result) > 1 tests the string's length (almost always true),
            # not the number of matching candidates.  This looks like it was
            # meant to gate the year check on multiple matches — confirm
            # intent before changing.
            if len(result) > 1:
                y = client.source(urlparse.urljoin(self.base_link, url))
                y = re.compile('(\d{4})-\d{2}-\d{2}').findall(y)[-1]
                if not str(y) in years: raise Exception()

            return url
        except:
            return
コード例 #32
0
    def get_show(self, imdb, tvdb, tvshowtitle, year):
        # Match a TV show in the site's full index and return its URL path,
        # verifying that the candidate page actually references the IMDB id.
        try:
            query = urlparse.urljoin(self.base_link, self.tvsearch_link)

            result = client.source(query)

            result = client.parseDOM(result, 'div', attrs = {'class': 'movies_content'})[0]

            tvshowtitle = cleantitle.tv(tvshowtitle)

            # Each index entry is an <li> holding one href/title anchor.
            result = re.compile('(<li>.+?</li>)').findall(result)
            result = [re.compile('href="(.+?)">(.+?)<').findall(i) for i in result]
            result = [i[0] for i in result if len(i) > 0]
            result = [i[0] for i in result if tvshowtitle == cleantitle.tv(i[1])][0]

            check = urlparse.urljoin(self.base_link, result)
            check = client.source(check)
            if not str(imdb) in check: raise Exception()

            # Strip scheme/host when the href is absolute.
            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #33
0
ファイル: dizigold_tv.py プロジェクト: Magoo204/lambda-addons
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        # Fetch the episode page, post its view_id to the AJAX player
        # endpoint and translate the returned file/label pairs into sources.
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            query = urlparse.urljoin(self.base_link, self.ajax_link)
            post = re.compile('var\s*view_id\s*=\s*"(\d*)"').findall(result)[0]
            post = self.player_link % post

            result = client.source(query, post=post, headers=self.headers)
            result = json.loads(result)
            result = result['data']

            result = re.compile('"file"\s*:\s*"(.+?)".+?"label"\s*:\s*"(\d*p)"').findall(result)

            # Google-hosted files are used as-is; others get User-Agent and
            # Referer appended in the "url|headers" form.
            links = [{'url': i[0], 'quality': i[1]} for i in result if 'google' in i[0]]
            links += [{'url': '%s|User-Agent=%s&Referer=%s' % (i[0], urllib.quote_plus(client.agent()), urllib.quote_plus(url)), 'quality': i[1]} for i in result if not 'google' in i[0]]


            # Emit at most one source per tier; SD falls back from 480p to 360p.
            try: sources.append({'source': 'GVideo', 'quality': '1080p', 'provider': 'Dizigold', 'url': [i['url'] for i in links if i['quality'] == '1080p'][0]})
            except: pass
            try: sources.append({'source': 'GVideo', 'quality': 'HD', 'provider': 'Dizigold', 'url': [i['url'] for i in links if i['quality'] == '720p'][0]})
            except: pass
            try: sources.append({'source': 'GVideo', 'quality': 'SD', 'provider': 'Dizigold', 'url': [i['url'] for i in links if i['quality'] == '480p'][0]})
            except: sources.append({'source': 'GVideo', 'quality': 'SD', 'provider': 'Dizigold', 'url': [i['url'] for i in links if i['quality'] == '360p'][0]})


            return sources
        except:
            return sources
コード例 #34
0
    def resolve(self, url):
        # Resolve an embed page: first try the base64-wrapped <source> tag,
        # otherwise fall back to resolving a plain iframe src.
        link = client.source(url)
        url=re.compile('src="(.+?)" style').findall(link)[0]
        link = client.source(url)
        try:
                # The stream URL is double base64-encoded inside window.atob(...).
                url=re.compile("window.atob\('(.+?)'\)\)").findall(link)[0]
                content=base64.b64decode(url)
                data=base64.b64decode(content)
                url=re.compile("<source src='(.+?)'").findall(data)[0]
                # NOTE(review): a URL-encoded User-Agent header is appended with
                # no '|' separator, unlike the '%s|User-Agent=...' convention
                # used elsewhere in this file — confirm the player accepts this.
                url = url + 'User-Agent%3DMozilla%2F5.0%20(X11%3B%20Linux%20x86_64)%20AppleWebKit%2F537.36%20(KHTML%2C%20like%20Gecko)%20Chrome%2F48.0.2564.82%20Safari%2F537.36%27'

        except:
                try:
                    url=re.compile('src="(.+?)"').findall(link)[0]

                    # Derive the hoster name from the domain (computed but not
                    # returned; only the resolved URL is used).
                    host = urlparse.urlparse(url).netloc
                    host = host.replace('www.', '').replace('embed.', '')
                    host = host.rsplit('.', 1)[0]
                    host = host.lower()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    url = resolvers.request(url)

                except:pass
        return url
コード例 #35
0
ファイル: einthusan_mv.py プロジェクト: soltje/filmkodi
    def get_movie(self, imdb, title, year):
        # Einthusan hosts Indian-language movies: use OMDb metadata to
        # confirm the country and pick the site language before searching.
        try:
            search = 'http://www.omdbapi.com/?i=%s' % imdb
            search = client.source(search)
            search = json.loads(search)
            country = [i.strip() for i in search['Country'].split(',')]
            if not 'India' in country: return

            languages = ['hindi', 'tamil', 'telugu', 'malayalam']
            language = [i.strip().lower() for i in search['Language'].split(',')]
            language = [i for i in language if any(x == i for x in languages)][0]

            query = self.search_link % (urllib.quote_plus(title), language)
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, 'div', attrs = {'class': 'search-category'})
            # Only the "Movies" category of the search results is relevant.
            result = [i for i in result if 'Movies' in client.parseDOM(i, 'p')[0]][0]
            result = client.parseDOM(result, 'li')

            title = cleantitle.movie(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a')[0]) for i in result]
            # Prefer entries whose label carries a matching "(year)"; if none
            # do, keep the full list and match on title alone.
            r = [i for i in result if any(x in i[1] for x in years)]
            if not len(r) == 0: result = r
            result = [i[0] for i in result if title == cleantitle.movie(i[1])][0]

            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = url.replace('../', '/')
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #36
0
ファイル: dizigoldv2_tv.py プロジェクト: soltje/filmkodi
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Build the source list for a Dizigold v2 episode page."""
        try:
            sources = []

            if url == None: return sources

            page = urlparse.urljoin(self.base_link, url)
            html = client.source(page)

            # The player is loaded via an AJAX endpoint keyed by view_id.
            endpoint = urlparse.urljoin(self.base_link, self.ajax_link)
            view_id = re.compile('var\s*view_id\s*=\s*"(\d*)"').findall(html)[0]
            payload = self.player_link % view_id

            data = client.source(endpoint, post=payload, headers=self.headers)
            data = json.loads(data)['data']

            found = re.compile('"file"\s*:\s*"(.+?)".+?"label"\s*:\s*"(\d*p)"').findall(data)

            # Google-hosted files first (used as-is), then the rest with
            # User-Agent/Referer appended in the "url|headers" form.
            links = [{'url': f, 'quality': q} for f, q in found if 'google' in f]
            links += [{'url': '%s|User-Agent=%s&Referer=%s' % (f, urllib.quote_plus(client.agent()), urllib.quote_plus(page)), 'quality': q} for f, q in found if not 'google' in f]

            def pick(label):
                # First link whose label matches; IndexError when absent.
                return [l['url'] for l in links if l['quality'] == label][0]

            try: sources.append({'source': 'GVideo', 'quality': '1080p', 'provider': 'Dizigoldv2', 'url': pick('1080p')})
            except: pass
            try: sources.append({'source': 'GVideo', 'quality': 'HD', 'provider': 'Dizigoldv2', 'url': pick('720p')})
            except: pass
            try: sources.append({'source': 'GVideo', 'quality': 'SD', 'provider': 'Dizigoldv2', 'url': pick('480p')})
            except: sources.append({'source': 'GVideo', 'quality': 'SD', 'provider': 'Dizigoldv2', 'url': pick('360p')})

            return sources
        except:
            return sources
コード例 #37
0
ファイル: tunemovie_mv_tv.py プロジェクト: soltje/filmkodi
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        # Resolve "Title (year)" to a Tunemovie season page, then pick the
        # requested episode link from that page.  The list pipeline below is
        # order-sensitive; each step narrows the candidate set.
        try:
            tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(
                url)[0]

            query = self.search_link % (urllib.quote_plus(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query)

            result = client.source(query)
            result = client.parseDOM(result, 'div', attrs={'id': 'post-.+?'})

            tvshowtitle = cleantitle.tv(tvshowtitle)
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            years = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1)
            ]

            # (href, title, year-status) triples for each search hit.
            result = [(client.parseDOM(i, 'a', ret='href')[0],
                       client.parseDOM(i, 'a', ret='title')[0],
                       client.parseDOM(i,
                                       'div',
                                       attrs={'class': 'status status-year'}))
                      for i in result]
            # Drop duplicate entries while preserving order.
            result = [x for y, x in enumerate(result) if x not in result[:y]]
            result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0]
            # Titles are formatted "<Show> Season <n>".
            result = [(i[0], re.compile('(.+?) Season (\d*)$').findall(i[1]),
                       i[2]) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result
                      if len(i[1]) > 0]
            result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
            result = [i for i in result if season == i[2]]
            # Estimate the show's premiere year from the season page's year
            # minus the season number, then match against the expected years.
            # (assumes one season per year — TODO confirm)
            result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1))
                      for i in result]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            url = urlparse.urljoin(self.base_link, result)

            # The season page lists episode anchors in "episode_show".
            result = client.source(url)
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'id': 'episode_show'})[0]
            result = re.compile('(<a.+?</a>)').findall(result)
            result = [(client.parseDOM(i, 'a',
                                       ret='href')[0], client.parseDOM(i,
                                                                       'a')[0])
                      for i in result]
            result = [i[0] for i in result if episode == i[1]][0]

            # Strip scheme/host when the href is absolute.
            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #38
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Return the single MovieTV stream source for a movie or episode URL."""
        try:
            sources = []

            if url == None: return sources

            # A trailing "?S<season>E<episode>" suffix marks a TV episode.
            is_episode = re.compile('(.+?)\?S\d*E\d*$').findall(url)

            try: url, season, episode = re.compile('(.+?)\?S(\d*)E(\d*)$').findall(url)[0]
            except: pass

            page = urlparse.urljoin(self.base_link, url)
            html = client.source(page)

            if len(is_episode) == 0:
                # Movie pages embed the stream in a <source type="video..."> tag.
                stream = client.parseDOM(html, 'source', ret='src', attrs = {'type': 'video.+?'})[0]
            else:
                # Episode pages expose playSeries(id, season, episode); the id
                # feeds a JSON endpoint that returns the stream URL.
                pattern = 'playSeries\((\d+),(%01d),(%01d)\)' % (int(season), int(episode))
                ids = re.compile(pattern).findall(html)[0]
                ep = urlparse.urljoin(self.base_link, self.episode_link % (ids[0], ids[1], ids[2]))
                stream = json.loads(client.source(ep))['url']

            final = '%s|User-Agent=%s&Referer=%s' % (stream, urllib.quote_plus(client.agent()), urllib.quote_plus(page))

            sources.append({'source': 'MovieTV', 'quality': 'HD', 'provider': 'MovieTV', 'url': final})

            return sources
        except:
            return sources
コード例 #39
0
ファイル: default.py プロジェクト: SemperfidelisTV/SemperFI-
def PLAYLINKAFDAH(name, url):
    """Resolve an Afdah movie page to Google-video streams and list one item per quality.

    Keeps only 1080p/HD streams, relabels 'HD' as '720p' and registers each
    via addLink().  The original body mixed tabs and spaces (a SyntaxError
    under Python 3); indentation is normalised here — behavior is unchanged.
    """
    moviename = name

    link = open_url(url)
    base_link_1 = 'https://afdah.org'

    search_link = '/results?q=%s'
    info_link = '/video_info'
    base_link = random.choice([base_link_1])
    url = urlparse.urljoin(base_link, url)

    result = client.source(url)

    video_id = re.compile('video_id *= *[\'|\"](.+?)[\'|\"]').findall(result)[0]
    post = urllib.urlencode({'video_id': video_id})

    result = client.source(urlparse.urljoin(base_link, info_link), post=post)

    # The info endpoint returns an &-separated blob; the entry mentioning
    # google carries a comma-separated stream list.
    u = [i for i in result.split('&') if 'google' in i][0]
    u = urllib.unquote_plus(u)
    u = [urllib.unquote_plus(i.split('|')[-1]) for i in u.split(',')]
    u = [googleplus.tag(i)[0] for i in u]
    u = [i for i in u if i['quality'] in ['1080p', 'HD']]
    for i in u:
        # Show the numeric label instead of the 'HD' tag.
        i['quality'] = re.sub('HD', '720p', i['quality'])
        addLink(moviename + " - " + i['quality'], i['url'], 101, icon, fanart)
コード例 #40
0
ファイル: tvrelease_tv.py プロジェクト: AMObox/teammaniac
    def check(self, i):
        # Link-checking worker: pull download links from a TVrelease post,
        # keep only the supported hosters whose files pass a liveness check,
        # and push valid entries onto self.sources.
        try:
            result = client.source(i['url'])
            result = client.parseDOM(result, 'td', attrs = {'class': 'td_cols'})[0]
            result = result.split('"td_heads"')
            result = client.parseDOM(result, 'a', ret='href')

            for url in result:
                try:
                    # go4up.com is a multi-mirror wrapper; expand it to the
                    # first hoster link listed on its link page.
                    if 'go4up.com' in url:
                        url = re.compile('//.+?/.+?/([\w]+)').findall(url)[0]
                        url = client.source(self.go4up_link_2 % url)
                        url = client.parseDOM(url, 'div', attrs = {'id': 'linklist'})[0]
                        url = client.parseDOM(url, 'a', ret='href')[0]

                    # Reduce the netloc to the bare hoster name.
                    host = urlparse.urlparse(url).netloc
                    host = host.rsplit('.', 1)[0].split('.', 1)[-1]
                    host = host.strip().lower()

                    if not host in ['uptobox', 'hugefiles', 'uploadrocket']: raise Exception()

                    # Each hoster module exposes check(url) for file liveness.
                    if host == 'hugefiles': check = hugefiles.check(url)
                    elif host == 'uploadrocket': check = uploadrocket.check(url)
                    elif host == 'uptobox': check = uptobox.check(url)

                    if check == False: raise Exception()

                    self.sources.append({'source': host, 'quality': 'HD', 'provider': 'TVrelease', 'url': url, 'info': i['info']})
                except:
                    # Skip this link and keep checking the rest.
                    pass
        except:
            pass
コード例 #41
0
ファイル: movietv_mv_tv_null.py プロジェクト: mpie/repo
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        # Produce the single MovieTV stream for a movie or "...?S<n>E<m>"
        # episode URL.
        try:
            sources = []

            if url == None: return sources

            # A trailing ?S..E.. suffix marks a TV episode.
            content = re.compile('(.+?)\?S\d*E\d*$').findall(url)

            try: url, season, episode = re.compile('(.+?)\?S(\d*)E(\d*)$').findall(url)[0]
            except: pass

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url)

            if len(content) == 0:
                # Movie pages embed the stream in a <source type="video..."> tag.
                u = client.parseDOM(result, 'source', ret='src', attrs = {'type': 'video.+?'})[0]
            else:
                # Episode pages expose playSeries(id, season, episode); the id
                # feeds a JSON endpoint that returns the stream URL.
                u = re.compile('playSeries\((\d+),(%01d),(%01d)\)' % (int(season), int(episode))).findall(result)[0]
                u = self.episode_link % (u[0], u[1], u[2])
                u = urlparse.urljoin(self.base_link, u)
                u = client.source(u)
                u = json.loads(u)['url']

            url = '%s|User-Agent=%s&Referer=%s' % (u, urllib.quote_plus(client.agent()), urllib.quote_plus(url))

            sources.append({'source': 'MovieTV', 'quality': 'HD', 'provider': 'MovieTV', 'url': url})

            return sources
        except:
            return sources
コード例 #42
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Return Google-video sources (1080p/HD only) for an Afdah title."""
        try:
            sources = []

            if url == None: return sources

            page = urlparse.urljoin(self.base_link, url)
            html = client.source(page)

            video_id = re.compile('video_id *= *[\'|\"](.+?)[\'|\"]').findall(html)[0]
            payload = urllib.urlencode({'video_id': video_id})

            info = client.source(urlparse.urljoin(self.base_link, self.info_link), post=payload)

            # The info endpoint returns an &-separated blob; the entry that
            # mentions google carries a comma-separated stream list.
            blob = [part for part in info.split('&') if 'google' in part][0]
            blob = urllib.unquote_plus(blob)
            streams = [urllib.unquote_plus(item.split('|')[-1]) for item in blob.split(',')]
            streams = [googleplus.tag(s)[0] for s in streams]
            streams = [s for s in streams if s['quality'] in ['1080p', 'HD']]

            for s in streams:
                sources.append({
                    'source': 'GVideo',
                    'quality': s['quality'],
                    'provider': 'Afdah',
                    'url': s['url']
                })

            return sources
        except:
            return sources
コード例 #43
0
ファイル: dizibox_tv.py プロジェクト: gitrazr/filmkodi
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        """Navigate a Dizibox show page to the requested season/episode URL."""
        try:
            if url == None: return

            show_url = urlparse.urljoin(self.base_link, url)

            season, episode = '%01d' % int(season), '%01d' % int(episode)

            html = client.source(show_url)

            # Season 1 episodes are listed on the show page itself; other
            # seasons require following the matching season tab first.
            if not season == '1':
                tabs = client.parseDOM(html, 'a', ret='href', attrs = {'class': 'season-.+?'})
                season_url = [t for t in tabs if '/%s-sezon-' % season in t][0]
                html = client.source(season_url)

            hrefs = client.parseDOM(html, 'a', ret='href')
            match = [h for h in hrefs if '%s-sezon-%s-bolum-' % (season, episode) in h][0]

            # Strip scheme/host when the href is absolute.
            found = re.compile('//.+?(/.+)').findall(match)
            ep_url = found[0] if found else match
            ep_url = client.replaceHTMLCodes(ep_url)
            return ep_url.encode('utf-8')
        except:
            return
コード例 #44
0
ファイル: ororo_tv.py プロジェクト: soltje/filmkodi
    def get_show(self, imdb, tvdb, tvshowtitle, year):
        # Sign in to Ororo (account required), scan the show index and return
        # the matching show's URL path.
        try:
            if (self.user == '' or self.password == ''): raise Exception()

            # Authenticate, then carry the language cookie with the session.
            cookie = client.source(self.sign, post=self.post, headers=self.headers, cookie=self.lang, output='cookie')
            cookie = '%s; %s' % (cookie, self.lang)

            url = urlparse.urljoin(self.base_link, self.tvsearch_link)

            result = client.source(url, cookie=cookie)

            tvshowtitle = cleantitle.get(tvshowtitle)
            years = ['%s' % str(year)]

            # Each index card carries a href, the show name and a year value.
            result = client.parseDOM(result, 'div', attrs={'class': 'index show'})
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs={'class': 'name'}),
                       client.parseDOM(i, 'span', attrs={'class': 'value'})) for i in result]
            result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            result = [i for i in result if tvshowtitle == cleantitle.get(i[1])]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #45
0
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        # Navigate a Dizibox show page to the requested season/episode URL.
        try:
            if url == None: return

            url = urlparse.urljoin(self.base_link, url)

            season, episode = '%01d' % int(season), '%01d' % int(episode)

            result = client.source(url)

            # Season 1 episodes are listed on the show page itself; other
            # seasons require following the matching season tab first.
            if not season == '1':
                url = client.parseDOM(result,
                                      'a',
                                      ret='href',
                                      attrs={'class': 'season-.+?'})
                url = [i for i in url if '/%s-sezon-' % season in i][0]
                result = client.source(url)

            result = client.parseDOM(result, 'a', ret='href')
            result = [
                i for i in result
                if '%s-sezon-%s-bolum-' % (season, episode) in i
            ][0]

            # Strip scheme/host when the href is absolute.
            try:
                url = re.compile('//.+?(/.+)').findall(result)[0]
            except:
                url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #46
0
ファイル: dizigold_tv.py プロジェクト: Magoo204/lambda-addons
    def get_show(self, imdb, tvdb, tvshowtitle, year):
        """Search Dizigold for a show and return its content id ('icerikid')."""
        try:
            endpoint = urlparse.urljoin(self.base_link, self.ajax_link)
            payload = self.search_link % (urllib.quote_plus(tvshowtitle))

            results = json.loads(client.source(endpoint, post=payload, headers=self.headers))

            wanted = cleantitle.tv(tvshowtitle)
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            # Collect (href, name, year) triples from each result snippet.
            candidates = []
            for item in results:
                hrefs = client.parseDOM(item, 'a', ret='href')
                names = client.parseDOM(item, 'h3')
                dates = re.compile('<h5>.+?(\d{4}).+?</h5>').findall(item)
                if len(hrefs) > 0 and len(names) > 0 and len(dates) > 0:
                    candidates.append((hrefs[0], names[0], dates[0]))

            candidates = [c for c in candidates if wanted == cleantitle.tv(c[1])]
            match = [c[0] for c in candidates if any(x in c[2] for x in years)][0]

            show_url = urlparse.urljoin(self.base_link, match)
            html = client.source(show_url)

            url = client.parseDOM(html, 'div', ret='value', attrs = {'id': 'icerikid'})[0]
            url = url.encode('utf-8')
            return url
        except:
            return
コード例 #47
0
ファイル: mvsnap_mv_tv.py プロジェクト: fopina/lambda-addons
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Resolve an MVsnap page to Google-video sources via its version dropdown."""
        try:
            sources = []

            if url == None: return sources

            # The query string (if present) names the dropdown option to pick.
            wanted = urlparse.urlparse(url).query

            page = urlparse.urljoin(self.base_link, url)
            html = client.source(page)

            dropdown = client.parseDOM(html, 'select', attrs = {'id': 'myDropdown'})[0]
            options = zip(client.parseDOM(dropdown, 'option', ret='value'), client.parseDOM(dropdown, 'option'))
            choice = [value for value, label in options if wanted == label or wanted == ''][0]

            # Follow the chosen option; it must redirect to a google host.
            redirect = client.source(urlparse.urljoin(self.base_link, choice), output='geturl')
            if not 'google' in redirect: raise Exception()

            for tag in googleplus.tag(redirect):
                sources.append({'source': 'GVideo', 'quality': tag['quality'], 'provider': 'MVsnap', 'url': tag['url']})

            return sources
        except:
            return sources
コード例 #48
0
ファイル: tvshows.py プロジェクト: kevintone/tdbaddon
    def predb(self, items):
        """Back-fill missing imdb/tmdb/tvdb ids on the entries of self.list.

        For every entry lacking all three ids, the show title (with any
        trailing "season N" stripped) is looked up against TVDB, then TMDB,
        then an IMDB-by-title endpoint.  Each lookup is best-effort: any
        network or parse failure leaves that id at '0'.

        NOTE(review): *items* is accepted but never read — kept only for
        interface compatibility with existing callers.

        Returns self.list (mutated in place).
        """
        for entry in self.list:
            # Entries are dicts; missing keys default to the '0' sentinel.
            imdb = entry.get('imdb', '0')
            tmdb = entry.get('tmdb', '0')
            tvdb = entry.get('tvdb', '0')

            if imdb == '0' and tmdb == '0' and tvdb == '0':
                title = entry['title'].lower()
                # Strip a trailing "season N" (and the space before it).
                if 'season' in title:
                    title = title[:title.index('season')-1]
                orig_series_name = title
                title = urllib.quote_plus(title)

                try:
                    # TVDB: XML search, exact case-insensitive name match.
                    url = self.tvdb_search_link % title
                    result = client.source(url)
                    for show in client.parseDOM(result, "Series"):
                        series_name = client.parseDOM(show, "SeriesName")[0]
                        if orig_series_name == series_name.lower():
                            tvdb = client.parseDOM(show, "seriesid")[0]
                            break
                except:
                    pass

                if tmdb == '0':
                    try:
                        # TMDB: JSON search, exact case-insensitive match.
                        url = self.tmdb_search_link % title
                        result = json.loads(client.source(url))['results']
                        for candidate in result:
                            if orig_series_name == candidate['name'].lower():
                                tmdb = candidate['id']
                                break
                    except:
                        pass

                if imdb == '0':
                    try:
                        # IMDB: by-title endpoint; normalize to 'ttNNNNNNN'.
                        url = self.imdb_by_title % (urllib.quote_plus(entry['title']))
                        item3 = json.loads(client.request(url, timeout='10'))
                        imdb = item3['imdbID']
                        if imdb in (None, '', 'N/A'): imdb = '0'
                        else: imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
                        imdb = imdb.encode('utf-8')
                    except:
                        pass

            entry.update({"tmdb": tmdb, "imdb": imdb, "tvdb": tvdb})
        return self.list
コード例 #49
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect Google Video streams for a Moviezone title.

        Follows the full-window player link off the title page, pulls the
        player file URL out of the player page, and posts it (with a fresh
        session cookie) to the stream endpoint, which answers with a JSON
        list of stream URLs.  Returns source dicts; empty list on failure.
        """
        sources = []
        try:
            if url is None:
                return sources

            page = client.source(urlparse.urljoin(self.base_link, url))

            link = client.parseDOM(page, 'div', attrs={'class': 'fullwindowlink'})[0]
            link = client.parseDOM(link, 'a', ret='href')[0]
            link = urlparse.urljoin(self.base_link, link)

            body = client.parseDOM(client.source(link), 'body')[0]

            payload = re.compile('movie_player_file *= *"(.+?)"').findall(body)[0]
            payload = urllib.urlencode({'url': payload})

            # The stream endpoint rejects requests without a session cookie.
            cookie = client.source(self.cookie_link, output='cookie', close=False)

            headers = {
                'Host': 'gl.hdmoviezone.net',
                'Accept': 'text/html, */*; q=0.01',
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                'Origin': 'http://www.hdmoviezone.net',
                'Cookie': cookie
            }

            data = client.source(self.stream_link, post=payload, headers=headers)
            content = json.loads(data)['content']

            for stream_url in [i['url'] for i in content]:
                try:
                    tagged = googleplus.tag(stream_url)[0]
                    sources.append({'source': 'GVideo', 'quality': tagged['quality'], 'provider': 'Moviezone', 'url': tagged['url']})
                except:
                    pass

            return sources
        except:
            return sources
コード例 #50
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Scrape a stream for an Xmovies title.

        A trailing '?SxxEyy' on *url* marks TV content; movies have none.
        For TV, the episode's part id is negotiated with the player endpoint
        before requesting the final player page.  Returns a single-entry
        list of source dicts, or an empty list on any failure.
        """
        try:
            sources = []

            if url == None: return sources

            # Non-empty iff the URL carries a season/episode suffix.
            content = re.compile('(.+?)\?S\d*E\d*$').findall(url)

            try:
                url, season, episode = re.compile(
                    '(.+?)\?S(\d*)E(\d*)$').findall(url)[0]
            except:
                pass

            # Spread load / dodge blocks across the site's mirror domains.
            self.base_link = random.choice(
                [self.base_link_1, self.base_link_2])

            # Post id is the first path segment after the leading section.
            post_id = re.compile('/.+?/(.+)').findall(url)[0].rsplit('/')[0]

            player = urlparse.urljoin(self.base_link, self.player_link)

            if len(content) == 0:
                # Movie: single-part payload.
                post = self.player_post_1 % post_id
            else:
                # TV: fetch the parts list, find the <li> whose link text
                # equals the episode number, and extract its part_id.
                post = client.source(player,
                                     post=self.player_post_2 % post_id,
                                     headers=self.headers)
                post = client.parseDOM(post,
                                       'ul',
                                       attrs={'class': 'movie-parts'})[0]
                post = client.parseDOM(post, 'li')
                post = [(client.parseDOM(i, 'a',
                                         ret='href'), client.parseDOM(i, 'a'))
                        for i in post]
                post = [(i[0][0], i[1][0]) for i in post
                        if len(i[0]) > 0 and len(i[1]) > 0]
                # '%01d' normalizes '07' -> '7' to match the link text.
                post = [i[0] for i in post if '%01d' % int(episode) == i[1]][0]
                post = urlparse.parse_qs(
                    urlparse.urlparse(post).query)['part_id'][0]
                post = self.player_post_3 % (post_id, post)

            url = client.source(player, post=post, headers=self.headers)
            url = re.compile('<source\s+src="([^"]+)').findall(url)[0]
            url = client.replaceHTMLCodes(url)

            # Google-hosted streams expose a real quality tag; anything else
            # is assumed HD.
            if 'google' in url: quality = googleplus.tag(url)[0]['quality']
            else: quality = 'HD'

            sources.append({
                'source': 'GVideo',
                'quality': quality,
                'provider': 'Xmovies',
                'url': url
            })

            return sources
        except:
            return sources
コード例 #51
0
    def __config(self):
        """Fire a one-shot config request at the provider; result ignored.

        Failures are swallowed — this is purely best-effort setup.
        """
        try:
            # The endpoint expects a Java HttpClient user agent string.
            headers = {'User-Agent': 'Apache-HttpClient/UNAVAILABLE (java 1.4)'}
            endpoint = self.base_link + self.config_link + self.__extra()
            client.source(endpoint, headers=headers)
        except:
            return
コード例 #52
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect streams for an MVsnap title/episode (variant with direct
        streams).

        The episode number is taken from the URL query ('...E7' -> '07') and
        matched against the labels of the page's dropdown.  A picked option
        either embeds a direct stream ('label|url,') or points at a Google
        Video page resolved via resolvers.request.  Returns source dicts;
        empty list on any failure.
        """
        sources = []
        try:
            if url is None:
                return sources

            raw_query = urlparse.urlparse(url).query
            try:
                # Zero-pad so '7' matches a dropdown label ending in '07'.
                episode = '%02d' % int(re.compile('E(\d*)$').findall(raw_query)[0])
            except:
                episode = ''

            page = client.source(urlparse.urljoin(self.base_link, url))

            dropdown = client.parseDOM(page,
                                       'select',
                                       attrs={'id': 'myDropdown'})[0]
            options = zip(client.parseDOM(dropdown, 'option', ret='value'),
                          client.parseDOM(dropdown, 'option'))
            picked = [
                value for value, label in options
                if label.endswith(episode) or episode == ''
            ][0]

            # 'label|url,' means the option value embeds a direct stream.
            direct = re.compile('(.+)[|](.+?)[,]').findall(picked)

            if len(direct) > 0:
                quality = 'HD' if 'hd' in direct[0][0].lower() else 'SD'
                sources.append({
                    'source': 'MVsnap',
                    'quality': quality,
                    'provider': 'MVsnap',
                    'url': direct[0][1]
                })
                return sources

            final = client.source(urlparse.urljoin(self.base_link, picked),
                                  output='geturl')
            if not 'google' in final: raise Exception()

            # Trim the get_video_info suffix and let the resolver expand the
            # Google link into one URL per available quality.
            final = final.split('get_video_info')[0]

            for item in resolvers.request(final):
                sources.append({
                    'source': 'GVideo',
                    'quality': item['quality'],
                    'provider': 'MVsnap',
                    'url': item['url']
                })

            return sources
        except:
            return sources
コード例 #53
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Scrape per-quality streams for a Dizimag episode.

        *url* encodes '/<show>/<episode-number>'.  Each alternate-host link
        on the episode page is resolved through either the 'Roosy' shortcut
        or the site's give/player handshake, then the jwplayer 'kaynaklar'
        ("sources") block is parsed for per-quality file URLs.  Returns
        source dicts tagged 1080p/HD/SD; empty list on any failure.
        """
        try:
            sources = []

            if url == None: return sources

            # s = show slug, e = episode number.
            s, e = re.compile('/(.+?)/(\d+)').findall(url)[0]

            sources_url = urlparse.urljoin(self.base_link, url)

            result = client.source(sources_url)

            # Alternate mirrors are exposed as onclick handlers.
            links = client.parseDOM(result, 'a', ret='onclick', attrs = {'class': 'alterlink'})

            for link in links:
                try:
                    # b = numeric mirror id, host = mirror name.
                    b = re.compile('(\d+)').findall(link)[0]

                    host = re.compile("'(.+?)'").findall(link)[0]

                    if host == 'Roosy':
                        # Hard-coded token/template for the Roosy host;
                        # NOTE(review): token meaning unknown — site-specific.
                        type = host
                        a = 'awQa5s14d5s6s12s'
                        dmg = self.vdmg_link
                    else:
                        # Handshake: post mirror id + host name, receive the
                        # player type ('tur' = Turkish for "type") and token.
                        url = urlparse.urljoin(self.base_link, self.give_link)
                        post = urllib.urlencode({'i': b, 'n': host, 'p': 0})
                        result = client.source(url, post=post, headers=self.headers, close=False)
                        result = json.loads(result)

                        type = result['p']['tur']
                        a = result['p']['c']
                        dmg = self.idmg_link

                    # Millisecond timestamp acts as a cache-buster.
                    url = dmg % (type, a, b, s, e, int(time.time() * 1000))
                    url = urlparse.urljoin(self.base_link, url)

                    result = client.source(url, headers=self.headers, close=False)
                    result = result.replace('\n', '')

                    # 'kaynaklar' = "sources": the jwplayer setup block with
                    # one file/label pair per quality.
                    url = re.compile('(var\s+kaynaklar\s*=\s*.+)').findall(result)[0]
                    url = re.compile('file *: *"(.+?)".+?label *: *"(.+?)"').findall(url)
                    # Attach UA + Referer so the CDN accepts playback.
                    url = [('%s|User-Agent=%s&Referer=%s' % (i[0].decode('unicode_escape'), urllib.quote_plus(client.agent()), urllib.quote_plus(sources_url)), i[1]) for i in url]

                    # Map labels to quality buckets; fall back 480p -> 360p.
                    try: sources.append({'source': 'Dizimag', 'quality': '1080p', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '1080p'][0]})
                    except: pass
                    try: sources.append({'source': 'Dizimag', 'quality': 'HD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '720p'][0]})
                    except: pass
                    try: sources.append({'source': 'Dizimag', 'quality': 'SD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '480p'][0]})
                    except: sources.append({'source': 'Dizimag', 'quality': 'SD', 'provider': 'Dizimag', 'url': [i[0] for i in url if i[1] == '360p'][0]})
                except:
                    pass

            return sources
        except:
            return sources