    def sources(self, url, hostDict, hostprDict):
        sources = []  # initialise first so the except path can always return it
        try:
            search_url = url['url']
            post = url['post']
            search_more_post = url['more']
            result = client.request(search_url, post=post)

            if not result.startswith('http'):
                return sources

            valid, host = source_utils.is_host_valid(result, hostDict)
            q = source_utils.check_sd_url(result)
            first_found = {'source': host, 'quality': q, 'language': 'pl', 'url': result, 'info': '', 'direct': False, 'debridonly': False}

            search_url = urlparse.urljoin(self.base_link, self.search_more)
            result = client.request(search_url, post=search_more_post)
            result = dom_parser.parse_dom(result, 'a')
            for el in result:
                desc = el.content
                info = desc[desc.find("(") + 1:desc.find(")")]
                link = el.attrs['href']

                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                q = source_utils.check_sd_url(link)

                sources.append({'source': host, 'quality': q, 'language': 'pl', 'url': link, 'info': info, 'direct': False, 'debridonly': False})

            first_found['info'] = self.get_info_from_others(sources)
            sources.append(first_found)

            return sources
        except Exception:
            return sources
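Every example on this page passes a URL or release name to source_utils.check_sd_url and stores the return value in the source dict's 'quality' field. The real implementation ships with each add-on's own source_utils module; the sketch below is only an assumed reconstruction that infers a quality label from tokens in the string and falls back to 'SD'.

    # Hypothetical sketch of check_sd_url, not the add-ons' actual code:
    # infer a quality label from tokens in the URL/file name, default 'SD'.
    def check_sd_url(release_link):
        try:
            release_link = release_link.lower()
            if '2160' in release_link or '4k' in release_link:
                return '4K'
            if '1080' in release_link:
                return '1080p'
            if '720' in release_link or 'hd' in release_link:
                return '720p'
            return 'SD'
        except Exception:
            return 'SD'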
Example #2

    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            try:
                search_url = url['url']
                post = url['post']
                referer = urlparse.urljoin(self.film_web, post['urlstrony'])
                result = client.request(search_url, post=post, referer=referer)
                if not result.startswith('http'):
                    return sources

                valid, host = source_utils.is_host_valid(result, hostDict)
                q = source_utils.check_sd_url(result)
                info = ''
                if 'lektor' in result:
                    info = 'Lektor'
                if 'napisy' in result:
                    info = 'Napisy'
                first_found = {
                    'source': host,
                    'quality': '720p',
                    'language': 'pl',
                    'url': result,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                }
                first_found['info'] = self.get_info_from_others(sources)
                sources.append(first_found)
            except:
                pass
            search_more_post = url['more']
            #search_url = urlparse.urljoin(self.base_link, self.search_more)
            result = client.request(self.base_link2, post=search_more_post)
            provider = client.parseDOM(result, 'option', ret='value')
            links = client.parseDOM(result, 'div', ret='data')
            wersja = client.parseDOM(result, 'div', attrs={'class': 'wersja'})
            #result = dom_parser.parse_dom(result, 'a')
            for link, ver in zip(links, wersja):
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                q = source_utils.check_sd_url(link)
                sources.append({
                    'source': host,
                    'quality': q,
                    'language': 'pl',
                    'url': link,
                    'info': ver,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except:
            return sources
Example #3
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            hldr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            season = 'S%02d/' % int(data['season'])
            title = data['tvshowtitle']

            '''
            Check for season directory, no need for extra checks. Path is there or it's not
            '''
            url = urlparse.urljoin(self.base_link, self.search_link % (title, season))
            results = client.request(url)
            if results is None:
                return sources
            '''
            All results at this level are now subfolders for resolution (1080p, HD, 2160p, etc)
            '''
            results = re.compile('<tr><td><a href="(.+?)"').findall(results)
            for dirlink in results:
                if dirlink.startswith('.') or dirlink.startswith('?'):
                    continue
                sublink = urlparse.urljoin(url, dirlink)
                '''
                If the url ends in a / then this is a folder and we need to dig one level
                deeper to find the season's episodes. Otherwise the episode files are not
                in per-resolution subfolders.
                '''
                if dirlink.endswith('/'):
                    subhtml = client.request(sublink)
                    subres = re.compile('<tr><td><a href="(.+?)"').findall(subhtml)
                    for link in subres:
                        if link.startswith('.') or link.startswith('?'):
                            continue
                        if hldr in link:
                            link = urlparse.urljoin(sublink, link)
                            quality = source_utils.check_sd_url(link)
                            sources.append({'source': 'Direct', 'quality': quality, 'language': 'en',
                                            'url': link, 'direct': True, 'debridonly': False})
                else:
                    if hldr in dirlink:
                        link = urlparse.urljoin(sublink, dirlink)
                        quality = source_utils.check_sd_url(link)
                        sources.append({'source': 'Direct', 'quality': quality, 'language': 'en',
                                        'url': link, 'direct': True, 'debridonly': False})
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('DL2.LAVINTV - Exception: \n' + str(failure))
            return sources
Example #4
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            timer = control.Time(start=True)

            # headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'}
            r = self.cfscraper.get(url).content
            give_me = client.parseDOM(r, "div", attrs={"id": "lnk list-downloads"})
            for url in give_me:
                # Stop searching 8 seconds before the provider timeout; otherwise the scraper may keep going, fail to finish in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('Shaanig - Timeout Reached')
                    break

                some_links = client.parseDOM(url, 'a', ret='href')
                for url in some_links:
                    quality = source_utils.check_sd_url(url)
                    url = url.split('?s=')[1]
                    final = urlparse.urljoin('http:', url)
                    sources.append({'source': 'Direct', 'quality': quality, 'language': 'en', 'url': final, 'direct': True, 'debridonly': False})

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('SHAANIG - Exception: \n' + str(failure))
            return sources
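Several of these providers (this one and Examples #22 and #36 below) take an extra sc_timeout argument and poll a control.Time stopwatch so scraping can stop before the provider's time window closes. control.Time belongs to the host add-on framework; a minimal stand-in consistent with the start=True / elapsed() usage above would be something like the following (an assumption, not the framework's code).

    import time

    class Time:
        # Hypothetical stand-in for control.Time as used above: construct
        # with start=True, then poll elapsed() for seconds since start.
        def __init__(self, start=False):
            self.start_time = time.time() if start else None

        def elapsed(self):
            if self.start_time is None:
                return 0.0
            return time.time() - self.start_time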
Example #5
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['title']
            year = data['year']
            url = self.base_link + self.search_link % year

            html = client.request(url, headers=headers)
            if html is None:
                return sources

            regex_string = r'<a href="{0}(.+?)"'.format(title)
            results = re.compile(regex_string).findall(html)
            for link in results:
                if 'Trailer' in link:
                    continue
                if 'Dubbed' in link:
                    continue
                url = self.base_link + self.search_link % year + title + link

                quality = source_utils.check_sd_url(url)
                sources.append({'source': 'Direct', 'quality': quality, 'language': 'en',
                                'url': url, 'direct': True, 'debridonly': False})
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('DLUPLOAD8 - Exception: \n' + str(failure))
            return sources
Example #6
    def sources(self, url, hostDict, hostprDict):

        sources = []
        try:
            if url is None: return sources
            r = client.request(urlparse.urljoin(self.base_link, url),
                               redirect=False)
            info = self.get_lang_by_type(client.parseDOM(r, 'title')[0])
            r = client.parseDOM(r, 'div', attrs={'class':
                                                 'tab-pane active'})[0]
            r = client.parseDOM(r, 'script')[0]
            script = r.split('"')[1]
            decoded = self.shwp(script)

            link = client.parseDOM(decoded, 'iframe', ret='src')[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid: return sources
            q = source_utils.check_sd_url(link)
            sources.append({
                'source': host,
                'quality': q,
                'language': 'pl',
                'url': link,
                'info': info,
                'direct': False,
                'debridonly': False
            })

            return sources
        except:
            return sources
Example #7
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         result = self.session.get(url[0]).content
         result = result.decode('utf-8')
         h = HTMLParser()
         result = h.unescape(result)
         quality = str(
             client.parseDOM(result,
                             'div',
                             attrs={'class': 'poster-qulabel'}))
         quality = source_utils.check_sd_url(quality)
         info = self.get_lang_by_type(url[1])
         video_link = str(client.parseDOM(result, 'iframe', ret='src')[0])
         valid, host = source_utils.is_host_valid(video_link, hostDict)
         sources.append({
             'source': host,
             'quality': quality,
             'language': info[0],
             'url': video_link,
             'info': info[1],
             'direct': False,
             'debridonly': False
         })
         return sources
     except:
         return sources
Example #8
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            with requests.Session() as s:
                link = cleantitle.clean_search_query(url['tvshowtitle']) + ".s" + \
                       url['season'] + "e" + url['episode']
                p = s.get(self.search_link + link + "/type/video")

                soup = BeautifulSoup(p.text, 'html.parser').find_all('table')[0]
                soup = soup.find_all('button')
                for i in soup:
                    fileUrl = i['data-clipboard-text']
                    if re.sub('[^0-9a-zA-Z]+', '.', link).lower() in fileUrl.lower():
                        hoster = fileUrl.split('/')[2]
                        quality = source_utils.check_sd_url(fileUrl)
                        sources.append({
                            'source': hoster,
                            'quality': quality,
                            'language': 'en',
                            'url': fileUrl,
                            'direct': False,
                            'debridonly': False,
                            'info':'FilePursuit App Available on the Play Store'
                        })
            return sources

        except:
            print("Unexpected error in Filepursuit Script: Sources", sys.exc_info()[0])
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return sources
Example #9

 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         User_Agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
         headers = {'User-Agent': User_Agent}
         r = requests.get(url, headers=headers,
                          allow_redirects=False).content
         try:
             match = re.compile('<div><iframe src="(.+?)"').findall(r)
             for url in match:
                 host = url.split('//')[1].replace('www.', '')
                 host = host.split('/')[0].split('.')[0].title()
                 quality = source_utils.check_sd_url(url)
                 r = requests.get(url,
                                  headers=headers,
                                  allow_redirects=False).content
                 if 'http' in url:
                     match = re.compile("url: '(.+?)',").findall(r)
                 else:
                     match = re.compile('file: "(.+?)",').findall(r)
                 for url in match:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         except:
             return sources
     except Exception:
         return sources
     return sources
Example #10
 def work(self, link, testDict):
     if str(link).startswith("http"):
         link = self.getlink(link)
         q = source_utils.check_sd_url(link)
         valid, host = source_utils.is_host_valid(link, testDict)
         if not valid: return 0
         return host, q, link
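work returns 0 when the host is invalid and falls through to an implicit None when the link is not an HTTP URL, so callers must treat any falsy result as a miss before unpacking the (host, q, link) tuple. A caller might look like this (the collect helper is illustrative, not part of the original project):

    def collect(self, links, testDict):
        # Illustrative caller for work(): skip falsy results (0 or None),
        # unpack the rest into source dicts.
        sources = []
        for raw in links:
            hit = self.work(raw, testDict)
            if not hit:
                continue
            host, q, link = hit
            sources.append({'source': host, 'quality': q, 'language': 'en',
                            'url': link, 'direct': False, 'debridonly': False})
        return sources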
Example #11
def more_rapidvideo(link, hostDict, lang, info):
    if "rapidvideo.com" in link:
        sources = []
        try:
            headers = {'User-Agent': client.agent()}
            response = requests.get(link, headers=headers).content
            test = re.findall("""(https:\/\/www.rapidvideo.com\/e\/.*)">""",
                              response)
            numGroups = len(test)
            for i in range(1, numGroups):
                url = test[i]
                valid, host = source_utils.is_host_valid(url, hostDict)
                q = source_utils.check_sd_url(url)
                sources.append({
                    'source': host,
                    'quality': q,
                    'language': lang,
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except:
            return []
    return []
Example #12
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            with requests.Session() as s:
                if 'episode' in url:
                    link = cleantitle.clean_search_query(url['tvshowtitle']) + ".s" + \
                       url['season'] + "e" + url['episode']
                else:
                    link = cleantitle.clean_search_query("%s.%s" % (url['title'], url['year']))
                p = s.get(self.search_link + link + "/type/videos")
                soup = BeautifulSoup(p.text, 'html.parser').find_all('table')[0]
                soup = soup.find_all('button')
                for i in soup:
                    fileUrl = i['data-clipboard-text']
                    source_check = self.link_check(fileUrl.lower(), re.sub('[^0-9a-zA-Z]+', '.', link).lower())
                    if source_check != False:
                        hoster = fileUrl.split('/')[2]
                        quality = source_utils.check_sd_url(fileUrl)
                        sources.append({
                            'source': hoster,
                            'quality': quality,
                            'language': 'en',
                            'url': fileUrl,
                            'direct': False,
                            'debridonly': False,
                            'info':'FilePursuit App Available on the Play Store'
                        })
            return sources

        except:
            print("Unexpected error in Filepursuit Script: Sources", sys.exc_info()[0])
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return sources
Example #13
def more_cdapl(link, hostDict, lang, info):
    if "cda.pl" in link:
        sources = []
        try:
            headers = {'User-Agent': client.agent()}
            response = requests.get(link, headers=headers).content
            test = client.parseDOM(response,
                                   'div',
                                   attrs={'class': 'wrapqualitybtn'})
            urls = client.parseDOM(test, 'a', ret='href')
            if urls:
                for url in urls:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    q = source_utils.check_sd_url(url)
                    direct = re.findall(
                        """file":"(.*)","file_cast""",
                        requests.get(url, headers=headers).content)[0].replace(
                            "\\/", "/")
                    sources.append({
                        'source': 'CDA',
                        'quality': q,
                        'language': lang,
                        'url': direct,
                        'info': info,
                        'direct': True,
                        'debridonly': False
                    })
            return sources
        except:
            return []
    return []
Example #14
def more_cdapl(link, hostDict, lang, info):
    if "cda.pl" in link:
        sources = []
        try:
            headers = {
                'User-Agent':
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3555.0 Safari/537.36"
            }
            response = requests.get(link, headers=headers).content
            test = client.parseDOM(response,
                                   'div',
                                   attrs={'class': 'wrapqualitybtn'})
            urls = client.parseDOM(test, 'a', ret='href')
            if urls:
                for url in urls:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    q = source_utils.check_sd_url(url)
                    direct = re.findall(
                        """file":"(.*)","file_cast""",
                        requests.get(url, headers=headers).content)[0].replace(
                            "\\/", "/")
                    sources.append({
                        'source': 'CDA',
                        'quality': q,
                        'language': lang,
                        'url': direct,
                        'info': info,
                        'direct': True,
                        'debridonly': False
                    })
            return sources
        except:
            return sources
    else:
        return []
Example #15
def more_rapidvideo(link, hostDict, lang, info):
    if "rapidvideo.com" in link:
        sources = []
        try:
            headers = {
                'User-Agent':
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3555.0 Safari/537.36"
            }
            response = requests.get(link, headers=headers).content
            test = re.findall("""(https:\/\/www.rapidvideo.com\/e\/.*)">""",
                              response)
            numGroups = len(test)
            for i in range(1, numGroups):
                url = test[i]
                valid, host = source_utils.is_host_valid(url, hostDict)
                q = source_utils.check_sd_url(url)
                sources.append({
                    'source': host,
                    'quality': q,
                    'language': lang,
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except:
            return sources
    else:
        return []
Example #16
File: s1dl.py  Project: helloman37/repo
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = requests.get(url).content
         print r
         r = re.compile('a href="(.+?)"').findall(r)
         print r
         for u in r:
             if 'Trailer' in u:
                 continue
             if 'AUDIO' in u:
                 continue
             url = self.base_link + self.title + u
             print url
             quality = source_utils.check_sd_url(url)
             sources.append({
                 'source': 'Direct',
                 'quality': quality,
                 'language': 'en',
                 'url': url,
                 'direct': True,
                 'debridonly': False
             })
         return sources
     except:
         return sources
Example #17
    def sources(self, url, hostDict, hostprDict):

        sources = []
        try:

            if url is None: return sources
            result = client.request(urlparse.urljoin(self.base_link, url),
                                    redirect=False)

            section = client.parseDOM(result,
                                      'section',
                                      attrs={'id': 'video_player'})[0]
            link = client.parseDOM(section, 'iframe', ret='src')[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid: return sources
            spans = client.parseDOM(section, 'span')
            info = None
            for span in spans:
                if span == 'Z lektorem':
                    info = 'Lektor'

            q = source_utils.check_sd_url(link)
            sources.append({
                'source': host,
                'quality': q,
                'language': 'pl',
                'url': link,
                'info': info,
                'direct': False,
                'debridonly': False
            })

            return sources
        except:
            return sources
Example #18
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            hldr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            season = 'S%02d/' % int(data['season'])
            title = data['tvshowtitle']

            url = urlparse.urljoin(self.base_link, self.search_link % (title, season))
            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'}
            results = client.request(url, headers=headers)
            if results is None:
                return sources

            results = re.compile('<tr><td><a href="(.+?)"').findall(results)
            for dirlink in results:
                if dirlink.startswith('.') or dirlink.startswith('?'):
                    continue
                sublink = urlparse.urljoin(url, dirlink)

                if dirlink.endswith('/'):
                    subhtml = client.request(sublink, headers=headers)
                    subres = re.compile('<tr><td><a href="(.+?)"').findall(subhtml)
                    for link in subres:
                        if link.startswith('.') or link.startswith('?'):
                            continue
                        if hldr in link:
                            link = urlparse.urljoin(sublink, link)
                            quality = source_utils.check_sd_url(link)
                            sources.append({'source': 'Direct', 'quality': quality, 'language': 'en',
                                            'url': link, 'direct': True, 'debridonly': False})
                else:
                    if hldr in dirlink:
                        link = urlparse.urljoin(sublink, dirlink)
                        quality = source_utils.check_sd_url(link)
                        sources.append({'source': 'Direct', 'quality': quality, 'language': 'en',
                                        'url': link, 'direct': True, 'debridonly': False})
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('DLlavinTV - Exception: \n' + str(failure))
            return sources
Example #19
 def sources(self, url, hostDict, hostprDict):
     
     sources = []
     try:
         if url is None: return sources
         headers = {
                 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
                 'Referer': 'http://iitv.pl/'
         }
         result = client.request(url)
         result = client.parseDOM(result, 'div', attrs={'class':'tab-wrapper'})[0]
         lektor = client.parseDOM(result, 'ul', attrs={'id':'lecPL'})
         if len(lektor) > 0:
             links = client.parseDOM(lektor, 'a', ret='href')
             for link in links:
                 if str(link).startswith("http://"):
                     link = self.getlink(link)
                     q = source_utils.check_sd_url(link)
                     valid, host = source_utils.is_host_valid(link, hostDict)
                     if not valid: continue
                     sources.append({'source': host, 'quality': q, 'language': 'pl', 'url': link, 'info': 'Lektor', 'direct': False, 'debridonly': False})
         napisy = client.parseDOM(result, 'ul', attrs={'id':'subPL'})
         if len(napisy) > 0:
             links = client.parseDOM(napisy, 'a', ret='href')
             for link in links:
                 if str(link).startswith("http://"):
                     link = self.getlink(link)
                     q = source_utils.check_sd_url(link)
                     valid, host = source_utils.is_host_valid(link, hostDict)
                     if not valid: continue
                     sources.append({'source': host, 'quality': q, 'language': 'pl', 'url': link, 'info': 'Napisy', 'direct': False, 'debridonly': False})
         eng = client.parseDOM(result, 'ul', attrs={'id':'org'})
         if len(eng) > 0:
             links = client.parseDOM(eng, 'a', ret='href')
             for link in links:
                 if str(link).startswith("http://"):
                     link = self.getlink(link)
                     q = source_utils.check_sd_url(link)
                     valid, host = source_utils.is_host_valid(link, hostDict)
                     if not valid: continue
                     sources.append({'source': host, 'quality': q, 'language': 'en', 'url': link, 'info': 'ENG', 'direct': False, 'debridonly': False})
         return sources
     except Exception as e:
         print str(e)
         return sources
Example #21
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources
            r = client.request(url)
            tag = client.parseDOM(r, 'div', attrs={'class': 'video-container'})
            tag = client.parseDOM(tag, 'p')[-1].lower()

            url = client.parseDOM(r,
                                  'div',
                                  attrs={'class': 'single-content tabs'})
            url = client.parseDOM(url, 'a', ret='href')[0]
            url = url.encode('utf-8')

            post = urllib.urlencode({
                'lock_password': '',
                'Submit': 'Online Linkek'
            })
            r = client.request(url, post=post)

            result = client.parseDOM(r, 'a', ret='href')
            result += client.parseDOM(r.lower(), 'iframe', ret='src')
            result = [i for i in result if not 'youtube.com' in i]
            if not result: raise Exception()

            info = '' if 'feliratos' in tag else 'szinkron'
            quality = 'CAM' if ('cam' in tag or 'mozis' in tag
                                or u'kamer\u00E1s' in tag) else 'SD'

            locDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]

            for item in result:
                try:
                    host = re.search('(?:\/\/|\.)([^www][\w]+[.][\w]+)\/',
                                     item).group(1)
                    host = host.strip().lower().split('.', 1)[0]
                    host = [x[1] for x in locDict if host == x[0]][0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    url = item.encode('utf-8')
                    if quality == 'SD':
                        quality = source_utils.check_sd_url(url)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'hu',
                        'info': info,
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass
            return sources
        except:
            return sources
Example #22
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
            }
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['title']
            year = data['year']
            url = self.base_link + self.search_link % year

            timer = control.Time(start=True)

            html = client.request(url, headers=headers)
            if html is None:
                return sources

            # this method guarantees only results matching our formatted title get pulled out of the html
            regex_string = r'<tr><td class="link"><a href="{0}(.+?)"'.format(
                title)
            results = re.compile(regex_string).findall(html)
            for link in results:
                # Stop searching 8 seconds before the provider timeout; otherwise the scraper may keep going, fail to finish in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('DL7Lavin - Timeout Reached')
                    break

                if 'Trailer' in link:
                    continue
                if 'Dubbed' in link:
                    continue
                url = self.base_link + self.search_link % year + title + link

                quality = source_utils.check_sd_url(url)
                sources.append({
                    'source': 'Direct',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'direct': True,
                    'debridonly': False
                })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('DL7LAVIN - Exception: \n' + str(failure))
            return sources
Example #23
    def sources(self, url, hostDict, hostprDict):
        sources = []  # initialise first so the except path can always return it
        try:
            search_url = url['url']
            post = url['post']
            search_more_post = url['more']
            referer = urlparse.urljoin(self.film_web, post['urlstrony'])
            result = client.request(search_url, post=post, referer=referer)

            if not result.startswith('http'):
                return sources
            
            valid, host = source_utils.is_host_valid(result, hostDict)
            q = source_utils.check_sd_url(result)
            first_found = {'source': host, 'quality': q, 'language': 'pl', 'url': result, 'info': '', 'direct': False, 'debridonly': False}      
            
#             if control.setting('provider.filmwebbooster.extrasearch') == 'true':
            search_url = urlparse.urljoin(self.base_link, self.search_more)
            result = client.request(search_url, post=search_more_post)
            result = dom_parser.parse_dom(result, 'a')            
            for el in result:
                desc = el.content
                info = desc[desc.find("(") + 1:desc.find(")")]
                lang = 'pl'
                if info.lower() == 'eng':
                    lang = 'en'
                    info = None
                link = el.attrs['href']                                 
                
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid: continue
                q = source_utils.check_sd_url(link)
                
                sources.append({'source': host, 'quality': q, 'language': lang, 'url': link, 'info': info, 'direct': False, 'debridonly': False})
            
            first_found['info'] = self.get_info_from_others(sources)
            sources.append(first_found)
            
            return sources
        except:
            return sources
Example #24

 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []         
         try:                             
             search_url = url['url'] 
             post = url['post']
             referer = urlparse.urljoin(self.film_web, post['urlstrony'])
             result = client.request(search_url, post=post,referer=referer)
             if not result.startswith('http'):
                 return sources
         
             valid, host = source_utils.is_host_valid(result, hostDict)
             q = source_utils.check_sd_url(result)
             info = ''
             if 'lektor' in result:
                 info = 'Lektor'
             if 'napisy' in result:
                 info = 'Napisy'
             first_found = {'source': host, 'quality': '720p', 'language': 'pl', 'url': result, 'info': info, 'direct': False, 'debridonly': False}      
             first_found['info'] = self.get_info_from_others(sources)
             sources.append(first_found)
         except:
             pass
         search_more_post = url['more']
         #search_url = urlparse.urljoin(self.base_link, self.search_more)
         result = client.request(self.base_link2, post=search_more_post)
         provider = client.parseDOM(result, 'option', ret='value')
         links = client.parseDOM(result, 'div', ret='data')
         wersja = client.parseDOM(result, 'div', attrs={'class':'wersja'})
         #result = dom_parser.parse_dom(result, 'a')            
         counter = 0
         for link in links:
             valid, host = source_utils.is_host_valid(link, hostDict)
             if not valid: continue
             q = source_utils.check_sd_url(link)
             sources.append({'source': host, 'quality': q, 'language': 'pl', 'url': link, 'info': wersja[counter], 'direct': False, 'debridonly': False})
             counter += 1
         return sources
     except:
         return sources
Example #25
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            with requests.Session() as s:
                headers = {"Referer": self.domain,\
                           "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",\
                           "Host": "www.BitLord.com","User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0",\
                           "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",\
                           "Accept-Encoding": "gzip, deflate, br","Accept-Language": "en-US,en;q=0.5",\
                           "Connection": "keep-alive","DNT":"1"}
                if 'episode' in url:
                    iep = url['episode'].zfill(2)
                    ise = url['season'].zfill(2)
                    se = 's' + ise + 'e' + iep
                    sel = url['tvshowtitle'].replace(' ', '.') + '.' + se
                    cate = '4'

                else:
                    sel = url['title'].replace(' ', '.') + '.' + url['year']
                    cate = '3'

                sel = sel.lower()
                bdata = {'filters[adult]': 'false', 'filters[category]': cate, 'filters[field]': 'category', 'filters[sort]': 'asc',\
                         'filters[time]': '4', 'limit': '25', 'offset': '0', 'query': sel}

                gs = s.post(self.search_link, data=bdata).text

                gl = re.compile('me\W+(.*?)[\'"].*?tih:(.*?)\W',
                                re.I).findall(gs)
                for nam, haas in gl:
                    checkca = s.get(self.checkc %
                                    (self.api_key, haas, self.api_key)).text
                    quality = source_utils.check_sd_url(nam)
                    if 'finished' in checkca:
                        url = self.pr_link % (self.api_key, haas)
                        sources.append({
                            'source': 'cached',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False,
                            'info': nam,
                        })
            return sources
        except:
            print("Unexpected error in BitLord Script: Sources",
                  sys.exc_info()[0])
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return sources
Example #26
    def sources(self, url, hostDict, hostprDict):

        sources = []
        try:

            if url is None: return sources
            result = client.request(urlparse.urljoin(self.base_link, url),
                                    redirect=False)
            cookies = client.request(urlparse.urljoin(self.base_link, url),
                                     output='cookie')
            headers = {
                'cookie': cookies,
                'dnt': '1',
                'accept-encoding': 'gzip, deflate, br',
                'accept-language': 'pl-PL,pl;q=0.9,en-US;q=0.8,en;q=0.7',
                'user-agent':
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.91 Safari/537.36',
                'accept': 'text/html, */*; q=0.01',
                'referer': self.base_link + url,
                'authority': 'www.boxfilm.pl',
                'x-requested-with': 'XMLHttpRequest',
            }

            response = requests.get(
                'https://www.boxfilm.pl/include/player.php',
                headers=headers).content
            section = client.parseDOM(result,
                                      'section',
                                      attrs={'id': 'video_player'})[0]
            link = client.parseDOM(response, 'iframe', ret='src')[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid: return sources
            spans = client.parseDOM(section, 'span')
            info = None
            for span in spans:
                if span == 'Z lektorem':
                    info = 'Lektor'

            q = source_utils.check_sd_url(link)
            sources.append({
                'source': host,
                'quality': q,
                'language': 'pl',
                'url': link,
                'info': info,
                'direct': False,
                'debridonly': False
            })

            return sources
        except:
            return sources
Example #27
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if not debrid.status(): raise Exception()
         r = self.scraper.get(url).content
         r = re.findall('<iframe src="(.+?)"', r)
         for url in r:
             valid, host = source_utils.is_host_valid(url, hostDict)
             quality = source_utils.check_sd_url(url)
             sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
         return sources
     except:
         return sources
Example #28
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []
			r = client.request(url)
			match = re.compile('<td align="center"><strong><a href="(.+?)"').findall(r)
			for url in match: 
				host = url.split('//')[1].replace('www.','')
				host = host.split('/')[0].split('.')[0].title()
				quality = source_utils.check_sd_url(url)
				sources.append({'source': host, 'quality': quality, 'language': 'en','url': url,'direct': False,'debridonly': False})
		except Exception:
			return sources
		return sources
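Examples #9 and #28 derive the host label by slicing the URL on '//' and '.' by hand. A slightly more defensive equivalent with the standard urlparse module (an alternative sketch, not the projects' code):

    from urlparse import urlparse  # Python 2, matching these add-ons

    def host_label(url):
        # Equivalent of url.split('//')[1].replace('www.', '')
        # .split('/')[0].split('.')[0].title(), but tolerant of a
        # missing scheme.
        netloc = urlparse(url).netloc or url.split('/')[0]
        return netloc.replace('www.', '').split('.')[0].title()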
Example #29
    def sources(self, url, hostDict, hostprDict):
        sources = []  # initialise first so the except path can always return it
        try:
            search_url = url['url']
            post = url['post']
            search_more_post = url['more']
            result = client.request(search_url, post=post)

            if not result.startswith('http'):
                return sources

            valid, host = source_utils.is_host_valid(result, hostDict)
            q = source_utils.check_sd_url(result)
            first_found = {'source': host, 'quality': q, 'language': 'pl', 'url': result, 'info': '', 'direct': False, 'debridonly': False}

            search_url = urlparse.urljoin(self.base_link, self.search_more)
            result = client.request(search_url, post=search_more_post)
            result = dom_parser.parse_dom(result, 'a')
            for el in result:
                desc = el.content
                info = desc[desc.find("(") + 1:desc.find(")")]
                lang = 'pl'
                if info.lower() == 'eng':
                    lang = 'en'
                    info = None
                link = el.attrs['href']

                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                q = source_utils.check_sd_url(link)

                sources.append({'source': host, 'quality': q, 'language': lang, 'url': link, 'info': info, 'direct': False, 'debridonly': False})

            first_found['info'] = self.get_info_from_others(sources)
            sources.append(first_found)

            return sources
        except Exception:
            return sources
Example #31
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         with requests.Session() as s:
             headers = {"Referer": self.domain,\
                        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",\
                        "Host": "www.BitLord.com","User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0",\
                        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",\
                        "Accept-Encoding": "gzip, deflate, br","Accept-Language": "en-US,en;q=0.5",\
                        "Connection": "keep-alive","DNT":"1"}
             if 'episode' in url:
                 iep = url['episode'].zfill(2)
                 ise = url['season'].zfill(2)
                 se = 's' + ise + 'e' + iep
                 sel = url['tvshowtitle'].replace(' ','.') + '.' + se
                 cate = '4'
                 
             else:
                 sel = url['title'].replace(' ','.') + '.' + url['year']
                 cate = '3'
                 
             sel = sel.lower()
             bdata = {'filters[adult]': 'false', 'filters[category]': cate, 'filters[field]': 'category', 'filters[sort]': 'asc',\
                      'filters[time]': '4', 'limit': '25', 'offset': '0', 'query': sel}
             
             gs = s.post(self.search_link, data=bdata).text
             
             gl = re.compile('me\W+(.*?)[\'"].*?tih:(.*?)\W', re.I).findall(gs)
             for nam,haas in gl:
                 checkca = s.get(self.checkc % (self.api_key, haas, self.api_key)).text
                 quality = source_utils.check_sd_url(nam)
                 if 'finished' in checkca:
                     url = self.pr_link % (self.api_key, haas)
                     sources.append({
                         'source': 'cached',
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False,
                         'info': nam,
                     })  
         return sources
     except:
         print("Unexpected error in BitLord Script: Sources", sys.exc_info()[0])
         exc_type, exc_obj, exc_tb = sys.exc_info()
         print(exc_type, exc_tb.tb_lineno)
         return sources
Example #32

    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            with requests.Session() as s:
                headers = {"Referer": self.domain,\
                           "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",\
                           "Host": "www.magnetdl.com","User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0",\
                           "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",\
                           "Accept-Encoding": "gzip, deflate, br","Accept-Language": "en-US,en;q=0.5",\
                           "Connection": "keep-alive","DNT":"1"}
                if 'episode' in url:
                    iep = url['episode'].zfill(2)
                    ise = url['season'].zfill(2)
                    se = 's' + ise + 'e' + iep
                    sel = url['tvshowtitle'].replace(' ', '-') + '-' + se

                else:
                    sel = url['title'].replace(' ', '-') + '-' + url['year']

                sel = sel.lower()
                gs = s.get(self.search_link % (sel[0], sel),
                           headers=headers).text
                gl = re.compile(
                    'ih:(.*?)\W.*?ef\W+.*?tle\W+(.*?)[\'"].*?\d</td.*?d>(.*?)<',
                    re.I).findall(gs)
                for hass, nam, siz in gl:
                    checkca = s.get(self.checkc %
                                    (self.api_key, hass, self.api_key)).text
                    quality = source_utils.check_sd_url(nam)
                    if 'finished' in checkca:
                        url = self.pr_link % (self.api_key, hass)
                        sources.append({
                            'source': 'cached',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False,
                            'info': siz + ' ' + nam,
                        })
            return sources
        except:
            print("Unexpected error in MagnetDL Script: Sources",
                  sys.exc_info()[0])
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return sources
Example #33
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None: return sources
         for link in url:
             try:
                 lang = link[u'quality']
                 video_link = link[u'url']
                 lang, info = self.get_lang_by_type(lang)
                 q = source_utils.check_sd_url(video_link)
                 valid, host = source_utils.is_host_valid(
                     video_link, hostDict)
                 if 'rapidvideo' in video_link:
                     content = requests.get(video_link,
                                            timeout=3,
                                            allow_redirects=True).content
                     q = re.findall("""data-res=\"(.*?)\"""", content)[0]
                     if int(q) == 720:
                         q = 'HD'
                     elif int(q) > 720:
                         q = '1080'
                     elif int(q) < 720:
                         q = 'SD'
                 if 'streamango' in video_link or 'openload' in video_link:
                     content = requests.get(video_link,
                                            timeout=3,
                                            allow_redirects=True).content
                     q = re.findall("""og:title\" content=\"(.*?)\"""",
                                    content)[0]
                     q = source_utils.get_release_quality('', q)[0]
                 if valid:
                     if 'ebd' in host.lower():
                         host = 'CDA'
                     sources.append({
                         'source': host,
                         'quality': q,
                         'language': lang,
                         'url': video_link,
                         'info': info,
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except:
         return sources
Example #34
 def sources(self, url, hostDict, hostprDict):
     try:
         # import pydevd
         # pydevd.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True)
         sources = []
         result = self.session.get(url).content
         result = result.decode('utf-8')
         h = HTMLParser()
         result = h.unescape(result)
         result = client.parseDOM(result,
                                  'div',
                                  attrs={'class': 'tabela_wiersz mb-1'})
         for counter, item in enumerate(result, 0):
             try:
                 test = client.parseDOM(result,
                                        'span',
                                        attrs={'class': 'tabela_text'})
                 info = test[(2 + (3 * counter))]
                 info = self.get_lang_by_type(info)
                 quality = test[(1 + (3 * counter))]
                 quality = source_utils.check_sd_url(quality)
                 try:
                     id = re.findall("""ShowMovie\('(.*?)'\)""", item)[0]
                 except:
                     id = re.findall("""ShowSer\('(.*?)'\)""", item)[0]
                 try:
                     host = re.findall("""<\/i> (.*?)<\/span>""", item)[0]
                     if 'serial' in url:
                         id = id + '/s'
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': info[0],
                         'url': id,
                         'info': info[1],
                         'direct': False,
                         'debridonly': False
                     })
                 except:
                     continue
             except:
                 continue
         return sources
     except:
         return sources
Example #35
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            with requests.Session() as s:
                if 'episode' in url:
                    iep = url['episode'].zfill(2)
                    ise = url['season'].zfill(2)
                    se = 's' + ise + 'e' + iep
                    sel = url['tvshowtitle'].replace(' ', '+') + '+' + se

                else:
                    se = url['year']
                    sel = url['title'].replace(' ', '+') + '+' + se

                gs = s.get(self.search_link % (sel)).text
                gl = re.compile('f\W+(/t.*?)"\st.*?' + se, re.I).findall(gs)
                for res in gl:
                    rih = s.get(self.domain + res).text
                    gih = re.compile(
                        'Size.*?n>(.*?)<.*?hash.*?n\W+(.*?)\W.*?>D\w+(.*?)<',
                        re.DOTALL).findall(rih)
                    for si, hass, nam in gih:
                        checkca = s.get(
                            self.checkc %
                            (self.api_key, hass, self.api_key)).text
                        quality = source_utils.check_sd_url(nam)
                        if 'finished' in checkca:
                            url = self.pr_link % (self.api_key, hass)
                            sources.append({
                                'source': 'cached',
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'direct': False,
                                'debridonly': False,
                                'info': si + '|' + nam,
                            })

            return sources
        except:
            print("Unexpected error in 1337xx Script: Sources",
                  sys.exc_info()[0])
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return sources
Example #36
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'}
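            # The url argument arrives urlencoded; flatten it into a plain dict.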
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            hldr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            season = 'S%02d/' % int(data['season'])
            title = data['tvshowtitle']

            # Check the season directory directly; no extra validation needed, as the path either exists or it doesn't.
            url = urlparse.urljoin(self.base_link, self.search_link % (title, season))

            timer = control.Time(start=True)

            results = client.request(url, headers=headers)
            if results is None:
                return sources

            results = re.compile('<a href="(.+?)"').findall(results)
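            # Walk the directory listing; skip navigation links and keep only entries for this episode.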
            for link in results:
                # sc_timeout is expected to sit a few seconds below the provider timeout; stop once it elapses, otherwise the search may not finish in time and no links get returned.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('DL3F2M - Timeout Reached')
                    break

                if link.startswith('.') or link.startswith('?'):
                    continue
                if hldr in link:
                    link = urlparse.urljoin(url, link)
                    quality = source_utils.check_sd_url(link)
                    sources.append({'source': 'Direct', 'quality': quality, 'language': 'en',
                                    'url': link, 'direct': True, 'debridonly': False})
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('DL3F2M.IO - Exception: \n' + str(failure))
            return sources
Example #37
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = self.scraper.get(url).content
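         # Every iframe embedded in the page points at a hoster; collect them all.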
         r = re.findall(r'<iframe src="(.+?)"', r)
         for url in r:
             valid, host = source_utils.is_host_valid(url, hostDict)
             quality = source_utils.check_sd_url(url)
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': 'en',
                 'url': url,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except:
         return sources
Example #39
    def sources(self, url, hostDict, hostprDict):

        sources = []
        try:
            if url is None: return sources
            r = client.request(urlparse.urljoin(self.base_link, url), redirect=False)
            info = self.get_lang_by_type(client.parseDOM(r, 'title')[0])
            r = client.parseDOM(r, 'div', attrs={'class': 'tab-pane active'})[0]
            r = client.parseDOM(r, 'script')[0]
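            # The embedded script is obfuscated; shwp() decodes it to recover the iframe markup.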
            script = r.split('"')[1]
            decoded = self.shwp(script)
            
            link = client.parseDOM(decoded, 'iframe', ret='src')[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid: return sources
            q = source_utils.check_sd_url(link)
            sources.append({'source': host, 'quality': q, 'language': 'pl', 'url': link, 'info': info, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #40
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         with requests.Session() as s:
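             # torrentapi hands out a session token first and rate-limits requests, hence the short sleep.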
             gettoken = s.get(self.tokenta).text
             xbmc.sleep(2000)
             tokenapi = re.compile(r'n\W+(.*?)[\'"]', re.I).findall(gettoken)[0]
             if 'episode' in url:
                 iep = url['episode'].zfill(2)
                 ise = url['season'].zfill(2)
                 se = 's' + ise + 'e' + iep
                 sel = url['tvshowtitle'].replace(' ','.') + '.' + se
                 search_link = self.tvsearch
             else:
                 sel = url['title'].replace(' ','.') + '.' + url['year']
                 search_link = self.msearch
             gs = s.get(search_link % (sel, tokenapi)).text
             gl = re.compile(r'ame\W+(.*?)[\'"].*?ih:(.*?)\W', re.I).findall(gs)
             for nam,hass in gl:
                 checkca = s.get(self.checkc % (self.api_key, hass, self.api_key)).text
                 quality = source_utils.check_sd_url(nam)
                 if 'finished' in checkca:
                     url = self.pr_link % (self.api_key, hass)
                     sources.append({
                         'source': 'cached',
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False,
                         'info': nam,
                     })  
         return sources
     except:
         print("Unexpected error in Torrentapi Script: Sources", sys.exc_info()[0])
         exc_type, exc_obj, exc_tb = sys.exc_info()
         print(exc_type, exc_tb.tb_lineno)
         return sources
Example #41
    def sources(self, url, hostDict, hostprDict):
        
        sources = []
        try:

            if url is None: return sources
            result = client.request(urlparse.urljoin(self.base_link, url), redirect=False)
            
            section = client.parseDOM(result, 'section', attrs={'id':'video_player'})[0]
            link = client.parseDOM(section, 'iframe', ret='src')[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid: return sources
            spans = client.parseDOM(section, 'span')
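            # A 'Z lektorem' ('with voice-over') badge marks the Polish Lektor version.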
            info = None
            for span in spans:
                if span == 'Z lektorem':
                    info = 'Lektor'

            q = source_utils.check_sd_url(link)
            sources.append({'source': host, 'quality': q, 'language': 'pl', 'url': link, 'info': info, 'direct': False, 'debridonly': False})
            
            return sources
        except:
            return sources
Example #42
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         User_Agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
         headers = {'User-Agent':User_Agent}
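         # Fetch the page without following redirects; the hoster links sit in embedded iframes.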
         r = requests.get(url,headers=headers,allow_redirects=False).content
         try:
             match = re.compile(r'<div><iframe src="(.+?)"').findall(r)
             for url in match:
                 host = url.split('//')[1].replace('www.', '')
                 host = host.split('/')[0].split('.')[0].title()
                 quality = source_utils.check_sd_url(url)
                 page = requests.get(url, headers=headers, allow_redirects=False).content
                 if 'http' in url:
                     links = re.compile(r"url: '(.+?)',").findall(page)
                 else:
                     links = re.compile(r'file: "(.+?)",').findall(page)
                 for link in links:
                     sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
         except:
             return sources
     except Exception:
         return sources
     return sources
Example #43
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            imdb = data['imdb']
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                url = self.searchShow(title, int(data['season']), int(data['episode']), aliases, headers)
            else:
                url = self.searchMovie(title, data['year'], aliases, headers)

            r = client.request(url, headers=headers, output='extended', timeout='10')

            if imdb not in r[0]:
                raise Exception()

            # client.request 'extended' output: r[0] page body, r[3] request headers, r[4] cookie string.
            cookie = r[4]
            headers = r[3]
            result = r[0]

            try:
                r = re.findall(r'(https:.*?redirector.*?)[\'"]', result)
                for i in r:
                    try:
                        sources.append(
                            {'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en',
                             'url': i, 'direct': True, 'debridonly': False})
                    except Exception:
                        pass
            except Exception:
                pass

            try:
                auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            except Exception:
                auth = 'false'
            auth = 'Bearer %s' % urllib.unquote_plus(auth)
            headers['Authorization'] = auth
            headers['Referer'] = url

            u = '/ajax/vsozrflxcw.php'
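            # Resolve the site's current domain first (output='geturl' returns the final URL after redirects).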
            self.base_link = client.request(self.base_link, headers=headers, output='geturl')
            u = urlparse.urljoin(self.base_link, u)

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())

            token = re.findall(r"var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall(r'elid\s*=\s*"([^"]+)', result)[0]

            post = {'action': action, 'idEl': idEl, 'token': token, 'nopop': '', 'elid': elid}
            post = urllib.urlencode(post)
            cookie += ';%s=%s' % (idEl, elid)
            headers['Cookie'] = cookie

            r = client.request(u, post=post, headers=headers, cookie=cookie, XHR=True)
            r = str(json.loads(r))

            r = re.findall(r"'(http.+?)'", r) + re.findall(r'"(http.+?)"', r)

            for i in r:
                try:
                    if 'google' in i:
                        quality = 'SD'

                        if 'googleapis' in i:
                            try:
                                quality = source_utils.check_sd_url(i)
                            except Exception:
                                pass

                        if 'googleusercontent' in i:
                            i = directstream.googleproxy(i)
                            try:
                                quality = directstream.googletag(i)[0]['quality']
                            except Exception:
                                pass

                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })

                    elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                        try:
                            quality = source_utils.check_sd_url(i)

                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })

                        except Exception:
                            pass
                    else:
                        valid, hoster = source_utils.is_host_valid(i, hostDict)
                        if not valid:
                            continue

                        sources.append({
                            'source': hoster,
                            'quality': '720p',
                            'language': 'en',
                            'url': i,
                            'direct': False,
                            'debridonly': False
                        })
                except Exception:
                    pass
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('CartoonHD - Exception: \n' + str(failure))
            return sources
Example #44
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            imdb = data['imdb']
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                url = self.searchShow(title, int(data['season']), int(data['episode']), aliases, headers)
            else:
                url = self.searchMovie(title, data['year'], aliases, headers)

            r = client.request(url, headers=headers, output='extended', timeout='10')

            if imdb not in r[0]:
                raise Exception()

            cookie = r[4]
            headers = r[3]
            result = r[0]

            try:
                r = re.findall(r'(https:.*?redirector.*?)[\'"]', result)
                for i in r:
                    try:
                        sources.append(
                            {'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en',
                             'url': i, 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass

            try:
                auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            except:
                auth = 'false'
            auth = 'Bearer %s' % urllib.unquote_plus(auth)
            headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
            headers['Authorization'] = auth
            headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
            headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
            headers['Accept-Encoding'] = 'gzip,deflate,br'
            headers['Referer'] = url

            u = '/ajax/tnembedr.php'
            self.base_link = client.request(self.base_link, headers=headers, output='geturl')
            u = urlparse.urljoin(self.base_link, u)

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())

            token = re.findall(r"var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall(r'elid\s*=\s*"([^"]+)', result)[0]

            post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
            post = urllib.urlencode(post)
            cookie += ';%s=%s' % (idEl, elid)
            headers['Cookie'] = cookie

            r = client.request(u, post=post, headers=headers, cookie=cookie, XHR=True)
            r = str(json.loads(r))
            r = re.findall(r"'(http.+?)'", r) + re.findall(r'"(http.+?)"', r)

            for i in r:
                # Reset per link; the googleusercontent branch below may pre-populate these.
                urls, host, direct = [], None, False
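                # googleusercontent links need their redirect Location and session cookie resolved manually.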
                if 'googleusercontent' in i:
                    try:
                        newheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
                               'Accept': '*/*',
                               'Host': 'lh3.googleusercontent.com',
                               'Accept-Language': 'en-US,en;q=0.8,de;q=0.6,es;q=0.4',
                               'Accept-Encoding': 'identity;q=1, *;q=0',
                               'Referer': url,
                               'Connection': 'Keep-Alive',
                               'X-Client-Data': 'CJK2yQEIo7bJAQjEtskBCPqcygEIqZ3KAQjSncoBCKijygE=',
                               'Range': 'bytes=0-'
                          }
                        resp = client.request(i, headers=newheaders, redirect=False, output='extended', timeout='10')
                        loc = resp[2]['Location']
                        c = resp[2]['Set-Cookie'].split(';')[0]
                        i = '%s|Cookie=%s' % (loc, c)
                        urls, host, direct = [{'quality': 'SD', 'url': i}], 'gvideo', True
                    except:
                        pass

                try:
                    quali = source_utils.check_sd_url(i)
                    if 'googleapis' in i:
                        sources.append({'source': 'gvideo', 'quality': quali, 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                        continue
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    if not urls:
                        urls, host, direct = source_utils.check_directstreams(i, hoster)
                    if valid:
                        for x in urls:
                            if host == 'gvideo':
                                try:
                                    x['quality'] = directstream.googletag(x['url'])[0]['quality']
                                except:
                                    pass

                            sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                    else:
                        sources.append({'source': 'CDN', 'quality': quali, 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                except: pass

            return sources
        except:
            return sources
Example #45
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])
            mozhdr = {
                'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
            # Copy before mutating so the plain mozhdr headers stay usable for the non-XHR requests below.
            headers = mozhdr.copy()
            headers['X-Requested-With'] = 'XMLHttpRequest'

            self.s = cfscrape.create_scraper()
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            headers['Referer'] = url
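            # The numeric media id is embedded in the landing-page URL.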
            mid = re.findall(r'-(\d*)\.', url)[0]
            r = self.s.post(url, headers=headers)
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.s.get(u, headers=mozhdr).content
                r = json.loads(r)['html']
                rl = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                rh = client.parseDOM(r, 'div', attrs={'class': 'pas-header'})
                ids = client.parseDOM(rl, 'li', ret='data-id')
                servers = client.parseDOM(rl, 'li', ret='data-server')
                labels = client.parseDOM(rl, 'a', ret='title')
                r = zip(ids, servers, labels)
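                # Map each header entry's id to its class so embed hosters can be told apart from token-based sources.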
                rrr = zip(client.parseDOM(rh, 'li', ret='data-id'), client.parseDOM(rh, 'li', ret='class'))
                types = {}
                for rr in rrr:
                    types[rr[0]] = rr[1]

                for eid in r:
                    try:
                        try:
                            ep = re.findall(r'episode.*?(\d+).*?', eid[2].lower())[0]
                        except Exception:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            t = str(int(time.time()*1000))
                            quali = source_utils.get_release_quality(eid[2])[0]
                            if 'embed' in types[eid[1]]:
                                url = urlparse.urljoin(self.base_link, self.embed_link % (eid[0]))
                                xml = self.s.get(url, headers=headers).content
                                url = json.loads(xml)['src']
                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                if not valid:
                                    continue
                                q = source_utils.check_sd_url(url)
                                q = q if q != 'SD' else quali
                                sources.append({'source': hoster, 'quality': q, 'language': 'en',
                                                'url': url, 'direct': False, 'debridonly': False})
                                continue
                            else:
                                url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid, t))
                            script = self.s.get(url, headers=headers).content
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith('()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                y = re.search('''_y=['"]([^"']+)''', script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
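                            # The playlist endpoint sometimes returns an empty body; retry up to ten times.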
                            length = 0
                            count = 0
                            while length == 0 and count < 11:
                                r = self.s.get(u, headers=headers).text
                                length = len(r)
                                if length == 0:
                                    count += 1
                            uri = json.loads(r)['playlist'][0]['sources']
                            try:
                                uri = [i['file'] for i in uri if 'file' in i]
                            except Exception:
                                try:
                                    uri = [uri['file']]
                                except Exception:
                                    continue

                            for url in uri:
                                if 'googleapis' in url:
                                    q = source_utils.check_sd_url(url)
                                    sources.append({'source': 'gvideo', 'quality': q, 'language': 'en',
                                                    'url': url, 'direct': True, 'debridonly': False})
                                    continue

                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                # urls, host, direct = source_utils.check_directstreams(url, hoster)
                                q = quali
                                if valid:
                                    # for z in urls:
                                    if hoster == 'gvideo':
                                        direct = True
                                        try:
                                            q = directstream.googletag(url)[0]['quality']
                                        except Exception:
                                            pass
                                        url = directstream.google(url)
                                    else:
                                        direct = False
                                    sources.append({'source': hoster, 'quality': q, 'language': 'en',
                                                    'url': url, 'direct': direct, 'debridonly': False})
                                else:
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'en',
                                                    'url': url, 'direct': True, 'debridonly': False})
                    except Exception:
                        pass
            except Exception:
                pass

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('SolarMoviez - Exception: \n' + str(failure))
            return sources