def sources(self, url, hostDict, hostprDict):
    try:
        sources = []
        r = client.request(url)
        try:
            data = client.parseDOM(r, 'div', attrs={'class': 'playex'})
            data = [client.parseDOM(i, 'iframe', ret='src') for i in data if i]
            try:
                for url in data[0]:
                    quality, info = source_utils.get_release_quality(url, None)
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url.replace('\/', '/'),
                        'direct': False,
                        'debridonly': False
                    })
            except:
                pass
        except:
            pass
        return sources
    except Exception:
        return
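Every example on this page leans on the same two source_utils helpers. A minimal sketch of the contract the scrapers assume (assumed shapes only, not the actual source_utils implementation):

def get_release_quality(release_name, release_url=None):
    # Assumed contract: returns (quality, info), where quality is a label
    # such as '4K', '1080p', '720p', 'SD', 'SCR' or 'CAM', and info is a
    # list of extra tags (codec, size, ...) that callers join with ' | '.
    blob = ('%s %s' % (release_name or '', release_url or '')).lower()
    if '2160' in blob or '4k' in blob: quality = '4K'
    elif '1080' in blob: quality = '1080p'
    elif '720' in blob: quality = '720p'
    else: quality = 'SD'
    info = ['HEVC'] if ('hevc' in blob or 'x265' in blob) else []
    return quality, info

def is_host_valid(url_or_host, host_list):
    # Assumed contract: returns (valid, host), where host is the matched
    # hoster name from host_list, or (False, '') when nothing matches.
    for host in host_list:
        if host.lower() in (url_or_host or '').lower():
            return True, host
    return False, ''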
Example #2
 def _get_sources(self, item):
     try:
         links = dom_parser2.parse_dom(item, 'a', req='href')
         links = [i.attrs['href'] for i in links]
         info = []
         try:
             size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', item)[0]
             div = 1 if size.endswith(('GB', 'GiB')) else 1024
             size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
             size = '%.2f GB' % size
             info.append(size)
         except Exception:
             pass
         info = ' | '.join(info)
         for url in links:
             if 'youtube' in url: continue
             if any(x in url for x in ['.rar.', '.zip.', '.iso.']) or any(
                     url.endswith(x) for x in ['.rar', '.zip', '.iso']): raise Exception()
             valid, host = source_utils.is_host_valid(url, self.hostDict)
             if not valid: continue
             host = client.replaceHTMLCodes(host)
             host = host.encode('utf-8')
             quality, info2 = source_utils.get_release_quality(url, url)
             if url in str(self._sources): continue
             self._sources.append(
                 {'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
                  'debridonly': True})
     except Exception:
         pass
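The size handling in _get_sources above recurs almost verbatim in the examples below; a single-function version of the same pattern, with the GiB/MiB units handled explicitly (a sketch, not part of the original module):

import re

def normalize_size(size_str):
    # Turn '1.4 GB', '700 MiB' or '1,4 GB' into a '%.2f GB' label; assumes
    # the unit is one of GB/GiB/MB/MiB, as matched by the scrapers' regexes.
    div = 1 if size_str.endswith(('GB', 'GiB')) else 1024
    value = float(re.sub('[^0-9.]', '', size_str.replace(',', '.'))) / div
    return '%.2f GB' % value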
    def _get_items(self, r):
        try:
            size = re.search(r'<size>([\d]+)</size>', r).groups()[0]
            seeders = re.search(r'<seeders>([\d]+)</seeders>', r).groups()[0]
            _hash = re.search(r'<info_hash>([a-zA-Z0-9]+)</info_hash>', r).groups()[0]
            name = re.search(r'<title>(.+?)</title>', r).groups()[0]
            url = 'magnet:?xt=urn:btih:%s&dn=%s' % (_hash.upper(), urllib.quote_plus(name))
            t = name.split(self.hdlr)[0]

            try:
                y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
            except BaseException:
                y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()

            try:
                div = 1000 ** 3
                size = float(size) / div
                size = '%.2f GB' % size
            except BaseException:
                size = '0'

            quality, info = source_utils.get_release_quality(name, name)
            info.append(size)
            info = ' | '.join(info)

            if not seeders == '0':
                if cleantitle.get(re.sub(r'\(|\)', '', t)) == cleantitle.get(self.title):
                    if y == self.hdlr:
                        self._sources.append({'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})

        except BaseException:
            pass
Example #4
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources

            html = client.request(url)
            try:
                iframe = client.parseDOM(html, 'iframe', attrs = {'class': 'embed-responsive-item'}, ret='src')[0]
                host = iframe.split('//')[1].replace('www.','')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({'source':host,'quality':'SD','language': 'en','url':iframe,'direct':False,'debridonly':False})
            except:
                flashvar = client.parseDOM(html, 'param', attrs = {'name': 'flashvars'}, ret='value')[0]
                link = flashvar.split('file=')[1]
                host = link.split('//')[1].replace('www.','')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({'source':host,'quality':'SD','language': 'en','url':link,'direct':False,'debridonly':False})

            containers = client.parseDOM(html, 'div', attrs={'class':'dwn-box'})

            for container in containers:
                link = client.parseDOM(container, 'a', attrs={'rel':'nofollow'}, ret='href')[0]
                redirect = client.request(link, output='geturl')
                quality,info = source_utils.get_release_quality(redirect)
                sources.append({'source':'DirectLink','quality':quality,'language': 'en','url':redirect,'info':info,'direct':True,'debridonly':False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('CoolTV - Exception: \n' + str(failure))
            return
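The iframe.split('//')[1] host extraction above (repeated in several later examples) raises IndexError on links without a scheme separator; a more defensive sketch using the urlparse module these scrapers already rely on:

import urlparse  # urllib.parse on Python 3

def host_label(link):
    # Derive a display name such as 'Openload' from a stream URL;
    # protocol-relative links ('//host/...') are normalized first.
    if link.startswith('//'):
        link = 'http:' + link
    netloc = urlparse.urlparse(link).netloc
    return netloc.replace('www.', '').split('.')[0].title()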
Example #5
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            posts = client.parseDOM(r, 'item')

            for post in posts:
                Links = client.parseDOM(post, 'enclosure', ret='url')
                if Links:
                    for vid_url in Links:
                        quality,info = source_utils.get_release_quality(url, vid_url)
                        host = vid_url.split('//')[1].replace('www.','')
                        host = host.split('/')[0].lower()
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': vid_url, 'info': info, 'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Best-Moviez - Exception: \n' + str(failure))
            return sources
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)

            quality = dom_parser.parse_dom(r, 'span', attrs={'id': 'release_text'})[0].content.split('&nbsp;')[0]
            quality, info = source_utils.get_release_quality(quality)

            r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'currentStreamLinks'})
            r = [(dom_parser.parse_dom(i, 'p', attrs={'class': 'hostName'}), dom_parser.parse_dom(i, 'a', attrs={'class': 'stream-src'}, req='data-id')) for i in r]
            r = [(re.sub(' hd$', '', i[0][0].content.lower()), [x.attrs['data-id'] for x in i[1]]) for i in r if i[0] and i[1]]

            for hoster, id in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'info': ' | '.join(info + ['' if len(id) == 1 else 'multi-part']), 'url': id, 'direct': False, 'debridonly': False, 'checkquality': True})

            return sources
        except:
            return sources
Example #7
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            result = proxy.request(url, 'main_body')

            links = client.parseDOM(result, 'tbody')

            for i in links:
                try:
                    url = client.parseDOM(i, 'a', ret='href')[0]
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
                    url = base64.b64decode(url)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = host.encode('utf-8')

                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    quality,info = source_utils.get_release_quality(quality, url)

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #8
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources
            r = client.request(url)
            r = dom_parser2.parse_dom(r, 'div', {'class': 'll-item'})
            r = [(dom_parser2.parse_dom(i, 'a', req='href'), \
                  dom_parser2.parse_dom(i, 'div', {'class': 'notes'})) \
                  for i in r if i]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0].content if i[1] else 'None') for i in r]
            for i in r:
                try:
                    url = i[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    valid, host = source_utils.is_host_valid(i[1], hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    
                    info = []
                    quality, info = source_utils.get_release_quality(i[2], i[2])

                    info = ' | '.join(info)
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'tab-plot_german'})
            r = dom_parser.parse_dom(r, 'tbody')
            r = dom_parser.parse_dom(r, 'tr')

            for i in r:
                if re.search('(?<=">)(\n.*?)(?=<\/a>)', i[1]).group().strip():
                    hoster = re.search('(?<=">)(\n.*?)(?=<\/a>)', i[1]).group().strip()
                    link = re.search('(?<=href=\")(.*?)(?=\")', i[1]).group()
                    rel = re.search('(?<=oddCell qualityCell">)(\n.*?)(?=<\/td>)', i[1]).group().strip()
                    quality, info = source_utils.get_release_quality(rel)
                    if not quality:
                        quality = 'SD'

                    valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                    if not valid: continue

                    sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': link, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            try:
                if 'tvshowtitle' in data:
                    epi = 'EP%d' % int(data['episode'])
                    links = self.searchShow(data['tvshowtitle'], data['season'])
                    url = [i[1] for i in links if epi.lower() == i[0].lower()]

                else:
                    url = self.searchMovie(data['title'], data['year'])
                    try:
                        url = client.parseDOM(url, 'iframe', ret='src', attrs={'id':'advanced_iframe'})
                    except:
                        url = re.findall('''<h4>server\d+</h4>.+?src=['"]([^'"]+)''', url, re.I|re.DOTALL)

            except:
                pass

            for u in url:
                if 'entervideo' in u:
                    r = client.request(u)
                    url = client.parseDOM(r, 'source', ret='src')[0]
                    quality, info = source_utils.get_release_quality(url, url)
                    sources.append({'source': 'CDN', 'quality': quality, 'language': 'en', 'url': url,
                                    'direct': True, 'debridonly': False})
                elif 'vidnode' in u:
                    headers = {'Host': 'vidnode.net',
                               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
                               'Upgrade-Insecure-Requests': '1',
                               'Accept-Language': 'en-US,en;q=0.9'}
                    r = client.request(u, headers=headers)
                    links = re.findall('''\{file:\s*['"]([^'"]+).*?label:\s*['"](\d+\s*P)['"]''', r, re.DOTALL|re.I)
                    for u, qual in links:
                        quality, info = source_utils.get_release_quality(qual, u)
                        url = u
                        sources.append({'source': 'CDN', 'quality': quality, 'language': 'en', 'url': url,
                                'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Example #11
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources


            headers={'User-Agent':client.randomagent()}
            html = client.request(url,headers=headers)

            Links = re.compile('id="link_.+?target="_blank" id="(.+?)"',re.DOTALL).findall(html)
            for vid_url in Links:
                if 'openload' in vid_url:
                    try:
                        source_html   = client.request(vid_url,headers=headers)
                        source_string = re.compile('description" content="(.+?)"',re.DOTALL).findall(source_html)[0]
                        quality,info = source_utils.get_release_quality(source_string, vid_url)
                    except:
                        quality = 'DVD'
                        info = []
                    sources.append({'source': 'Openload','quality': quality,'language': 'en','url':vid_url,'info':info,'direct': False,'debridonly': False})
                elif 'streamango' in vid_url:
                    try:  
                        source_html = client.request(vid_url,headers=headers)
                        source_string = re.compile('description" content="(.+?)"',re.DOTALL).findall(source_html)[0]
                        quality,info = source_utils.get_release_quality(source_string, vid_url)  
                    except:
                        quality = 'DVD'
                        info = []
                    sources.append({'source': 'Streamango','quality': quality,'language': 'en','url':vid_url,'info':info,'direct': False,'debridonly': False})
                else:
                    if resolveurl.HostedMediaFile(vid_url):
                        quality,info = source_utils.get_release_quality(vid_url, vid_url)  
                        host = vid_url.split('//')[1].replace('www.','')
                        host = host.split('/')[0].split('.')[0].title()
                        sources.append({'source': host,'quality': quality,'language': 'en','url':vid_url,'info':info,'direct': False,'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMovie - Exception: \n' + str(failure))
            return sources
Example #12
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))

            links = dom_parser.parse_dom(r, 'table')
            links = [i.content for i in links if dom_parser.parse_dom(i, 'span', attrs={'class': re.compile('linkSearch(-a)?')})]
            links = re.compile('(<a.+?/a>)', re.DOTALL).findall(''.join(links))
            links = [dom_parser.parse_dom(i, 'a', req='href') for i in links if re.findall('(.+?)\s*\(\d+\)\s*<', i)]
            links = [i[0].attrs['href'] for i in links if i]

            url = re.sub('/streams-\d+', '', url)

            for link in links:
                if '/englisch/' in link: continue
                control.sleep(3000)
                if link != url: r = client.request(urlparse.urljoin(self.base_link, link))

                quality = 'SD'
                info = []

                detail = dom_parser.parse_dom(r, 'th', attrs={'class': 'thlink'})
                detail = [dom_parser.parse_dom(i, 'a', req='href') for i in detail]
                detail = [(i[0].attrs['href'], i[0].content.replace('&#9654;', '').strip()) for i in detail if i]

                if detail:
                    quality, info = source_utils.get_release_quality(detail[0][1])
                    r = client.request(urlparse.urljoin(self.base_link, detail[0][0]))

                r = dom_parser.parse_dom(r, 'table')
                r = [dom_parser.parse_dom(i, 'a', req=['href', 'title']) for i in r if not dom_parser.parse_dom(i, 'table')]
                r = [(l.attrs['href'], l.attrs['title']) for i in r for l in i if l.attrs['title']]

                info = ' | '.join(info)

                for stream_link, hoster in r:
                    valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                    if not valid: continue

                    direct = False

                    if hoster.lower() == 'gvideo':
                        direct = True

                    sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': stream_link, 'info': info, 'direct': direct, 'debridonly': False, 'checkquality': True})

            return sources
        except:
            return sources
    def _get_sources(self, item):
        try:
            name = item[0]
            url = item[1]
            quality, info = source_utils.get_release_quality(url, name)
            info.append(item[2])
            info = ' | '.join(info)

            self._sources.append(
                {'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
                 'debridonly': True})
        except BaseException:
            pass
    def _get_sources(self, item):
        try:
            name = item[0]
            quality, info = source_utils.get_release_quality(name, name)
            info.append(item[2])
            info = ' | '.join(info)
            data = client.request(item[1])
            url = re.search('''href=["'](magnet:\?[^"']+)''', data).groups()[0]

            self._sources.append(
                {'source': 'torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
                 'debridonly': True})
        except BaseException:
            pass
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url == None: return sources
         html = client.request(url)
         links = re.compile('<form id="linkplayer.+?href="(.+?)"',re.DOTALL).findall(html)
         for link in links:
             quality,info = source_utils.get_release_quality(link, url)
             host = link.split('//')[1].replace('www.','')
             host = host.split('/')[0].split('.')[0].title()
             sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'info': info, 'direct': False, 'debridonly': False})
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('Timewatch - Sources - Exception: \n' + str(failure))
         return sources
Example #16
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(urllib.quote_plus(query))
                url = urlparse.urljoin(self.base_link, url)

            else:
                url = self.moviesearch.format(urllib.quote_plus(query))
                url = urlparse.urljoin(self.base_link, url)

            items = self._get_items(url)

            hostDict = hostDict + hostprDict
            for item in items:
                try:
                    name = item[0]
                    quality, info = source_utils.get_release_quality(name, name)
                    info.append(item[2])
                    info = ' | '.join(info)
                    url = item[1]
                    url = url.split('&tr')[0]

                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                    'direct': False, 'debridonly': True})
                except BaseException:
                    pass

            return sources
        except BaseException:
            return sources
Example #17
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            html = client.request(url)
            url_list = re.compile('<h2><a href="([^"]+)"',re.DOTALL).findall(html)

            hostDict = hostprDict + hostDict

            for url in url_list:
                if cleantitle.get(title) in cleantitle.get(url):
                    html = client.request(url)
                    links = re.compile('href="([^"]+)" rel="nofollow"',re.DOTALL).findall(html)
                    for vid_url in links:
                        if 'ouo.io' in vid_url:
                            continue
                        if 'sh.st' in vid_url:
                            continue
                        if 'linx' in vid_url:
                            log_utils.log('2DDL - sources - linx: ' + str(vid_url))
                            continue
                        if '.rar' not in vid_url and '.srt' not in vid_url:
                            _, info = source_utils.get_release_quality(url, vid_url)
                            host = vid_url.split('//')[1].replace('www.','')
                            host = host.split('/')[0].lower()
                            sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': vid_url, 'info': info, 'direct': False, 'debridonly': False})
            return sources
        except Exception:
            return sources
Example #18
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if len(url['episode']) == 1:
             url['episode'] = "0" + url['episode']
         if len(url['season']) == 1:
             url['season'] = "0" + url['season']
         s = requests.Session()
         url = url['tvshowtitle'] + "+S" + url['season'] + "e" + url['episode']
         url = (self.base_link + self.meta_search_link % (self.api_key, url.replace(' ', '+')))
         print("info - " + url)
         p = s.get(url)
         p = json.loads(p.text)
         files = p['files']
         for i in files:
             if not int(i['files_num_video_player']) > 1:
                 name = i['name']
                 id = i['id']
                 url_dl = ''
                 for x in accepted_extensions:
                     if 'url_dl' in i:
                         if i['url_dl'].endswith(x):
                             url_dl = i['url_dl']
                             quality = source_utils.get_release_quality(name, url_dl)
                             print('info - ' + str(quality) + " link " + url_dl + " name " + name)
                             # 'host' is never defined in this scope; 'furk' is assumed as the source label
                             sources.append({'source': 'furk', 'quality': quality[0], 'language': "en",
                                             'url': url_dl, 'info': quality[1], 'direct': True, 'debridonly': False})
                         else:
                             continue
                     else:
                         continue
                 if url_dl == '':
                     continue
             else:
                 continue
         for i in sources:
             print("info - sources - " + str(i))
         return sources
     except:
         print("Unexpected error in Furk Script: source", sys.exc_info()[0])
         exc_type, exc_obj, exc_tb = sys.exc_info()
         print(exc_type, exc_tb.tb_lineno)
         pass
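The nested extension loop in the Furk example can be collapsed, since str.endswith accepts a tuple of suffixes. A sketch of the same per-file check under the same assumptions as above (accepted_extensions is a list of filename suffixes; 'furk' is an assumed source label):

for i in files:
    if int(i['files_num_video_player']) > 1:
        continue  # skip multi-part players, as above
    url_dl = i.get('url_dl', '')
    if not url_dl.endswith(tuple(accepted_extensions)):
        continue
    quality, info = source_utils.get_release_quality(i['name'], url_dl)
    sources.append({'source': 'furk', 'quality': quality, 'language': 'en',
                    'url': url_dl, 'info': info, 'direct': True, 'debridonly': False})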
Example #19
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r)
            r = client.parseDOM(r, 'item')
            title = client.parseDOM(r, 'title')[0]
            if hdlr in title:
                r = re.findall('<h3.+?>(.+?)</h3>\s*<h5.+?<strong>(.+?)</strong.+?h3.+?adze.+?href="(.+?)">.+?<h3', r[0], re.DOTALL)
                for name, size, url in r:
                    quality, info = source_utils.get_release_quality(name, url)
                    try:
                        size = re.sub('i', '', size)
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                    'direct': False, 'debridonly': True})


            return sources
        except:
            return sources
Example #20
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources
      
            hostDict = hostprDict + hostDict

            r = client.request(url)           

            urls = client.parseDOM(r, 'a', ret = 'href')

            for url in urls:
                try:

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    
                    quality, _ = source_utils.get_release_quality(url)
                    
                    info = []
                    
                    if any(x in url.upper() for x in ['HEVC', 'X265', 'H265']): info.append('HEVC')
                    
                    info.append('ALLRLS')
                    
                    info = ' | '.join(info)
                    
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                     
                except:
                    pass

            return sources
        except:
            return
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources

            result = client.request(url)
            res_chk = re.compile('<title>(.+?)</title>',re.DOTALL).findall(result)[0]
            r = client.parseDOM(result, 'tbody')
            r = client.parseDOM(r, 'tr')
            r = [(re.findall('<td>(.+?)</td>', i)[0], client.parseDOM(i, 'a', ret='href')[0]) for i in r]

            if r:
                for i in r:
                    try:
                        hostchk = i[0]
                        if 'other' in hostchk: continue

                        vid_page = urlparse.urljoin(self.base_link, i[1])
                        html = client.request(vid_page)
                        vid_div = re.compile('<div class="wrap">(.+?)</div>',re.DOTALL).findall(html)[0]
                        vid_url = re.compile('href="(.+?)"',re.DOTALL).findall(vid_div)[0]
                        quality,info = source_utils.get_release_quality(res_chk, vid_url)
                        host = vid_url.split('//')[1].replace('www.','')
                        host = host.split('/')[0].lower()
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url':vid_url,
                            'info':info,
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        pass
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Vodly - Exception: \n' + str(failure))
            return
Example #22
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            pid = re.findall('[e|t]\s*=\s*"(\w+)"\s*,', r)[0]

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'stream-container'})[0].content
            r = re.compile('<div id="stream-h">.*?</li>.*?</div>\s*</div>', re.IGNORECASE | re.DOTALL).findall(r)
            r = [(dom_parser.parse_dom(i, 'div', attrs={'id': 'mirror-head'}), dom_parser.parse_dom(i, 'div', attrs={'id': 'stream-links'})) for i in r]
            r = [(i[0][0].content, i[1]) for i in r if i[0]]
            r = [(re.findall('.+\s[\||-]\s(.*)', i[0]), i[1]) for i in r]
            r = [(i[0][0].strip(), i[1]) for i in r if len(i[0]) > 0]

            for name, links in r:
                quality, info = source_utils.get_release_quality(name)

                links = [dom_parser.parse_dom(i.content, 'a', req=['href', 'title', 'data-mirror', 'data-host']) for i in links]
                links = [([i[0].attrs.get('data-mirror'), i[0].attrs.get('data-host'), pid, url], i[0].content) for i in links]

                info = ' | '.join(info)

                for link, hoster in links:
                    valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                    if not valid: continue

                    link = urllib.urlencode({'mirror': link[0], 'host': link[1], 'pid': link[2], 'ceck': 'sk'})

                    sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False, 'checkquality': True})

            return sources
        except:
            return sources
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rel = dom_parser.parse_dom(r, 'div', attrs={'id': 'info'})
            rel = dom_parser.parse_dom(rel, 'div', attrs={'itemprop': 'description'})
            rel = dom_parser.parse_dom(rel, 'p')
            rel = [re.sub('<.+?>|</.+?>', '', i.content) for i in rel]
            rel = [re.findall('release:\s*(.*)', i, re.I) for i in rel]
            rel = [source_utils.get_release_quality(i[0]) for i in rel if i]
            quality, info = (rel[0]) if rel else ('SD', [])

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'links'})
            r = dom_parser.parse_dom(r, 'table')
            r = dom_parser.parse_dom(r, 'tr', attrs={'id': re.compile('\d+')})
            r = [dom_parser.parse_dom(i, 'td') for i in r]
            r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1].content).strip()) for i in r if len(r) >= 1]
            r = [(dom_parser.parse_dom(i[0], 'a', req='href'), i[1]) for i in r]
            r = [(i[0][0].attrs['href'], i[1]) for i in r if i[0]]

            info = ' | '.join(info)

            for link, hoster in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False, 'checkquality': True})

            return sources
        except:
            return sources
	def sources(self, url, hostDict, hostprDict):
		sources = []

		try:
			if url == None:
				return sources

			url = urlparse.urljoin(self.base_link, url)

			r = client.request(url)
			r = client.parseDOM(r, 'div', attrs={'id': 'stream-container'})[0]

			r = re.compile('<div id="stream-h">.*?</li>.*?</div>\s*</div>', re.IGNORECASE | re.DOTALL).findall(r)
			r = [(client.parseDOM(i, 'div', attrs={'id': 'mirror-head'}),
				  client.parseDOM(i, 'div', attrs={'id': 'stream-links'})
				  ) for i in r]
			r = [(i[0][0], i[1]) for i in r if len(i[0]) > 0]
			r = [(re.findall('.+\|(.+)', i[0]), i[1]) for i in r]
			r = [(i[0][0].strip(), i[1]) for i in r if len(i[0]) > 0]

			for name, links in r:
				quality, info = source_utils.get_release_quality(name)

				links = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in links]
				links = [(i[0][0], i[1][0].lower().strip()) for i in links if len(i[0]) > 0 and len(i[1]) > 0]

				info = ' | '.join(info)

				for link, hoster in links:
					valid, hoster = source_utils.is_host_valid(hoster, hostDict)
					if not valid: continue

					sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False, 'checkquality': True})

			return sources
		except:
			return sources
Example #25
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()


            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']


            posts = []

            if 'tvshowtitle' in data:
                query = '%s %s S%02dE%02d' % (data['tvshowtitle'], int(data['year']), int(data['season']), int(data['episode']))
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

                referer = self.search_link2 % urllib.quote_plus(query)
                referer = urlparse.urljoin(self.search_base_link, referer)

                url = self.search_link % urllib.quote_plus(query)
                url = urlparse.urljoin(self.search_base_link, url)
                
                result = client.request(url, cookie=self.search_cookie, XHR=True, referer=referer)
                try: posts += json.loads(re.findall('({.+?})$', result)[0])['results']
                except: pass
            else:
                query = '%s %s' % (data['title'], data['year'])
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

                referer = self.search_link2 % urllib.quote_plus(query)
                referer = urlparse.urljoin(self.search_base_link, referer)

                url = self.search_link % urllib.quote_plus(query)
                url = urlparse.urljoin(self.search_base_link, url)

                result = client.request(url, cookie=self.search_cookie, XHR=True, referer=referer)
                try: posts += json.loads(re.findall('({.+?})$', result)[0])['results']
                except: pass


            links = [] ; dupes = []
            for post in posts:
                try:
                    name = post['post_title'] ; url = post['post_name']

                    if not url in dupes:
                        dupes.append(url)

                        t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                        if not cleantitle.get(title) in cleantitle.get(t): raise Exception()

                      
                        try: y = re.findall('[\.|\(|\[|\s](S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                        except: y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                        if 'S' in y and 'E' in y: cat = 'episode'
                        elif 'S' in y: cat = 'tvshow'
                        elif y.isdigit(): cat = 'movie'

                        if cat == 'movie': hdlr = data['year']
                        elif cat == 'episode': hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
                        elif cat == 'tvshow': hdlr = 'S%02d' % int(data['season'])

                        if not y == hdlr: raise Exception()

                        items = []

                        content = post['post_content']

                        try: items += zip([i for i in client.parseDOM(content, 'p') if 'Release Name:' in i], [i for i in client.parseDOM(content, 'p') if '<strong>Download' in i])
                        except: pass

                        try: items += client.parseDOM(content, 'p', attrs = {'style': '.+?'})
                        except: pass

                        for item in items:
                            try:
                                if type(item) == tuple: item = '######URL######'.join(item)

                                fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                                fmt = [i.lower() for i in fmt]

                                if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                                if any(i in ['extras'] for i in fmt): raise Exception()

                                if '1080p' in fmt: quality = '1080p'
                                elif '720p' in fmt: quality = '720p'
                                else: quality = 'SD'
                                if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                                elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                                quality, _ = source_utils.get_release_quality(name, item)

                                info = []

                                if '3d' in fmt: info.append('3D')

                                try:
                                    if cat == 'tvshow': raise Exception()
                                    size = re.findall('(\d+(?:\.|/,|)\d+(?:\s+|)(?:GB|GiB|MB|MiB))', item)[0].strip()
                                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                                    size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                                    size = '%.2f GB' % size
                                    info.append(size)
                                except:
                                    pass

                                info = ' | '.join(info)

                                url = item.rsplit('######URL######')[-1]
                                url = zip(client.parseDOM(url, 'a'), client.parseDOM(url, 'a', ret='href'))

                                for i in url: links.append({'url': i[1], 'quality': quality, 'info': info, 'host': i[0], 'cat': cat})
                            except:
                                pass

                except:
                    pass


            check = [i for i in links if not i['quality'] == 'CAM']
            if len(check) > 0: links = check

            hostDict = hostprDict + hostDict

            for i in links:
                try:
                    url = i['url']
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if i['cat'] == 'tvshow':
                        if not i['quality'] in ['1080p', 'HD']: raise Exception()
                        if not any(i['host'].lower() in x for x in hostDict): raise Exception()
                        url = client.request(url)
                        url = client.parseDOM(url, 'ol')[0]
                        url = client.parseDOM(url, 'div', attrs = {'style': '.+?'})[int(data['episode'])-1]

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': i['quality'], 'language': 'en', 'url': url, 'info': i['info'], 'direct': False, 'debridonly': True})
                except:
                    pass

            return sources
        except:
            return sources
Example #26
    def get_sources(self, link):
        try:
            url = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', link,
                                            re.DOTALL)[0])
            url = urllib.unquote_plus(url).split('&tr=')[0].replace(' ', '.')
            url = url.encode('ascii', errors='ignore').decode('ascii',
                                                              errors='ignore')

            name = url.split('&dn=')[1]
            if any(x in url.lower() for x in [
                    'french', 'italian', 'spanish', 'truefrench', 'dublado',
                    'dubbed'
            ]):
                raise Exception()

            t = name.split(self.hdlr)[0].replace(self.year, '').replace(
                '(', '').replace(')', '').replace('&', 'and').replace(
                    '.US.', '.').replace('.us.', '.')
            if cleantitle.get(t) != cleantitle.get(self.title):
                return

            if self.hdlr not in name:
                return

            if url in str(self.sources):
                return

            try:
                seeders = int(
                    client.parseDOM(link, 'td',
                                    attrs={'class': 'sy'})[0].replace(',', ''))
                if self.min_seeders > seeders:
                    return
            except:
                pass

            quality, info = source_utils.get_release_quality(name, url)

            try:
                size = re.findall(
                    '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                    link)[0]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub('[^0-9|/.|/,]', '', size.replace(
                    ',', '.'))) / div
                size = '%.2f GB' % size
                info.insert(0, size)
            except:
                size = '0'
                pass

            info = ' | '.join(info)

            self.sources.append({
                'source': 'torrent',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True
            })

        except:
            source_utils.scraper_error('EXTRATORRENT')
            pass
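The url.split('&dn=')[1] lookup above works only while dn is the last magnet parameter left after stripping '&tr='; a sketch that reads the display name regardless of parameter order:

import urlparse  # urllib.parse on Python 3

def magnet_name(magnet):
    # Pull the display name (dn) out of a magnet URI; returns '' when absent.
    qs = urlparse.parse_qs(urlparse.urlparse(magnet).query)
    return qs.get('dn', [''])[0]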
Example #27
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)

            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'Season %d' % int(data['season']) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            self.scraper = cfscrape.create_scraper()
            r = self.scraper.get(url).content
            posts = client.parseDOM(r, 'li')

            for post in posts:
                try:
                    link_dom = dom_parser2.parse_dom(post, 'a', req='href')[0]
                    t = re.findall('title=.+?>\s*(.+?)$', link_dom.content, re.DOTALL)[0]
                    t2 = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)
                    y = re.findall('[\.|\(|\[|\s](S\d*E\d*|Season\s*\d*|\d{4})[\.|\)|\]|\s]', t)[-1]

                    if not (cleantitle.get_simple(t2.replace('720p / 1080p', '')) == cleantitle.get(
                        title) and y == hdlr): raise Exception()

                    link = client.parseDOM(post, 'a', ret='href')[0]
                    if not 'Episodes' in post: u = self.movie_links(link)
                    else:
                        sep = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
                        u = self.show_links(link, sep)

                    for item in u:
                        quality, info = source_utils.get_release_quality(item[0][0], None)
                        try:
                            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', item[0][1])[-1]
                            div = 1 if size.endswith(' GB') else 1024
                            size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                            size = '%.2f GB' % size
                            info.append(size)
                        except:
                            pass

                        info = ' | '.join(info)

                        url = item[0][0]
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        sources.append({'source': 'popcorn', 'quality': quality, 'language': 'en', 'url': url,
                                        'info': info, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #28
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.geturl(title)

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            timer = control.Time(start=True)

            html = self.scraper.get(url).content
            if html is None:
                log_utils.log('TPB - Website Timed Out')
                return sources
            html = html.replace('&nbsp;', ' ')
            try:
                results = client.parseDOM(html,
                                          'table',
                                          attrs={'id': 'searchResult'})[0]
            except Exception:
                return sources

            rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
            if rows is None:
                return sources

            for entry in rows:
                # Stop searching 8 seconds before the provider timeout, otherwise might continue searching, not complete in time, and therefore not returning any links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('TPB - Timeout Reached')
                    break

                try:
                    try:
                        name = re.findall(
                            'class="detLink" title=".+?">(.+?)</a>', entry,
                            re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name)
                        # t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, flags=re.I)
                        if not cleantitle.get(title) in cleantitle.get(name):
                            continue
                    except Exception:
                        continue
                    try:
                        y = re.findall(
                            '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                            name)[-1].upper()
                        if not y == hdlr:
                            continue
                    except Exception:
                        continue

                    try:
                        seeders = int(
                            re.findall('<td align="right">(.+?)</td>', entry,
                                       re.DOTALL)[0])
                    except Exception:
                        continue
                    if self.min_seeders > seeders:
                        continue

                    try:
                        link = 'magnet:%s' % (re.findall(
                            'a href="magnet:(.+?)"', entry, re.DOTALL)[0])
                        link = str(
                            client.replaceHTMLCodes(link).split('&tr')[0])
                    except Exception:
                        continue

                    quality, info = source_utils.get_release_quality(
                        name, name)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            entry)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except Exception:
                        pass

                    info = ' | '.join(info)

                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except Exception:
                    failure = traceback.format_exc()
                    log_utils.log('TPB - Cycle Broken: \n' + str(failure))
                    continue

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('TPB - Exception: \n' + str(failure))
            return sources
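The sc_timeout budget check in the TPB example is the one scraper here that guards against provider timeouts; the same pattern with only the standard library, assuming no control.Time helper is available:

import time

def harvest_with_budget(rows, sc_timeout, handle_row):
    # Process scraped rows until the time budget is exhausted, returning
    # early so the links collected so far still make it back to the caller.
    start = time.time()
    for entry in rows:
        if time.time() - start > sc_timeout:
            break  # partial results beat none
        handle_row(entry)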
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|\.|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link,
                                   url).replace('-', '+').replace('%3A+', '+')

            r = client.request(url)
            r = client.parseDOM(r, 'div', attrs={'class': 'item-post'})
            r = [
                re.findall('<a href="(.+?)">(.+?)<', i, re.DOTALL)[0]
                for i in r
            ]

            hostDict = hostprDict + hostDict

            items = []

            for item in r:
                try:
                    t = item[1]
                    t = re.sub('(\[.*?\])|(<.+?>)', '', t)
                    t1 = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '', t)

                    if not cleantitle.get(t1) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        t)[-1].upper()

                    if not y == hdlr: raise Exception()

                    data = client.request(item[0])
                    data = client.parseDOM(data,
                                           'div',
                                           attrs={'class': 'single-link'})[0]
                    data = dom_parser.parse_dom(data, 'a', req='href')

                    u = [(t, i.attrs['href']) for i in data]
                    items += u

                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    url = item[1]
                    if any(x in url for x in [
                            '.rar', '.zip', '.iso', 'www.share-online.biz',
                            'https://ouo.io', 'http://guard.link'
                    ]):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    info = ' | '.join(info)
                    if control.setting('deb.rd_check') == 'true':
                        check = rd_check.rd_deb_check(url)
                        if check:
                            info = 'RD Checked' + ' | ' + info
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': check,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                    else:
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('---Crazyhdsource Testing - Exception: \n' +
                          str(failure))
            return sources
Exemple #30
    def sources(self, url, hostDict, hostprDict):
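        # List the torrents already downloaded on the user's Real-Debrid account
        # and match their filenames against the requested title and handle;
        # season packs are expanded via getSeasonItems().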
        try:
            sources = []
            if url is None: return sources

            if not control.setting('rdcached.providers') == 'true':
                return sources
            if self.api_key == '': return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            if 'tvshowtitle' in data:
                season = 'S%02d' % int(data['season'])
                episode = 'E%02d' % int(data['episode'])
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            checktorr_r = self.checkrdcache()
            result = json.loads(checktorr_r)

            items = []

            for i in result:
                try:
                    if not i['status'] == 'downloaded': raise Exception()

                    name = i['filename']
                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)
                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()
                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|(?:S|s)\d*(?:E|e)\d*|(?:S|s)\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if not y == hdlr:
                        if 'tvshowtitle' in data:
                            if not y == season:
                                raise Exception()
                            else:
                                items += self.getSeasonItems(i, hdlr)
                    else:
                        info_url = urlparse.urljoin(
                            self.base_link,
                            self.torrentsinfo_link % (i['id'], self.api_key))

                        r = client.request(info_url)

                        torr_info = json.loads(r)
                        links = torr_info['links']
                        if len(links) == 0: raise Exception()
                        links = links[0]

                        u = [(name, links)]
                        items += u
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    quality, info = source_utils.get_release_quality(
                        name, None)
                    filetype = source_utils.getFileType(name)
                    info += [filetype.strip(), name]
                    info = filter(None, info)
                    info = ' | '.join(info)

                    sources.append({
                        'source': 'RDCACHED',
                        'quality': quality,
                        'language': 'en',
                        'url': item[1],
                        'info': info,
                        'direct': False,
                        'debridonly': False,
                        'cached': True
                    })
                except:
                    pass

            return sources
        except:
            log_utils.log(
                '>>>> %s TRACE <<<<\n%s' %
                (__file__.upper().split('\\')[-1].split('.')[0],
                 traceback.format_exc()), log_utils.LOGDEBUG)
            return sources
Exemple #31
 def sources(self, url, hostDict, hostprDict):
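     # Debrid-gated movie scraper: match the browse-movie title and year, open
     # each detail page, and turn every modal torrent magnet (trackers stripped)
     # into a debrid-only torrent source with parsed size.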
     try:
         sources = []
         if url is None:
             return sources
         if debrid.status() is False:
             raise Exception()
         if debrid.tor_enabled() is False:
             raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         query = '%s %s' % (data['title'], data['year'])
         url = self.search_link % urllib.quote(query)
         url = urlparse.urljoin(self.base_link, url)
         html = client.request(url)
         try:
             results = client.parseDOM(html, 'div', attrs={'class':
                                                           'row'})[2]
         except:
             return sources
         items = re.findall(
             'class="browse-movie-bottom">(.+?)</div>\s</div>', results,
             re.DOTALL)
         if items is None:
             return sources
         for entry in items:
             try:
                 try:
                     link, name = re.findall(
                         '<a href="(.+?)" class="browse-movie-title">(.+?)</a>',
                         entry, re.DOTALL)[0]
                     name = client.replaceHTMLCodes(name)
                     if not cleantitle.get(name) == cleantitle.get(
                             data['title']):
                         continue
                 except:
                     continue
                 y = entry[-4:]
                 if not y == data['year']:
                     continue
                 response = client.request(link)
                 try:
                     entries = client.parseDOM(
                         response, 'div', attrs={'class': 'modal-torrent'})
                     for torrent in entries:
                         link, name = re.findall(
                             'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"',
                             torrent, re.DOTALL)[0]
                         link = 'magnet:%s' % link
                         link = str(
                             client.replaceHTMLCodes(link).split('&tr')[0])
                         if link in str(sources):
                             continue
                         quality, info = source_utils.get_release_quality(
                             name, name)
                         try:
                             size = re.findall(
                                 '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                                 torrent)[-1]
                             div = 1 if size.endswith(
                                 ('GB', 'GiB')) else 1024
                             size = float(re.sub('[^0-9|/.|/,]', '',
                                                 size)) / div
                             size = '%.2f GB' % size
                             info.append(size)
                         except:
                             pass
                         info = ' | '.join(info)
                         sources.append({
                             'source': 'Torrent',
                             'quality': quality,
                             'language': 'en',
                             'url': link,
                             'info': info,
                             'direct': False,
                             'debridonly': True
                         })
                 except:
                     continue
             except:
                 continue
         return sources
     except:
         return sources
Exemple #32
    def sources(self, url, hostDict, hostprDict):
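        # Resolve the provider's IMDb embed: find the frames endpoint, POST the
        # imdb id with the client IP, then emit direct GVIDEO streams plus a
        # Greek (falling back to English) subtitle id.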
        sources = []
        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            imdb = data['imdb']
            url = self.embed_link.format(imdb)
            data = client.request(url, referer=self.base_link)
            frames_link = re.findall('''\$\.post\('(.+?)',{'imdb''', data,
                                     re.DOTALL)[0]
            frames_link = urlparse.urljoin(
                self.frames_link,
                frames_link) if frames_link.startswith('/') else frames_link
            try:
                get_ip = client.request('https://whatismyipaddress.com')
                ip = re.findall(
                    '''/ip/(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})['"]''', get_ip,
                    re.DOTALL)[0]
            except IndexError:
                ip = '0'
            post = {'imdb': imdb, 'ip': ip}
            frames = client.request(frames_link, post=post)
            frames = json.loads(frames)
            for url in frames:
                try:
                    subs = url['sub']
                    sub = re.findall(
                        '''sub_id['"]:\s*(\d+)\,.+?lg['"]:\s*u['"]greek['"]''',
                        str(subs))[0]
                except:
                    subs = url['sub']
                    sub = re.findall(
                        '''sub_id['"]:\s*(\d+)\,.+?lg['"]:\s*u['"]english['"]''',
                        str(subs))[0]

                sub = self.sub_link.format(sub)
                link = url['src'][0]['src']
                quality = url['src'][0]['label']

                url = link.replace(' ',
                                   '%20') + '|User-Agent={}&Referer={}'.format(
                                       urllib.quote(client.agent()),
                                       'https://redirector.googlevideo.com/')
                quality, info = source_utils.get_release_quality(quality)

                sources.append({
                    'source': 'GVIDEO',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'sub': sub,
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Exemple #33
    def sources(self, url, hostDict, hostprDict):
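        # Locate the movie/episode page, then extract direct CDN links from
        # entervideo (<source> tag) or vidnode (file/label pairs) embeds.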
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            try:
                if 'tvshowtitle' in data:
                    epi = 'EP%d' % int(data['episode'])
                    links = self.searchShow(data['tvshowtitle'],
                                            data['season'])
                    url = [i[1] for i in links if epi.lower() == i[0].lower()]

                else:
                    url = self.searchMovie(data['title'], data['year'])
                    try:
                        url = client.parseDOM(url,
                                              'iframe',
                                              ret='src',
                                              attrs={'id': 'advanced_iframe'})
                    except:
                        url = re.findall(
                            '''<h4>server\d+</h4>.+?src=['"]([^'"]+)''', url,
                            re.I | re.DOTALL)

            except:
                pass

            for u in url:
                if 'entervideo' in u:
                    r = client.request(u)
                    url = client.parseDOM(r, 'source', ret='src')[0]
                    quality, info = source_utils.get_release_quality(url, url)
                    sources.append({
                        'source': 'CDN',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': True,
                        'debridonly': False
                    })
                elif 'vidnode' in u:
                    headers = {
                        'Host': 'vidnode.net',
                        'User-Agent':
                        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
                        'Upgrade-Insecure-Requests': '1',
                        'Accept-Language': 'en-US,en;q=0.9'
                    }
                    r = client.request(u, headers=headers)
                    links = re.findall(
                        '''\{file:\s*['"]([^'"]+).*?label:\s*['"](\d+\s*P)['"]''',
                        r, re.DOTALL | re.I)
                    for u, qual in links:
                        quality, info = source_utils.get_release_quality(
                            qual, u)
                        url = u
                        sources.append({
                            'source': 'CDN',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })

            return sources
        except:
            return sources
Exemple #34
    def sources(self, url, hostDict, hostprDict):
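        # Crawl linkSearch tables on a German index: read the release quality
        # from each page's thlink label, then collect every hoster link as a
        # non-direct 'de' source.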
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))

            links = dom_parser.parse_dom(r, 'table')
            links = [
                i.content for i in links if dom_parser.parse_dom(
                    i, 'span', attrs={'class': re.compile('linkSearch(-a)?')})
            ]
            links = re.compile('(<a.+?/a>)', re.DOTALL).findall(''.join(links))
            links = [
                dom_parser.parse_dom(i, 'a', req='href') for i in links
                if re.findall('(.+?)\s*\(\d+\)\s*<', i)
            ]
            links = [i[0].attrs['href'] for i in links if i]

            url = re.sub('/streams-\d+', '', url)

            for link in links:
                if '/englisch/' in link: continue
                if link != url:
                    r = client.request(urlparse.urljoin(self.base_link, link))

                quality = 'SD'
                info = []

                detail = dom_parser.parse_dom(r,
                                              'th',
                                              attrs={'class': 'thlink'})
                detail = [
                    dom_parser.parse_dom(i, 'a', req='href') for i in detail
                ]
                detail = [(i[0].attrs['href'],
                           i[0].content.replace('&#9654;', '').strip())
                          for i in detail if i]

                if detail:
                    quality, info = source_utils.get_release_quality(
                        detail[0][1])
                    r = client.request(
                        urlparse.urljoin(self.base_link, detail[0][0]))

                r = dom_parser.parse_dom(r, 'table')
                r = [
                    dom_parser.parse_dom(i, 'a', req=['href', 'title'])
                    for i in r if not dom_parser.parse_dom(i, 'table')
                ]
                r = [(l.attrs['href'], l.attrs['title']) for i in r for l in i
                     if l.attrs['title']]

                info = ' | '.join(info)

                for stream_link, hoster in r:
                    valid, hoster = source_utils.is_host_valid(
                        hoster, hostDict)
                    if not valid: continue

                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'de',
                        'url': stream_link,
                        'info': info,
                        'direct': False,
                        'debridonly': False,
                        'checkquality': True
                    })

            return sources
        except:
            return sources
Exemple #35
    def get_sources(self, link):
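        # Open a search result, grab its magnet link, skip releases flagged as
        # dubbed or non-English, verify title and handle from the '&dn=' name,
        # then queue a debrid-only torrent source with the first parseable size.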
        try:
            url = re.compile('href="(.+?)"').findall(link)[0]
            url = '%s%s' % (self.base_link, url)
            result = client.request(url)
            if 'magnet' not in result:
                raise Exception()

            url = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', result,
                                            re.DOTALL)[0])
            url = urllib.unquote(url).decode('utf8').replace('&amp;', '&')
            url = url.split('&xl=')[0]

            if url in str(self.sources):
                raise Exception()

            size_list = client.parseDOM(result,
                                        "td",
                                        attrs={"class": "table_col2"})

            if any(x in url.lower() for x in [
                    'french', 'italian', 'spanish', 'truefrench', 'dublado',
                    'dubbed'
            ]):
                raise Exception()

            name = url.split('&dn=')[1]
            t = name.split(self.hdlr)[0].replace(self.year, '').replace(
                '(', '').replace(')', '').replace('&',
                                                  'and').replace('+', ' ')

            if cleantitle.get(t) != cleantitle.get(self.title):
                raise Exception()

            if self.hdlr not in name:
                raise Exception()

            quality, info = source_utils.get_release_quality(name, url)

            for match in size_list:
                try:
                    size = re.findall(
                        '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        match)[0]
                    div = 1 if size.endswith('GB') else 1024
                    size = float(
                        re.sub('[^0-9|/.|/,]', '', size.replace(',',
                                                                '.'))) / div
                    size = '%.2f GB' % size
                    info.insert(0, size)
                    if size:
                        break
                except:
                    size = '0'
                    pass

            info = ' | '.join(info)

            self.sources.append({
                'source': 'torrent',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True
            })

        except:
            pass
Exemple #36
    def _get_sources(self, i):
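        # From a page's 'single link' paragraph, validate each hoster URL, drop
        # archives and blocked hosts, confirm the title and handle, and queue
        # debrid-only sources with release size.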
        try:
            name = i[0]
            name = client.replaceHTMLCodes(name)
            r = client.request(i[1].strip())
            r = client.parseDOM(r, 'p')
            r = [
                e for e in r if all(x in e.lower() for x in ['single', 'link'])
            ]
            links = client.parseDOM(r[0], 'a', ret='href')
            for url in links:
                try:
                    if any(x in url
                           for x in ['.rar.', '.zip.', '.iso.']) or any(
                               url.endswith(x)
                               for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '',
                        name,
                        flags=re.I)
                    if not cleantitle.get(t) == cleantitle.get(self.title):
                        raise Exception()
                    try:
                        y = re.findall(
                            '(?:\.|\(|\[|\s*|)(S\d+E\d+|S\d+)(?:\.|\)|\]|\s*|)',
                            name, re.I)[-1].upper()
                    except BaseException:
                        y = re.findall(
                            '(?:\.|\(|\[|\s*|)(\d{4})(?:\.|\)|\]|\s*|)', name,
                            re.I)[0].upper()
                    if not y == self.hdlr: raise Exception()

                    valid, host = source_utils.is_host_valid(
                        url, self.hostDict)
                    if not valid: continue
                    if host in ['1fichier.com', 'uptobox.com']:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    info = []
                    quality, info = source_utils.get_release_quality(name, url)
                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                            i[2])[0]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except BaseException:
                        pass

                    info = ' | '.join(info)
                    self._sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except BaseException:
                    pass
        except BaseException:
            pass
Exemple #37
    def sources(self, url, hostDict, hostprDict):
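        # Query the site's RSS feed, pair each item title with its video
        # enclosure URL, filter on cleaned title and handle, then add matching
        # hoster links with the size reported in the post.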
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post,
                                        'enclosure',
                                        ret='url',
                                        attrs={'type': 'video.+?'})

                    s = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GiB|MiB|GB|MB))', post)
                    s = s[0] if s else '0'

                    items += [(t, i, s) for i in u]

                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if not y == hdlr: raise Exception()

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    try:
                        size = re.sub('i', '', item[2])
                        div = 1 if size.endswith('GB') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
Exemple #38
    def sources(self, url, hostDict, hostprDict):
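        # Parse a 'searchResult' torrent table: match detLink titles, take the
        # magnet with trackers stripped, read the size column, and prefer
        # non-CAM releases.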
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            html = client.request(url)
            html = html.replace('&nbsp;', ' ')
            try:
                results = client.parseDOM(html,
                                          'table',
                                          attrs={'id': 'searchResult'})[0]
            except Exception:
                return sources
            rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
            if rows is None:
                return sources

            for entry in rows:
                try:
                    try:
                        name = re.findall(
                            'class="detLink" title=".+?">(.+?)</a>', entry,
                            re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name)
                        # t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, flags=re.I)
                        if not cleantitle.get(title) in cleantitle.get(name):
                            continue
                    except Exception:
                        continue
                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()
                    if not y == hdlr:
                        continue

                    try:
                        link = 'magnet:%s' % (re.findall(
                            'a href="magnet:(.+?)"', entry, re.DOTALL)[0])
                        link = str(
                            client.replaceHTMLCodes(link).split('&tr')[0])
                    except Exception:
                        continue

                    quality, info = source_utils.get_release_quality(
                        name, name)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            entry)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '[B]%.2f GB[/B]' % size
                        info.append(size)
                    except Exception:
                        pass

                    # try:
                    #     info.append(name)
                    # except Exception:
                    #     pass

                    info = ' | '.join(info)
                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except Exception:
                    continue

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check

            return sources
        except Exception:
            return sources
    def sources(self, url, hostDict, hostprDict):
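        # Torrent-API scraper gated on cached debrid: match results by title
        # and handle, then keep only magnets already cached on the PM service
        # ('finished') or on Real-Debrid (library hit or instant availability).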
        try:
            sources = []

            if url is None: return sources

            if not control.setting(
                    'pmcached.providers') == 'true' and not control.setting(
                        'rdcached.providers') == 'true':
                return sources
            if self.pm_api_key == '' and self.rd_api_key == '': return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            imdb_id = data['imdb']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link,
                                   self.search_link % urllib.quote_plus(query))

            r = client.request(url)

            result = json.loads(r)
            result = result['results']

            items = []

            for item in result:
                try:
                    name = item['title']
                    magnetlink = item['magnet']

                    size = ''
                    try:
                        size = item['size']
                        size = float(size) / (1024**3)
                        size = '%.2f GB' % size
                    except:
                        pass

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name)
                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()
                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|(?:S|s)\d*(?:E|e)\d*|(?:S|s)\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()
                    if not y == hdlr: raise Exception()

                    u = [(name, magnetlink, size)]
                    items += u
                except:
                    pass

            if control.setting('pmcached.providers'
                               ) == 'true' and not self.pm_api_key == '':
                for item in items:
                    try:
                        _hash = re.findall('btih:(.*?)\W', item[1])[0]
                        checkurl = urlparse.urljoin(
                            self.pm_base_link, self.pm_checkcache_link %
                            (self.pm_api_key, _hash, self.pm_api_key))
                        r = client.request(checkurl)
                        if not 'finished' in r: raise Exception()

                        name = client.replaceHTMLCodes(item[0])
                        quality, info = source_utils.get_release_quality(
                            name, None)
                        filetype = source_utils.getFileType(name)
                        info += [filetype.strip(), name]
                        info = filter(None, info)
                        info = ' | '.join(info)
                        if not item[2] == '':
                            info = '%s | %s' % (item[2], info)
                        url = 'magnet:?xt=urn:btih:%s' % _hash

                        sources.append({
                            'source': 'PMCACHED',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': False,
                            'cached': True
                        })
                    except:
                        pass

            if control.setting('rdcached.providers'
                               ) == 'true' and not self.rd_api_key == '':
                checktorr_r = self.checkrdcache()
                checktorr_result = json.loads(checktorr_r)

                for item in items:
                    try:
                        _hash = re.findall('btih:(.*?)\W', item[1])[0]
                        _hash = _hash.lower()

                        url = ''
                        for i in checktorr_result:
                            try:
                                if _hash == i['hash'] and i[
                                        'status'] == 'downloaded':
                                    url = i['links'][0]
                                    break
                            except:
                                pass

                        if url == '':
                            checkurl = urlparse.urljoin(
                                self.rd_base_link, self.rd_checkcache_link %
                                (_hash, self.rd_api_key))
                            r = client.request(checkurl)
                            checkinstant = json.loads(r)
                            checkinstant = checkinstant[_hash]

                            checkinstant_num = 0
                            try:
                                checkinstant_num = len(checkinstant['rd'])
                            except:
                                pass

                            if checkinstant_num == 0: raise Exception()
                            url = 'rdmagnet:?xt=urn:btih:%s' % _hash

                        if url == '': raise Exception()

                        name = client.replaceHTMLCodes(item[0])
                        quality, info = source_utils.get_release_quality(
                            name, None)
                        filetype = source_utils.getFileType(name)
                        info += [filetype.strip(), name]
                        info = filter(None, info)
                        info = ' | '.join(info)
                        if not item[2] == '':
                            info = '%s | %s' % (item[2], info)

                        sources.append({
                            'source': 'RDCACHED',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': False,
                            'cached': True
                        })
                    except:
                        pass

            return sources
        except:
            log_utils.log(
                '>>>> %s TRACE <<<<\n%s' %
                (__file__.upper().split('\\')[-1].split('.')[0],
                 traceback.format_exc()), log_utils.LOGDEBUG)
            return sources
Exemple #40
    def sources(self, url, hostDict, hostprDict):
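        # Search WordPress-style posts, verify the bookmarked title against the
        # requested title and handle, then harvest 'Single Links' hoster URLs
        # from the matching article along with the release size.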
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'article', attrs={'id': 'post-\d+'})
            posts = client.parseDOM(posts, 'h1')
            posts = zip(
                client.parseDOM(posts, 'a', ret='href'),
                (client.parseDOM(posts, 'a', attrs={'rel': 'bookmark'})))

            for item in posts:

                try:
                    name = item[1]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '', name, re.I)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d+E\d+|S\d+)[\.|\)|\]|\s]',
                        name, re.I)[-1].upper()

                    if not y == hdlr: raise Exception()

                    r = client.request(item[0], referer=self.base_link)
                    r = client.parseDOM(r, 'article', attrs={'id': 'post-\d+'})
                    #links = re.findall('>Single Links</b>(.+?)<p><b><span', data, re.DOTALL)
                    links = [
                        i for i in client.parseDOM(r, 'p')
                        if 'Single Links' in i
                    ]
                    links = zip(
                        client.parseDOM(links, 'a', ret='href'),
                        client.parseDOM(links, 'a', attrs={'href': '.+?'}))

                    for item in links:
                        try:
                            quality, info = source_utils.get_release_quality(
                                item[1], item[0])
                            try:
                                size = re.findall(
                                    '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                                    r[0], re.DOTALL)[0].strip()
                                div = 1 if size.endswith(
                                    ('GB', 'GiB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '',
                                                    size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass

                            info = ' | '.join(info)

                            if any(x in item[0]
                                   for x in ['.rar', '.zip', '.iso']):
                                raise Exception()
                            url = client.replaceHTMLCodes(item[0])
                            url = url.encode('utf-8')

                            hostDict = hostDict + hostprDict

                            valid, host = source_utils.is_host_valid(
                                url, hostDict)
                            if not valid: continue
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                        except:
                            pass

                except:
                    pass

            return sources
        except:
            return sources
Exemple #41
    def sources(self, url, hostDict, hostprDict):
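        # Cached variant of the linkSearch crawler: resolve release pages (or
        # take a serie URL directly), tag every hoster link with its quality
        # plus a Recaptcha notice, and mark gvideo links as direct.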
        sources = []
        try:
            if not url:
                return sources

            r = cache.get(self.scraper.get, 4,
                          urlparse.urljoin(self.base_link, url)).content

            if 'serie' not in url:
                links = dom_parser.parse_dom(r, 'table')
                links = [
                    i.content for i in links if dom_parser.parse_dom(
                        i,
                        'span',
                        attrs={'class': re.compile('linkSearch(-a)?')})
                ]
                links = re.compile('(<a.+?/a>)',
                                   re.DOTALL).findall(''.join(links))
                links = [
                    dom_parser.parse_dom(i, 'a', req='href') for i in links
                    if re.findall('(.+?)\s*\(\d+\)\s*<', i)
                ]
                links = [i[0].attrs['href'] for i in links if i]

                url = re.sub('/streams-\d+', '', url)
            else:
                links = [url]

            for link in links:
                if '/englisch/' in link: continue

                if link != url:
                    r = cache.get(self.scraper.get, 4,
                                  urlparse.urljoin(self.base_link,
                                                   link)).content

                detail = dom_parser.parse_dom(r,
                                              'th',
                                              attrs={'class': 'thlink'})
                detail = [
                    dom_parser.parse_dom(i, 'a', req='href') for i in detail
                ]
                detail = [(i[0].attrs['href'],
                           i[0].content.replace('&#9654;', '').strip())
                          for i in detail if i]

                if 'serie' in url:
                    detail.append((url, "x264"))

                for release in detail:
                    quality, info = source_utils.get_release_quality(
                        release[1])
                    r = client.request(
                        urlparse.urljoin(self.base_link, release[0]))

                    r = dom_parser.parse_dom(r, 'table')
                    r = [
                        dom_parser.parse_dom(i, 'a', req=['href', 'title'])
                        for i in r if not dom_parser.parse_dom(i, 'table')
                    ]
                    r = [(l.attrs['href'], l.attrs['title']) for i in r
                         for l in i if l.attrs['title']]

                    info = ' | '.join(info)
                    info += " Recaptcha"

                    for stream_link, hoster in r:
                        valid, hoster = source_utils.is_host_valid(
                            hoster, hostDict)
                        if not valid: continue

                        direct = False

                        if hoster.lower() == 'gvideo':
                            direct = True

                        sources.append({
                            'source': hoster,
                            'quality': quality,
                            'language': 'de',
                            'url': stream_link,
                            'info': info,
                            'direct': direct,
                            'debridonly': False,
                            'checkquality': True,
                            'captcha': True
                        })

            return sources
        except:
            source_faultlog.logFault(__name__, source_faultlog.tagScrape, url)
            return sources
    def searchShowPack(self, title, season, episode, query, category):
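        # Season-pack search: match 'Sxx' pack names, confirm the pack is
        # cached on the PM debrid service, then pick the largest file in the
        # pack whose name contains the wanted SxxExx.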
        try:
            sources = []

            se_ep = season + episode
            url = self.search_link % (urllib.quote_plus(query), category)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)

            result = client.parseDOM(r, 'table', attrs={'class': 'table.+?'})[0]
            result = re.sub('(?s)<thead>.+?<\/thead>', '', result)
            result = client.parseDOM(result, 'tr')
            result = [(client.parseDOM(i, 'a', attrs={'title': 'Magnet link'}, ret='href')[0], client.parseDOM(i, 'a')[0], i) for i in result]
            result = [(i[0], re.sub('<.+?>', '', i[1]), i[2]) for i in result]

            items = []

            for item in result:
                try:
                    name = item[1]
                    magnetlink = item[0]

                    size = ''
                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[0]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                    except:
                        pass

                    t = re.sub('(\.|\(|\[|\s)((?:S|s)\d+)(\.|\)|\]|\s|)(.+|)', '', name)
                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
                    y = re.findall('[\.|\(|\[|\s]((?:S|s)\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y.lower() == season.lower(): raise Exception()
                    if not size == '':
                        u = [(name, magnetlink, size)]
                    else:
                        u = [(name, magnetlink)]
                    items += u
                except:
                    pass

            for item in items:
                try:
                    _hash = re.findall('btih:(.*?)\W', item[1])[0]
                    checkurl = urlparse.urljoin(self.pm_base_link, self.pm_checkcache_link % (self.pm_api_key, _hash, self.pm_api_key))
                    r = client.request(checkurl)
                    if not 'finished' in r: raise Exception()

                    name = client.replaceHTMLCodes(item[0])
                    quality, info = source_utils.get_release_quality(name, None)
                    filetype = source_utils.getFileType(name)
                    info += [filetype.strip(), name]
                    info = filter(None, info)
                    info = ' | '.join(info)

                    season_url = urlparse.urljoin(self.pm_base_link, self.pm_dl_link % (self.pm_api_key, _hash))
                    r = client.request(season_url)
                    streamitems = json.loads(r)
                    if not streamitems['status'] == 'success': raise Exception()
                    streamitems = streamitems['content']
                    streamitems = [i for i in streamitems if not i['stream_link'] == False]
                    streamitems = [(i['link'], i['size']) for i in streamitems if se_ep.lower() in i['link'].rsplit('/')[-1].lower()]
                    streamitems = sorted(streamitems, key=lambda x: int(x[1]), reverse = True)
                    url = streamitems[0][0]

                    size = ''
                    try:
                        size = streamitems[0][1]
                        size = float(size)/1073741824
                        size = '%.2f GB' % size
                    except:
                        pass
                    try: info = '%s (%s) | %s' % (size, item[2], info)
                    except: pass

                    sources.append({'source': 'PMCACHED', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': False, 'cached': True})
                except:
                    pass

            return sources
        except:
            return sources
Exemple #43
    def sources(self, url, hostDict, hostprDict):
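        # Walk the RSS feed for title and pubDate-year matches, confirm the
        # post references the right IMDb id, then pull the lazy-loaded player
        # iframe as a hoster source.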
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s' % (data['title'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            items = []

            for post in posts:

                try:
                    t = client.parseDOM(post, 'title')[0]
                    t2 = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)

                    if not cleantitle.get_simple(t2.replace('Watch Online','')) == cleantitle.get(title): raise Exception()

                    l = client.parseDOM(post, 'link')[0]
 
                    p = client.parseDOM(post, 'pubDate')[0]

                    if data['year'] in p: items += [(t, l)]

                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)
                    
                    u = client.request(item[1])
                    if 'http://www.imdb.com/title/%s/' % data['imdb'] in u:
                        
                        l = client.parseDOM(u, 'div', attrs={'class': 'movieplay'})[0]
                        l = client.parseDOM(l, 'iframe', ret='data-lazy-src')[0]

                        quality, info = source_utils.get_release_quality(name, l)
                        info = ' | '.join(info)

                        url = l

                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        valid, host = source_utils.is_host_valid(url,hostDict)
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                        'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Exemple #45
    def sources(self, url, hostDict, hostprDict):
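        # Scan up to three static listing pages for an entry carrying the
        # requested IMDb id, then add each download link as a direct 'DL'
        # source with the listed size.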
        try:
            sources = []

            if url is None: return sources

            data = url
            imdb = data['imdb']

            try:
                query = urlparse.urljoin(self.base_link, self.search_link)
                result = requests.get(query).text
                m = re.findall(
                    'Movie Size:(.+?)<.+?href="(.+?)".+?href="(.+?)"\s*onMouse',
                    result, re.DOTALL)
                m = [(i[0], i[1], i[2]) for i in m if imdb in i[1]]
                if m:
                    link = m
                else:
                    query = urlparse.urljoin(self.base_link, self.search_link2)
                    result = requests.get(query).text
                    m = re.findall(
                        'Movie Size:(.+?)<.+?href="(.+?)".+?href="(.+?)"\s*onMouse',
                        result, re.DOTALL)
                    m = [(i[0], i[1], i[2]) for i in m if imdb in i[1]]
                    if m:
                        link = m
                    else:
                        query = urlparse.urljoin(self.base_link,
                                                 self.search_link3)
                        result = requests.get(query).text
                        m = re.findall(
                            'Movie Size:(.+?)<.+?href="(.+?)".+?href="(.+?)"\s*onMouse',
                            result, re.DOTALL)
                        m = [(i[0], i[1], i[2]) for i in m if imdb in i[1]]
                        if m: link = m

            except:
                traceback.print_exc()
                return

            for item in link:
                try:

                    quality, info = source_utils.get_release_quality(
                        item[2], None)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                            item[0])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        traceback.print_exc()
                        pass

                    info = ' | '.join(info)

                    url = item[2]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    sources.append({
                        'source': 'DL',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    traceback.print_exc()
                    pass

            return sources
        except:
            traceback.print_exc()
            return sources
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            hostDict = hostprDict + hostDict

            items = []
            posts = []
            links = []

            url = urlparse.urljoin(self.base_link,
                                   self.search_link % data['imdb'])
            r = client.request(url)
            if 'CLcBGAs/s1600/1.jpg' in r:
                url = client.parseDOM(r, 'a', ret='href')[0]
                self.base_link = url = urlparse.urljoin(
                    url, self.search_link % data['imdb'])
                r = client.request(url)
            posts = client.parseDOM(r, 'article')
            if not posts:
                if 'tvshowtitle' in data:
                    url = urlparse.urljoin(
                        self.base_link, self.search_link %
                        (cleantitle.geturl(title).replace('-', '+') + '+' +
                         hdlr))
                    r = client.request(url,
                                       headers={'User-Agent': client.agent()})
                    posts += client.parseDOM(r, 'article')
                    url = urlparse.urljoin(
                        self.base_link, self.search_link %
                        cleantitle.geturl(title).replace('-', '+'))
                    r = client.request(url,
                                       headers={'User-Agent': client.agent()})
                    posts += client.parseDOM(r, 'article')

            if not posts: return sources
            for post in posts:
                try:
                    t = client.parseDOM(post, 'img', ret='title')[0]
                    u = client.parseDOM(post, 'a', ret='href')[0]
                    s = re.search(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        post)
                    s = s.groups()[0] if s else '0'
                    items += [(t, u, s, post)]
                except:
                    pass
            items = set(items)
            items = [
                i for i in items
                if cleantitle.get(title) in cleantitle.get(i[0])
            ]

            for item in items:
                name = item[0]
                u = client.request(item[1])
                if 'tvshowtitle' in data:
                    if hdlr.lower() not in name.lower():
                        pattern = '''<p>\s*%s\s*<\/p>(.+?)<\/ul>''' % hdlr.lower()
                        r = re.search(pattern, u, flags=re.I | re.S)
                        if not r: continue
                        links = client.parseDOM(r.groups()[0], 'a', ret='href')
                    else:
                        links = client.parseDOM(u, 'a', ret='href')
                else:
                    links = client.parseDOM(u, 'a', ret='href')
                for url in links:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    info = []
                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                            item[2])[0]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })

            return sources
        except:
            return sources
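
# The `hdlr` string built above is the document-wide convention for matching a
# release name: an SxxExx tag for episodes, the release year for movies. A
# sketch of that convention in isolation (`_make_hdlr` is a hypothetical name):
def _make_hdlr(data):
    if 'tvshowtitle' in data:
        return 'S%02dE%02d' % (int(data['season']), int(data['episode']))
    return data['year']

# _make_hdlr({'tvshowtitle': 'Show', 'season': '1', 'episode': '4'}) -> 'S01E04'
# _make_hdlr({'title': 'Movie', 'year': '2012'})                     -> '2012'
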
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s' % (data['title'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            items = []

            for post in posts:

                try:
                    t = client.parseDOM(post, 'title')[0]
                    t2 = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)

                    if not cleantitle.get_simple(t2.replace('Watch Online','')) == cleantitle.get(title): raise Exception()

                    l = client.parseDOM(post, 'link')[0]
 
                    p = client.parseDOM(post, 'pubDate')[0]

                    if data['year'] in p: items += [(t, l)]

                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)
                    
                    u = client.request(item[1])
                    if 'http://www.imdb.com/title/%s/' % data['imdb'] in u:
                        
                        l = client.parseDOM(u, 'div', attrs={'class': 'movieplay'})[0]
                        l = client.parseDOM(l, 'iframe', ret='data-lazy-src')[0]

                        quality, info = source_utils.get_release_quality(name, l)
                        info = ' | '.join(info)

                        url = l

                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
                                        'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
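
# The re.sub used above to reduce a post title to a bare title (everything
# from the first year / SxxExx / 3D tag onward is dropped) is worth seeing on
# its own. A sketch; `_strip_release_tags` is a hypothetical name:
import re

def _strip_release_tags(name):
    return re.sub(r'(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

# _strip_release_tags('Some Movie 2017 1080p BluRay Watch Online') -> 'Some Movie'
# _strip_release_tags('Show.S01E02.720p.HDTV')                     -> 'Show'
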
Exemple #48
    def sources(self, url, hostDict, hostprDict):

        api_key = self.get_api()

        if not api_key:
            return

        sources = []

        try:

            content_type = 'episode' if 'tvshowtitle' in url else 'movie'
            match = 'extended'
            moderated = 'no' if content_type == 'episode' else 'yes'
            search_in = ''

            if content_type == 'movie':
                title = url['title'].replace(':', ' ').replace(' ',
                                                               '+').replace(
                                                                   '&', 'and')
                title = title.replace("'", "")
                year = url['year']
                link = '@name+%s+%s+@files+%s+%s' \
                        % (title, year, title, year)

            elif content_type == 'episode':
                title = url['tvshowtitle'].replace(':', ' ').replace(
                    ' ', '+').replace('&', 'and')
                season = int(url['season'])
                episode = int(url['episode'])
                season00_ep00_SE = 's%02de%02d' % (season, episode)
                season0_ep0_SE = 's%de%d' % (season, episode)
                season00_ep00_X = '%02dx%02d' % (season, episode)
                season0_ep0_X = '%dx%d' % (season, episode)
                season0_ep00_X = '%dx%02d' % (season, episode)
                link = '@name+%s+@files+%s+|+%s+|+%s+|+%s+|+%s' \
                        % (title, season00_ep00_SE, season0_ep0_SE, season00_ep00_X, season0_ep0_X, season0_ep00_X)

            s = requests.Session()
            link = (
                self.base_link + self.meta_search_link %
                (api_key, link, match, moderated, search_in, self.search_limit)
            )

            p = s.get(link)
            p = json.loads(p.text)

            if p['status'] != 'ok':
                return

            files = p['files']

            for i in files:
                if i['is_ready'] == '1' and i['type'] == 'video':
                    try:
                        source = 'SINGLE'
                        if int(i['files_num_video']) > 3:
                            source = 'PACK [B](x%02d)[/B]' % int(
                                i['files_num_video'])
                        file_name = i['name']
                        file_id = i['id']
                        file_dl = i['url_dl']
                        if content_type == 'episode':
                            url = '%s<>%s<>%s<>%s<>%s<>%s' % (
                                file_id, season00_ep00_SE, season0_ep0_SE,
                                season00_ep00_X, season0_ep0_X, season0_ep00_X)
                            details = self.details(file_name, i['size'],
                                                   i['video_info'])
                        else:
                            url = '%s<>%s<>%s+%s' % (file_id, 'movie', title,
                                                     year)
                            details = self.details(file_name, i['size'],
                                                   i['video_info']).split('|')
                            details = details[0] + ' | ' + file_name.replace(
                                '.', ' ')

                        quality = source_utils.get_release_quality(
                            file_name, file_dl)
                        sources.append({
                            'source': source,
                            'quality': quality[0],
                            'language': "en",
                            'url': url,
                            'info': details,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass

                else:
                    continue

            return sources

        except:
            print("Unexpected error in Furk Script: source", sys.exc_info()[0])
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return sources
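
# How the Furk example above labels results: anything the API reports as
# holding more than three video files is flagged as a season pack rather than
# a single release. A sketch of just that branch (hypothetical helper name):
def _label_source(files_num_video):
    n = int(files_num_video)
    return 'PACK [B](x%02d)[/B]' % n if n > 3 else 'SINGLE'

# _label_source('1')  -> 'SINGLE'
# _label_source('12') -> 'PACK [B](x12)[/B]'
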
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not control.setting('pmcached.providers') == 'true' and not control.setting('rdcached.providers') == 'true': return sources
            if self.pm_api_key == '' and self.rd_api_key == '': return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            category = 'TV' if 'tvshowtitle' in data else 'Movies'

            if 'tvshowtitle' in data and control.setting('pmcached.providers') == 'true' and not self.pm_api_key == '':
                season = 'S%02d' % (int(data['season']))
                episode = 'E%02d' % (int(data['episode']))
                seasonquery = '%s S%02d' % (data['tvshowtitle'], int(data['season']))
                seasonquery = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', seasonquery)
                sources += self.searchShowPack(title, season, episode, seasonquery, category)

            url = self.search_link % (urllib.quote_plus(query), category)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)

            result = client.parseDOM(r, 'table', attrs={'class': 'table.+?'})[0]
            result = re.sub('(?s)<thead>.+?<\/thead>', '', result)
            result = client.parseDOM(result, 'tr')
            result = [(client.parseDOM(i, 'a', attrs={'title': 'Magnet link'}, ret='href')[0], client.parseDOM(i, 'a')[0], i) for i in result]
            result = [(i[0], re.sub('<.+?>', '', i[1]), i[2]) for i in result]

            items = []
            
            for item in result:
                try:
                    name = item[1]
                    magnetlink = item[0]

                    size = ''
                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[0]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                    except:
                        pass

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d+|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
                    y = re.findall('[\.|\(|\[|\s](\d{4}|(?:S|s)\d*(?:E|e)\d*|(?:S|s)\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == hdlr: raise Exception()

                    u = [(name, magnetlink, size)]
                    items += u
                except:
                    pass

            if control.setting('pmcached.providers') == 'true' and not self.pm_api_key == '':
                for item in items:
                    try:
                        _hash = re.findall('btih:(.*?)\W', item[1])[0]
                        checkurl = urlparse.urljoin(self.pm_base_link, self.pm_checkcache_link % (self.pm_api_key, _hash, self.pm_api_key))
                        r = client.request(checkurl)
                        if not 'finished' in r: raise Exception()

                        name = client.replaceHTMLCodes(item[0])
                        quality, info = source_utils.get_release_quality(name, None)
                        filetype = source_utils.getFileType(name)
                        info += [filetype.strip(), name]
                        info = filter(None, info)
                        info = ' | '.join(info)
                        if not item[2] == '':
                            info = '%s | %s' % (item[2], info)
                        url = 'magnet:?xt=urn:btih:%s' % _hash

                        sources.append({'source': 'PMCACHED', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': False, 'cached': True})
                    except:
                        pass

            if control.setting('rdcached.providers') == 'true' and not self.rd_api_key == '':
                checktorr_r = self.checkrdcache()
                checktorr_result = json.loads(checktorr_r)

                for item in items:
                    try:
                        _hash = re.findall('btih:(.*?)\W', item[1])[0]
                        _hash = _hash.lower()

                        url = ''
                        for i in checktorr_result:
                            try:
                                if _hash == i['hash'] and i['status'] == 'downloaded':
                                    url = i['links'][0]
                                    break
                            except:
                                pass

                        if url == '':
                            checkurl = urlparse.urljoin(self.rd_base_link, self.rd_checkcache_link % (_hash, self.rd_api_key))
                            r = client.request(checkurl)
                            checkinstant = json.loads(r)
                            checkinstant = checkinstant[_hash]

                            checkinstant_num = 0
                            try:
                                checkinstant_num = len(checkinstant['rd'])
                            except:
                                pass

                            if checkinstant_num == 0: raise Exception()
                            url = 'rdmagnet:?xt=urn:btih:%s' % _hash

                        if url == '': raise Exception()

                        name = client.replaceHTMLCodes(item[0])
                        quality, info = source_utils.get_release_quality(name, None)
                        filetype = source_utils.getFileType(name)
                        info += [filetype.strip(), name]
                        info = filter(None, info)
                        info = ' | '.join(info)
                        if not item[2] == '':
                            info = '%s | %s' % (item[2], info)

                        sources.append({'source': 'RDCACHED', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': False, 'cached': True})
                    except:
                        pass

            return sources
        except:
            log_utils.log('>>>> %s TRACE <<<<\n%s' % (__file__.upper().split('\\')[-1].split('.')[0], traceback.format_exc()), log_utils.LOGDEBUG)
            return sources
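
# Both cache checks above key on the magnet's info-hash. A sketch of pulling
# the btih out and rebuilding a bare magnet; note the original pattern
# 'btih:(.*?)\W' needs a non-word character after the hash, so a sentinel is
# appended here in case the hash ends the string (hypothetical helper name):
import re

def _magnet_hash(magnet):
    m = re.findall(r'btih:(.*?)\W', magnet + '&')
    return m[0] if m else None

# h = _magnet_hash('magnet:?xt=urn:btih:ABC123&dn=Some.Release')  # 'ABC123'
# bare = 'magnet:?xt=urn:btih:%s' % h
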
Exemple #50
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'Module'})
            r = [(r, dom_parser.parse_dom(r, 'a', attrs={'href': re.compile('[^\'"]*xrel_search_query[^\'"]*')}, req='href'))]
            r = [(i[0], i[1][0].attrs['href'] if i[1] else '') for i in r]

            rels = dom_parser.parse_dom(r[0][0], 'a', attrs={'href': re.compile('[^\'"]*ReleaseList[^\'"]*')}, req='href')
            if rels and len(rels) > 1:
                r = []
                for rel in rels:
                    relData = client.request(urlparse.urljoin(self.base_link, rel.attrs['href']))
                    relData = dom_parser.parse_dom(relData, 'table', attrs={'class': 'release-list'})
                    relData = dom_parser.parse_dom(relData, 'tr', attrs={'class': 'row'})
                    relData = [(dom_parser.parse_dom(i, 'td', attrs={'class': re.compile('[^\'"]*list-name[^\'"]*')}),
                                dom_parser.parse_dom(i, 'img', attrs={'class': 'countryflag'}, req='alt'),
                                dom_parser.parse_dom(i, 'td', attrs={'class': 'release-types'})) for i in relData]
                    relData = [(i[0][0].content, i[1][0].attrs['alt'].lower(), i[2][0].content) for i in relData if i[0] and i[1] and i[2]]
                    relData = [(i[0], i[2]) for i in relData if i[1] == 'deutsch']
                    relData = [(i[0], dom_parser.parse_dom(i[1], 'img', attrs={'class': 'release-type-stream'})) for i in relData]
                    relData = [i[0] for i in relData if i[1]]
                    #relData = dom_parser.parse_dom(relData, 'a', req='href')[:3]
                    relData = dom_parser.parse_dom(relData, 'a', req='href')

                    for i in relData:
                        i = client.request(urlparse.urljoin(self.base_link, i.attrs['href']))
                        i = dom_parser.parse_dom(i, 'div', attrs={'id': 'Module'})
                        i = [(i, dom_parser.parse_dom(i, 'a', attrs={'href': re.compile('[^\'"]*xrel_search_query[^\'"]*')}, req='href'))]
                        r += [(x[0], x[1][0].attrs['href'] if x[1] else '') for x in i]

            r = [(dom_parser.parse_dom(i[0], 'div', attrs={'id': 'ModuleReleaseDownloads'}), i[1]) for i in r]
            r = [(dom_parser.parse_dom(i[0][0], 'a', attrs={'class': re.compile('.*-stream.*')}, req='href'), i[1]) for i in r if len(i[0]) > 0]

            for items, rel in r:
                rel = urlparse.urlparse(rel).query
                rel = urlparse.parse_qs(rel)['xrel_search_query'][0]

                quality, info = source_utils.get_release_quality(rel)

                items = [(i.attrs['href'], i.content) for i in items]
                items = [(i[0], dom_parser.parse_dom(i[1], 'img', req='src')) for i in items]
                items = [(i[0], i[1][0].attrs['src']) for i in items if i[1]]
                items = [(i[0], re.findall('.+/(.+\.\w+)\.\w+', i[1])) for i in items]
                items = [(i[0], i[1][0]) for i in items if i[1]]

                info = ' | '.join(info)

                for link, hoster in items:
                    valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                    if not valid: continue

                    sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False, 'checkquality': True})

            return sources
        except:
            return sources
Exemple #51
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]

                    c = client.parseDOM(post, 'content.+?')[0]

                    u = re.findall('>Single Link(.+?)p>\s*<span', c.replace('\n', ''))[0]

                    u = client.parseDOM(u, 'a', ret='href')

                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', c)
                    s = s[0] if s else '0'

                    items += [(t, i, s) for i in u]
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr: raise Exception()
                    quality, info = source_utils.get_release_quality(name, item[1])

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url,hostDict)
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
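
# The final filter above prefers to drop CAM-quality entries, but only when
# something better survives; otherwise the CAM copies are kept as a fallback.
# The same two lines appear in several of these examples. As a sketch:
def _prefer_non_cam(sources):
    check = [i for i in sources if not i['quality'] == 'CAM']
    return check if check else sources
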
Exemple #52
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))\
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = urlparse.urljoin(
                self.base_link,
                self.search_link.format(query[0].lower(),
                                        cleantitle.geturl(query)))
            r = client.request(url)
            r = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(r, 'tr')
            posts = [i for i in posts if 'magnet:' in i]
            for post in posts:
                post = post.replace('&nbsp;', ' ')
                name = client.parseDOM(post, 'a', ret='title')[1]
                t = name.split(hdlr)[0]
                if not cleantitle.get(re.sub('(\(|\))', '', t)) == cleantitle.get(title):
                    continue
                try:
                    y = re.findall(
                        '[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                        name, re.I)[-1].upper()
                except:
                    y = re.findall(
                        '[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                        re.I)[-1].upper()
                if not y == hdlr:
                    continue
                links = client.parseDOM(post, 'a', ret='href')
                magnet = [
                    i.replace('&amp;', '&') for i in links if 'magnet:' in i
                ][0]
                url = magnet.split('&tr')[0]
                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall(
                        '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        post)[0]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(
                        re.sub('[^0-9|/.|/,]', '', size.replace(',',
                                                                '.'))) / div
                    size = '%.2f GB' % size
                except:
                    size = '0'
                info.append(size)
                info = ' | '.join(info)
                if control.setting('torrent.rd_check') == 'true':
                    checked = rd_check.rd_cache_check(url)
                    if not checked:
                        continue
                    sources.append({
                        'source': 'Cached Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': checked,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                else:
                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('---Magnetdl Testing - Exception: \n' + str(failure))
            return sources
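
# The magnet handling above trims each link to its bare form before the
# Real-Debrid check: HTML-escaped ampersands are restored, then everything
# from the first '&tr' (the tracker list) on is dropped. A sketch:
def _bare_magnet(magnet):
    return magnet.replace('&amp;', '&').split('&tr')[0]

# _bare_magnet('magnet:?xt=urn:btih:ABC&amp;tr=udp://tracker') -> 'magnet:?xt=urn:btih:ABC'
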
Exemple #53
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url == None:
                raise Exception()

            if not (self.api and not self.api == ''):
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            year = int(data['year']) if 'year' in data and not data['year'] == None else None
            season = int(data['season']) if 'season' in data and not data['season'] == None else None
            episode = int(data['episode']) if 'episode' in data and not data['episode'] == None else None
            query = '%s S%02dE%02d' % (title, season, episode) if 'tvshowtitle' in data else '%s %d' % (title, year)

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query += ' lang:%s' % self.language[0]
            query = urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, self.search_link)

            hostDict = hostprDict + hostDict

            iterations = self.streamLimit/self.streamIncrease
            last = self.streamLimit - (iterations * self.streamIncrease)
            if not last:
                iterations = iterations - 1
                last = self.streamIncrease
            iterations = iterations + 1

            seen_urls = set()
            for type in self.types:
                searchFrom = 0
                searchCount = self.streamIncrease
                for offset in range(iterations):
                    if iterations == offset + 1: searchCount = last
                    urlNew = url % (type, self.api, query, searchCount, searchFrom)
                    searchFrom = searchFrom + self.streamIncrease

                    results = client.request(urlNew)
                    results = json.loads(results)

                    apistatus = results['status']
                    if apistatus != 'success': break

                    results = results['result']

                    added = False
                    for result in results:
                        jsonName = result['title']
                        jsonSize = result['sizeinternal']
                        jsonExtension = result['extension']
                        jsonLanguage = result['lang']
                        jsonHoster = result['hostername'].lower()
                        jsonLink = result['hosterurls'][0]['url']
                                                    
                        if jsonLink in seen_urls: continue
                        seen_urls.add(jsonLink)

                        if not hdlr in jsonName.upper(): continue
                                                
                        if not self.releaseValid(title, jsonName): continue # filter non en releases

                        if not jsonHoster in hostDict: continue

                        if jsonExtension == 'rar': continue

                        quality, info = source_utils.get_release_quality(jsonName)
                        info.append(self.formatSize(jsonSize))
                        info.append(jsonName)
                        info = '|'.join(info)

                        sources.append({'source' : jsonHoster, 'quality':  quality, 'language' : jsonLanguage, 'url' : jsonLink, 'info': info, 'direct' : False, 'debridonly' : False})
                        added = True

                    if not added:
                        break

            return sources
        except:
            return sources
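
# The paging arithmetic above is easy to misread: streamLimit is split into
# full pages of streamIncrease results plus a short final page. A sketch of
# the same computation (integer division assumed, as in the Python 2 original):
def _page_sizes(limit, increase):
    iterations = limit // increase        # number of full pages
    last = limit - iterations * increase  # size of the remainder page
    if not last:
        iterations -= 1
        last = increase
    return [increase] * iterations + [last]

# _page_sizes(50, 20) -> [20, 20, 10]
# _page_sizes(40, 20) -> [20, 20]
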
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not debrid.status(): raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = client.parseDOM(r, 'h2')
            r = [re.findall('''<a.+?href=["']([^"']+)["']>(.+?)</a>''', i, re.DOTALL) for i in r]

            hostDict = hostprDict + hostDict

            items = []

            for item in r:
                try:
                    t = item[0][1]
                    t = re.sub('(\[.*?\])|(<.+?>)', '', t)
                    t1 = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)

                    if not cleantitle.get(t1) == cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', t)[-1].upper()

                    if not y == hdlr: raise Exception()

                    data = client.request(urlparse.urljoin(self.base_link, item[0][0]))
                    data = dom_parser2.parse_dom(data, 'a', attrs={'target': '_blank'})
                    u = [(t, i.content) for i in data]
                    items += u

                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    quality, info = source_utils.get_release_quality(name, item[1])

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', name)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[1]
                    if not url.startswith('http'): continue
                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
Exemple #55
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url == None: return sources
         if debrid.status() == False: raise Exception()
         hostDict = hostprDict + hostDict
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = '%sS%02dE%02d' % (
             data['year'], int(data['season']), int(data['episode'])
         ) if 'tvshowtitle' in data else data['year']
         query = '%s %s S%02dE%02d' % (
             data['tvshowtitle'], data['year'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                 data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         try:
             url = self.search_link % urllib.quote_plus(query)
             url = urlparse.urljoin(self.base_link, url)
             r = self.scraper.get(url).content
             posts = client.parseDOM(r, 'div', attrs={'class': 'post'})
             items = []
             dupes = []
             for post in posts:
                 try:
                     t = client.parseDOM(post, 'a')[0]
                     t = re.sub('<.+?>|</.+?>', '', t)
                     x = re.sub(
                         '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                         '', t)
                     if not cleantitle.get(title) in cleantitle.get(x):
                         raise Exception()
                     y = re.findall(
                         '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                         t)[-1].upper()
                     if not y == hdlr: raise Exception()
                     fmt = re.sub(
                         '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                         '', t.upper())
                     fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                     fmt = [i.lower() for i in fmt]
                     #if not any(i in ['1080p', '720p'] for i in fmt): raise Exception()
                     if len(dupes) > 2: raise Exception()
                     dupes += [x]
                     u = client.parseDOM(post, 'a', ret='href')[0]
                     r = self.scraper.get(u).content
                     u = client.parseDOM(r, 'a', ret='href')
                     u = [(i.strip('/').split('/')[-1], i) for i in u]
                     items += u
                 except:
                     pass
         except:
             pass
         for item in items:
             try:
                 name = item[0]
                 name = client.replaceHTMLCodes(name)
                 t = re.sub(
                     '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                     '', name)
                 if not cleantitle.get(t) == cleantitle.get(title):
                     raise Exception()
                 y = re.findall(
                     '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                     name)[-1].upper()
                 if not y == hdlr: raise Exception()
                 quality, info = source_utils.get_release_quality(
                     name, item[1])
                 url = item[1]
                 if any(x in url for x in ['.rar', '.zip', '.iso']):
                     raise Exception()
                 url = client.replaceHTMLCodes(url)
                 url = url.encode('utf-8')
                 host = re.findall(
                     '([\w]+[.][\w]+)$',
                     urlparse.urlparse(url.strip().lower()).netloc)[0]
                 if not host in hostDict: raise Exception()
                 host = client.replaceHTMLCodes(host)
                 host = host.encode('utf-8')
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'info': info,
                     'direct': False,
                     'debridonly': True
                 })
             except:
                 pass
         return sources
     except:
         return
Exemple #56
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources
            if debrid.status() is False:
                raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            try:
                r = client.request(url)
                posts = client.parseDOM(r, 'tr')
                for post in posts:
                    links = re.findall('<a href="(/torrent/.+?)">(.+?)<', post,
                                       re.DOTALL)
                    for link, name in links:
                        link = urlparse.urljoin(self.base_link, link)
                        link = client.request(link)
                        link = re.findall(
                            'a class=".+?" rel=".+?" href="(magnet:.+?)"',
                            link, re.DOTALL)
                        try:
                            size = re.findall(
                                '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                                post)[0]
                            div = 1 if size.endswith(('GB', 'GiB')) else 1024
                            size = float(
                                re.sub('[^0-9|/.|/,]', '',
                                       size.replace(',', '.'))) / div
                            size = '%.2f GB' % size
                        except BaseException:
                            size = '0'
                        for url in link:
                            if hdlr not in url:
                                continue
                            url = url.split('&tr')[0]
                            quality, info = source_utils.get_release_quality(name)
                            if any(x in url for x in [
                                    'Tamil', 'FRENCH', 'Ita', 'italian',
                                    'TRUEFRENCH', '-lat-', 'Dublado'
                            ]):
                                continue
                            info.append(size)
                            info = ' | '.join(info)
                            sources.append({
                                'source': 'Torrent',
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
            except:
                return
            return sources
        except:
            return sources
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = '%sS%02dE%02d' % (data['year'], int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s S%02dE%02d' % (
                data['tvshowtitle'],
                data['year'],
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'],
                data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            try:
                url = self.search_link % urllib.quote_plus(query)
                url = urlparse.urljoin(self.base_link, url)

                r = self.scraper.get(url).content

                posts = client.parseDOM(r, 'div', attrs={'class': 'post'})

                items = []
                dupes = []

                for post in posts:
                    try:
                        t = client.parseDOM(post, 'a')[0]
                        t = re.sub('<.+?>|</.+?>', '', t)

                        x = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)
                        if not cleantitle.get(title) in cleantitle.get(x):
                            raise Exception()
                        y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', t)[-1].upper()
                        if not y == hdlr:
                            raise Exception()

                        fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', t.upper())
                        fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                        fmt = [i.lower() for i in fmt]
                        # if not any(i in ['1080p', '720p'] for i in fmt): raise Exception()

                        if len(dupes) > 2:
                            raise Exception()
                        dupes += [x]

                        u = client.parseDOM(post, 'a', ret='href')[0]

                        r = self.scraper.get(u).content
                        u = client.parseDOM(r, 'a', ret='href')
                        u = [(i.strip('/').split('/')[-1], i) for i in u]
                        items += u
                    except Exception:
                        pass
            except Exception:
                pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)

                    if not cleantitle.get(t) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if not y == hdlr:
                        raise Exception()

                    quality, info = source_utils.get_release_quality(name, item[1])

                    url = item[1]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en',
                                    'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except Exception:
                    pass

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('SceneRls - Exception: \n' + str(failure))
            return
Exemple #58
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources

            html = client.request(url)
            try:
                iframe = client.parseDOM(
                    html,
                    'iframe',
                    attrs={'class': 'embed-responsive-item'},
                    ret='src')[0]
                host = iframe.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'en',
                    'url': iframe,
                    'direct': False,
                    'debridonly': False
                })
            except:
                flashvar = client.parseDOM(html,
                                           'param',
                                           attrs={'name': 'flashvars'},
                                           ret='value')[0]
                link = flashvar.split('file=')[1]
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'en',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })

            containers = client.parseDOM(html,
                                         'div',
                                         attrs={'class': 'dwn-box'})

            for container in containers:
                link = client.parseDOM(container,
                                       'a',
                                       attrs={'rel': 'nofollow'},
                                       ret='href')[0]
                redirect = client.request(link, output='geturl')
                quality, info = source_utils.get_release_quality(redirect)
                sources.append({
                    'source': 'DirectLink',
                    'quality': quality,
                    'language': 'en',
                    'url': redirect,
                    'info': info,
                    'direct': True,
                    'debridonly': False
                })
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('CoolTV - Exception: \n' + str(failure))
            return
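
# How the example above turns an embed URL into a display name for the host:
# drop the scheme and 'www.', keep the first label of the domain, title-case
# it. urlparse makes the intent clearer than the string splitting used above.
try:
    from urlparse import urlparse         # Python 2, as used throughout
except ImportError:
    from urllib.parse import urlparse     # Python 3 fallback

def _host_label(link):
    netloc = urlparse(link).netloc.replace('www.', '')
    return netloc.split('.')[0].title()

# _host_label('http://www.vidcloud.co/embed/abc') -> 'Vidcloud'
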
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url == None:
                raise Exception()

            if not (self.api and not self.api == ''):
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            year = int(data['year']) if 'year' in data and not data['year'] == None else None
            season = int(data['season']) if 'season' in data and not data['season'] == None else None
            episode = int(data['episode']) if 'episode' in data and not data['episode'] == None else None
            query = '%s S%02dE%02d' % (
                title, season,
                episode) if 'tvshowtitle' in data else '%s %d' % (title, year)

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query += ' lang:%s' % self.language[0]
            query = urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, self.search_link)

            hostDict = hostprDict + hostDict

            iterations = self.streamLimit / self.streamIncrease
            last = self.streamLimit - (iterations * self.streamIncrease)
            if not last:
                iterations = iterations - 1
                last = self.streamIncrease
            iterations = iterations + 1

            seen_urls = set()
            for type in self.types:
                searchFrom = 0
                searchCount = self.streamIncrease
                for offset in range(iterations):
                    if iterations == offset + 1: searchCount = last
                    urlNew = url % (type, self.api, query, searchCount,
                                    searchFrom)
                    searchFrom = searchFrom + self.streamIncrease

                    results = client.request(urlNew)
                    results = json.loads(results)

                    apistatus = results['status']
                    if apistatus != 'success': break

                    results = results['result']

                    added = False
                    for result in results:
                        jsonName = result['title']
                        jsonSize = result['sizeinternal']
                        jsonExtension = result['extension']
                        jsonLanguage = result['lang']
                        jsonHoster = result['hostername'].lower()
                        jsonLink = result['hosterurls'][0]['url']

                        if jsonLink in seen_urls: continue
                        seen_urls.add(jsonLink)

                        if jsonHoster not in hostDict: continue

                        if not self.extensionValid(jsonExtension): continue

                        quality, info = source_utils.get_release_quality(
                            jsonName)
                        info.append(self.formatSize(jsonSize))
                        info.append(jsonName)
                        info = '|'.join(info)
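                        # e.g. info -> '1.40 GB|Some.Movie.2016.1080p.x264'
                        # (the size format depends on self.formatSize; the
                        # values here are illustrative).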

                        sources.append({
                            'source': jsonHoster,
                            'quality': quality,
                            'language': jsonLanguage,
                            'url': jsonLink,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })
                        added = True

                    if not added:
                        break

            return sources
        except:
            return sources
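    # For reference, the JSON shape this method expects, derived from the
    # field accesses above (values are placeholders):
    #
    #     {"status": "success",
    #      "result": [{"title": "...", "sizeinternal": 123456789,
    #                  "extension": "mkv", "lang": "en",
    #                  "hostername": "example",
    #                  "hosterurls": [{"url": "..."}]}]}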
Exemple #60
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title']
            hdlr = data['year']

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', title)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)

            posts = client.parseDOM(r, 'div', attrs={'class': 'video_title'})

            items = []

            for post in posts:
                try:
                    data = dom_parser2.parse_dom(post, 'a', req=['href', 'title'])[0]
                    t = data.content
                    # The anchor's title attribute appears to embed the year
                    # and quality, e.g. 'Name (2019) - 1080p'.
                    y = re.findall('\((\d{4})\)', data.attrs['title'])[0]
                    qual = data.attrs['title'].split('-')[1]
                    link = data.attrs['href']

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
                    if not y == hdlr: raise Exception()

                    items += [(link, qual)]

                except Exception:
                    pass

            for item in items:
                try:
                    r = client.request(item[0]) if item[0].startswith('http') else client.request(urlparse.urljoin(self.base_link, item[0]))

                    url = re.findall('''frame_url\s*=\s*["']([^']+)['"];''', r, re.DOTALL)[0]
                    # Handle protocol-relative embeds ('//host/path').
                    url = url if url.startswith('http') else urlparse.urljoin('https://', url)

                    if 'vidlink' in url:
                        ua = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1'}
                        html = client.request(url, headers=ua)
                        postID = re.findall("postID\s*=\s*'([^']+)", html)[0]
                        data = {'postID': postID}

                        # The endpoint returns packed JavaScript; unpack it
                        # to expose the HLS playlist URL.
                        rid = client.request('https://vidlink.org/embed/update_views', post=data, headers=ua,
                                             referer=url)
                        from resources.lib.modules import jsunpack
                        rid = jsunpack.unpack(rid)
                        playlist = re.findall('''file1=['"](.+?)['"];''', rid)[0]
                        links = client.request(playlist, headers=ua, referer=url)

                        try:
                            # Prefer the Greek subtitle track, fall back to English.
                            sub = re.findall('''URI="/sub/vtt/(\d+)/sub.m3u8",LANGUAGE="el"''', links)[0]
                        except IndexError:
                            sub = re.findall('''URI="/sub/vtt/(\d+)/sub.m3u8",LANGUAGE="en"''', links)[0]
                        sub = 'https://opensubtitles.co/sub/{0}.vtt'.format(sub)

                        pattern = 'RESOLUTION=\d+x(\d{3,4}),SUBTITLES="subs"\s*(/drive.+?.m3u8)'
                        links = re.findall(pattern, links)
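                        # Illustrative master-playlist lines the pattern above
                        # matches (not captured from the site):
                        #   ...,RESOLUTION=1920x1080,SUBTITLES="subs"
                        #   /drive/hls/abc123/1080.m3u8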
                        for quality, link in links:
                            quality = source_utils.get_release_quality(quality, quality)[0]
                            link = 'https://p2p.vidlink.org/' + link.replace('/drive//hls/', 'drive/hls/')
                            sources.append({'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': link,
                                            'sub': sub, 'direct': True, 'debridonly': False})

                except Exception:
                    pass

            return sources
        except Exception:
            return sources