Example #1
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         hostDict = hostprDict + hostDict
         r = cfScraper.get(url).content
         qual = re.compile('class="quality">(.+?)</span>').findall(r)
         quality = 'SD'  # defaults, so an empty match list cannot raise NameError below
         info = ''
         for i in qual:
             info = i
             if '1080' in i:
                 quality = '1080p'
             elif '720' in i:
                 quality = '720p'
             else:
                 quality = 'SD'
         u = re.compile('data-video="(.+?)"').findall(r)
         for url in u:
             if not url.startswith('http'):
                 url =  "https:" + url
             if 'vidcloud' in url:
                 r = cfScraper.get(url).content
                 t = re.compile('data-video="(.+?)"').findall(r)
                 for url in t:
                     if not url.startswith('http'):
                         url =  "https:" + url
                     valid, host = source_utils.is_host_valid(url, hostDict)
                     if valid and 'vidcloud' not in url:
                         sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': url, 'direct': False, 'debridonly': False})
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': url, 'direct': False, 'debridonly': False})
         return sources
     except:
         return sources
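Note: `cfScraper.get(url).content` returns bytes on Python 3, so matching it with a str pattern as Example #1 does raises TypeError there; later examples (#11, #15, #18) decode with `ensure_text` first. A minimal sketch of that decode step, assuming the `cloudscraper` package as a stand-in for the addon's `cfScraper` object (an assumption; the real object is defined elsewhere in the addon):

    import re
    import cloudscraper  # assumption: cfScraper behaves like a cloudscraper session

    scraper = cloudscraper.create_scraper()

    def fetch_text(url):
        # .content is bytes; decode to str before regex matching on Python 3
        return scraper.get(url).content.decode('utf-8', errors='replace')

    html = fetch_text('https://example.com')
    qualities = re.findall('class="quality">(.+?)</span>', html)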
Example #2
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostDict + hostprDict
         r = cfScraper.get(url).content
         match = re.compile(
             '<a href="http://www.tvmovieflix.com/report-.+?/(.+?)" target="_blank"><span class="a">Report Broken</span></a></li>',
             re.DOTALL | re.M).findall(r)
         for link in match:
             if "/show/" in url:
                 surl = "http://www.tvmovieflix.com/e/" + link
             else:
                 surl = "http://www.tvmovieflix.com/m/" + link
             i = cfScraper.get(surl).content
             match = re.compile('<IFRAME.+?SRC="(.+?)"',
                                re.DOTALL | re.IGNORECASE).findall(i)
             for link in match:
                 if "realtalksociety.com" in link:
                     r = requests.get(link).content
                     match = re.compile(
                         '<source src="(.+?)" type="video/mp4">',
                         re.DOTALL | re.IGNORECASE).findall(r)
                     for url in match:
                         valid, host = source_utils.is_host_valid(
                             url, hostDict)
                         quality, info = source_utils.get_release_quality(
                             url, url)
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'info': info,
                             'url': url,
                             'direct': True,
                             'debridonly': False
                         })
                 else:
                     valid, host = source_utils.is_host_valid(
                         link, hostDict)
                     quality, info = source_utils.get_release_quality(
                         link, link)
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': link,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
Example #3
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostDict + hostprDict
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         aliases = eval(data['aliases'])
         headers = {}
         if 'tvshowtitle' in data:
             ep = data['episode']
             url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                 self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
             # r = client.request(url, headers=headers, timeout='10', output='geturl')
             r = cfScraper.get(url).content
             if not r:  # fall back to the search page when the direct URL returns nothing
                 url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
         else:
             url = self.searchMovie(data['title'], data['year'], aliases, headers)
             if url is None:
                 url = '%s/film/%s/watching.html?ep=0' % (self.base_link, cleantitle.geturl(data['title']))
         if url is None:
             raise Exception()
         # r = client.request(url, headers=headers, timeout='10')
         r = cfScraper.get(url).content
         r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
         if 'tvshowtitle' in data:
             ep = data['episode']
             links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
         else:
             links = client.parseDOM(r, 'a', ret='player-data')
         for link in links:
             link = "https:" + link if not link.startswith('http') else link
             if '123movieshd' in link or 'seriesonline' in link:
                 # r = client.request(link, headers=headers, timeout='10')
                 r = cfScraper.get(link).content
                 r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
                 for i in r:
                     sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
             else:
                 valid, host = source_utils.is_host_valid(link, hostDict)
                 if valid:
                     quality, info = source_utils.get_release_quality(link, link)
                     if 'load.php' not in link:
                         sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': link, 'direct': False, 'debridonly': False})
         return sources
     except:
         return sources
Example #4
    def _get_items(self, url):
        try:
            r = cfScraper.get(url).content
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                data = dom.parse_dom(post, 'a', req='href')[1]
                link = urlparse.urljoin(self.base_link, data.attrs['href'])
                name = data.content
                t = name.split(self.hdlr)[0]

                if not cleantitle.get(re.sub('[()]', '', t)) == cleantitle.get(self.title):  # strip parentheses before comparing
                    continue

                try:
                    y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                if not y == self.hdlr:
                    continue

                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    dsize, isize = utils._size(size)
                except BaseException:
                    dsize, isize = 0, ''

                self.items.append((name, link, isize, dsize))
            return self.items
        except BaseException:
            return self.items
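Examples #4, #15, and #19 all pull a human-readable size token (e.g. "1.4 GB") out of the post and convert it with a `_size` helper whose implementation is not shown here. A plausible self-contained sketch of such a converter (an assumption, not the actual `utils._size`/`source_utils._size`):

    import re

    def parse_size(text):
        # Find the first token like "1.4 GB" or "700 MiB" and convert to GB.
        m = re.search(r'(\d+(?:[.,]\d+)*)\s*(GiB|MiB|GB|MB)', text)
        if not m:
            return 0.0, ''
        value = float(m.group(1).replace(',', ''))
        gb = value if m.group(2) in ('GB', 'GiB') else value / 1024.0
        return round(gb, 2), '%s %s' % (m.group(1), m.group(2))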
Example #5
 def resolve(self, url):
     if 'vidcloud' in url:
         #r = client.request(url)
         r = cfScraper.get(url).content
         url = re.compile('(?:file|source)(?:\:)\s*(?:\"|\')(.+?)(?:\"|\')'
                          ).findall(r)[0]
     return url
Example #6
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None:
                return sources
            hostDict = hostprDict + hostDict
            # headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            r = cfScraper.get(url).content
            qual = re.compile('<span class="calidad2">(.+?)</span>').findall(r)
            quality = 'SD'  # default if no quality tag is present
            for qcheck in qual:
                quality, info = source_utils.get_release_quality(
                    qcheck, qcheck)

            links = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(r)

            for link in links:

                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('Movie4kis - Exception: \n' + str(failure))
            return sources
Example #7
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            hostDict = hostprDict + hostDict
            if url is None: return sources

            r = cfScraper.get(url).content
            quality = re.findall(">(\w+)<\/p", r)
            if quality and quality[0] == "HD":
                quality = "720p"
            else:
                quality = "SD"
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            for i in r[0]:
                url = {'url': i.attrs['href'], 'data-film': i.attrs['data-film'], 'data-server': i.attrs['data-server'],
                       'data-name': i.attrs['data-name']}
                url = urllib.urlencode(url)
                valid, host = source_utils.is_host_valid(i.content, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #8
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = cfScraper.get(url).content
         try:
             data = re.compile(
                 "callvalue\('.+?','.+?','(.+?)://(.+?)/(.+?)'\)").findall(
                     r)
             for http, host, url in data:
                 url = '%s://%s/%s' % (http, host, url)
                 valid, host = source_utils.is_host_valid(host, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         except:
             pass
         return sources
     except Exception:
         return sources
Example #9
    def searchShow(self, title, season, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            search = '%s Season %01d' % (title, int(season))
            url = urlparse.urljoin(
                self.base_link, self.search_link % cleantitle.geturl(search))
            # r = client.request(url, headers=headers, timeout='10')
            r = cfScraper.get(url).content

            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1]))
                 for i in r]
            r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
            url = [
                i[0] for i in r
                if self.matchAlias(i[2][0], aliases) and i[2][1] == season
            ][0]
            url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('series94 - Exception: \n' + str(failure))
            return
Example #10
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link,
                                   self.search_link % cleantitle.geturl(title))
            # r = client.request(url, headers=headers, timeout='10')
            r = cfScraper.get(url).content

            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='oldtitle'))
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and (year == i[2])
                ][0]
            except:
                url = None
            if url is None:
                url = [
                    i[0] for i in results if self.matchAlias(i[1], aliases)
                ][0]
            url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('series95 - Exception: \n' + str(failure))
            return
Example #11
    def _get_sources(self, item):
        try:
            name = item[0]
            quality, info = source_utils.get_release_quality(name, item[1])
            info.insert(0, item[2])
            data = cfScraper.get(item[1]).content
            data = ensure_text(data, errors='replace')
            data = client.parseDOM(data, 'a', ret='href')
            url = [i for i in data if 'magnet:' in i][0]
            url = url.split('&tr')[0]
            info = ' | '.join(info)

            self._sources.append({
                'source': 'Torrent',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True,
                'size': item[3],
                'name': name
            })
        except:
            log_utils.log('1337x_exc1', 1)
Example #12
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = cfScraper.get(url).content
         try:
             data = re.compile(
                 "callvalue\('.+?','.+?','(.+?)://(.+?)/(.+?)'\)").findall(
                     r)
             for http, host, url in data:
                 url = '%s://%s/%s' % (http, host, url)
                 valid, host = source_utils.is_host_valid(host, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         except:
             failure = traceback.format_exc()
             log_utils.log('projectfree2 - Exception: \n' + str(failure))
         return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('projectfree3 - Exception: \n' + str(failure))
         return sources
Example #13
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         hostDict = hostprDict + hostDict
         #headers = {'Referer': url}
         r = cfScraper.get(url).content
         u = client.parseDOM(r,
                             "span",
                             attrs={"class": "movie_version_link"})
         for t in u:
             match = client.parseDOM(t, 'a', ret='data-href')
             for url in match:
                 if url in str(sources):
                     continue
                 quality, info = source_utils.get_release_quality(url, url)
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
Example #14
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         #html = client.request(url)
         html = cfScraper.get(url).content
         quality = 'SD'  # defaults in case no quality tag is found
         info = ''
         quals = re.compile(
             '<div>Quanlity: <span class="quanlity">(.+?)</span></div>',  # "Quanlity" is the site's own spelling
             re.DOTALL).findall(html)
         for qual in quals:
             quality = source_utils.check_url(qual)
             info = qual
         links = re.compile('var link_.+? = "(.+?)"',
                            re.DOTALL).findall(html)
         for url in links:
             if not url.startswith('http'):
                 url = "https:" + url
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'info': info,
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('fmovies1 - Exception: \n' + str(failure))
         return sources
Example #15
    def _get_sources(self, name, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            name = client.replaceHTMLCodes(name)
            try:
                _name = name.lower().replace('rr',
                                             '').replace('nf', '').replace(
                                                 'ul', '').replace('cu', '')
            except:
                _name = name
            l = dom_parser2.parse_dom(r, 'pre', {'class': 'links'})
            s = ''
            for i in l:
                s += i.content
            urls = re.findall(
                r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
                s,  # search the accumulated link text, not just the last <pre> block
                flags=re.MULTILINE | re.DOTALL)
            urls = [
                i for i in urls if not i.endswith(('.rar', '.zip', '.iso',
                                                   '.idx', '.sub', '.srt'))
            ]
            for url in urls:
                if url in str(self.sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                #host = host.encode('utf-8')
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        name)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                self.sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': _name
                })
        except:
            log_utils.log('RMZ - Exception', 1)
Example #16
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         data = parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         q = '%s' % cleantitle.get_gan_url(data['title'])
         url = self.base_link + self.search_link % q
         r = cfScraper.get(url).content
         r = ensure_text(r)
         v = re.compile(
             '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\s+<span class=".+?">(.+?)</span>'
         ).findall(r)
         #<a href="https://0123movies.in/695/1917-2/" data-url="" class="ml-mask jt" data-hasqtip="0" oldtitle="1917" title="" aria-describedby="qtip-0"> <span class="mli-quality">4K</span><img data-original="https://0123movies.in/wp-content/uploads/2020/09/iZf0KyrE25z1sage4SYFLCCrMi9.jpg" class="lazy thumb mli-thumb" alt="1917" src="https://0123movies.in/wp-content/uploads/2020/09/iZf0KyrE25z1sage4SYFLCCrMi9.jpg" style="display: inline-block;"><span class="mli-info"><h2>1917</h2></span></a>
         for url, check, qual in v:
             t = '%s (%s)' % (data['title'], data['year'])
             if t in check:
                 #key = url.split('-hd')[1]
                 #url = '0123movies.in/moviedownload.php?q=%s' % key
                 r = cfScraper.get(url).content
                 r = ensure_text(r)
                 r = re.compile(
                     '<a rel=".+?" href="(.+?)" target=".+?">').findall(r)
                 for url in r:
                     if any(x in url for x in ['.rar']): continue
                     #quality, _ = source_utils.get_release_quality(qual, url)
                     valid, host = source_utils.is_host_valid(url, hostDict)
                     if valid:
                         #info = ' | '.join(info)
                         sources.append({
                             'source': host,
                             'quality': '720p',
                             'language': 'en',
                             'url': url,
                             'direct': False,
                             'debridonly': False
                         })
         return sources
     except:
         failure = traceback.format_exc()
         log_utils.log('Ganool Testing - Exception: \n' + str(failure))
         return sources
Example #17
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         q = '%s' % cleantitle.get_gan_url(data['title'])
         url = self.base_link + self.search_link % q
         r = cfScraper.get(url).content
         v = re.compile(
             '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\s+<span class=".+?">(.+?)</span>'
         ).findall(r)
         for url, check, qual in v:
             t = '%s (%s)' % (data['title'], data['year'])
             if t in check:
                 key = url.split('-hd')[1]
                 url = 'https://fmovies.tw/moviedownload.php?q=%s' % key
                 r = cfScraper.get(url).content
                 r = re.compile(
                     '<a rel=".+?" href="(.+?)" target=".+?">').findall(r)
                 for url in r:
                     if any(x in url for x in ['.rar']): continue
                     quality, info = source_utils.get_release_quality(
                         qual, url)
                     valid, host = source_utils.is_host_valid(url, hostDict)
                     if valid:
                         info = ' | '.join(info)
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'url': url,
                             'info': info,
                             'direct': False,
                             'debridonly': False
                         })
         return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('---Ganool Testing - Exception: \n' + str(failure))
         return sources
Example #18
    def sources(self, url, hostDict, hostprDict):

        self.sources = []

        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            hdlr = data['year']
            hdlr2 = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else ''
            imdb = data['imdb']

            url = self.search(title, hdlr)
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            if hdlr2 == '':
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
            else:
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
            r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
            r = [(i.content, urljoin(self.base_link, i.attrs['href']))
                 for i in r if i and i.content != 'Watch']
            if hdlr2 != '':
                r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]

            self.hostDict = hostDict + hostprDict
            threads = []

            for i in r:
                threads.append(workers.Thread(self._get_sources, i[0], i[1]))
            [i.start() for i in threads]

            alive = [x for x in threads if x.is_alive() is True]
            while alive:
                alive = [x for x in threads if x.is_alive() is True]
                time.sleep(0.1)
            return self.sources
        except:
            log_utils.log('RMZ - Exception', 1)
            return self.sources
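Example #18 fans the per-release `_get_sources` calls out over `workers.Thread` objects and busy-waits on `is_alive()`. An equivalent fan-out using only the standard library, sketched with `concurrent.futures` (the names here are illustrative, not the addon's API):

    from concurrent.futures import ThreadPoolExecutor

    def collect_sources(releases, get_sources):
        # get_sources(name, link) appends into a shared list internally,
        # mirroring the self._get_sources pattern above.
        with ThreadPoolExecutor(max_workers=8) as pool:
            futures = [pool.submit(get_sources, name, link) for name, link in releases]
            for f in futures:
                f.result()  # surface exceptions instead of silently dropping them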
Example #19
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            self.hostDict = hostDict + hostprDict
            if url is None:
                return sources
            if debrid.status() is False: return sources
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(quote(query))
                url = urljoin(self.base_link, url)
            else:
                url = self.moviesearch.format(quote(query))
                url = urljoin(self.base_link, url)

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                link = client.parseDOM(post, 'a', ret='href')[0]
                hash = re.findall(r'(\w{40})', link, re.I)
                if hash:
                    url = 'magnet:?xt=urn:btih:' + hash[0]
                    name = link.split('title=')[1]
                    t = name.split(self.hdlr)[0]
                    if not cleantitle.get(re.sub('[()]', '', t)) == cleantitle.get(self.title): continue  # strip parentheses before comparing
                    try:
                        y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                    except:
                        y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                    if not y == self.hdlr: continue
                    quality, info = source_utils.get_release_quality(name, name)
                    try:
                        size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)
                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
                                    'debridonly': True, 'size': dsize, 'name': name})
            return sources
        except:
            log_utils.log('lime0 - Exception', 1)
            return sources
Example #20
 def resolve(self, url):
     try:
         urldata = urlparse.parse_qs(url)
         urldata = dict((i, urldata[i][0]) for i in urldata)
         post = {'ipplugins': 1, 'ip_film': urldata['data-film'], 'ip_server': urldata['data-server'], 'ip_name': urldata['data-name'], 'fix': "0"}
         cfScraper.headers.update({'Referer': urldata['url'], 'X-Requested-With': 'XMLHttpRequest'})
         p1 = cfScraper.post('http://123movieshubz.com/ip.file/swf/plugins/ipplugins.php', data=post).content
         p1 = json.loads(p1)
         p2 = cfScraper.get('http://123movieshubz.com/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0' % (
             p1['s'], urldata['data-server'])).content
         p2 = json.loads(p2)
         p3 = cfScraper.get('http://123movieshubz.com/ip.file/swf/ipplayer/api.php?hash=%s' % (p2['hash'])).content
         p3 = json.loads(p3)
         n = p3['status']
         if not n:
             p2 = cfScraper.get('http://123movieshubz.com/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1' % (
                 p1['s'], urldata['data-server'])).content
             p2 = json.loads(p2)
         url = "https:%s" % p2["data"].replace("\/", "/")
         return url
     except:
         return
Example #21
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s-s%02de%02d' % (data['tvshowtitle'], int(
                data['season']), int(data['episode']))
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url).replace('+', '-')

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            try:
                data = re.compile(
                    '<a href="(.+?)" target="_blank" rel="nofollow" title.+?'
                ).findall(r)
                for url in data:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        quality, info = source_utils.get_release_quality(
                            url, url)
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
            except:
                log_utils.log('projectfree2 - Exception', 1)
                pass
            return sources
        except:
            log_utils.log('projectfree3 - Exception', 1)
            return sources
Example #22
    def __get_base_url(self, fallback):
        try:
            for domain in self.domains:
                try:
                    url = 'https://%s' % domain
                    result = cfScraper.get(url).content
                    search_n = re.findall('<input type="search" placeholder="(.+?)"', result, re.DOTALL)[0]
                    if search_n and 'Search for torrents..' in search_n:
                        return url
                except Exception:
                    pass
        except Exception:
            pass

        return fallback
Example #23
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         hostDict = hostDict + hostprDict
         sourcePage = ensure_text(cfScraper.get(url).content,
                                  errors='replace')
         thesources = re.compile('<tbody>(.+?)</tbody>',
                                 re.DOTALL).findall(sourcePage)[0]
         links = re.compile(
             "<a href=\'(.+?)\' target=\'_blank\'>Download</a>",
             re.DOTALL).findall(thesources)
         for link in links:
             linkPage = ensure_text(cfScraper.get(link).content,
                                    errors='replace')
             vlink = re.compile(
                 '<a id="link" rel="nofollow" href="(.+?)" class="btn"',
                 re.DOTALL).findall(linkPage)
             for zlink in vlink:
                 valid, host = source_utils.is_host_valid(zlink, hostDict)
                 if valid:
                     quality, info = source_utils.get_release_quality(
                         zlink, zlink)
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'url': zlink,
                         'info': info,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
Example #24
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         search = cleantitle.getsearch(imdb)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search.replace(':', ' ').replace(' ', '+'))
         r = cfScraper.get(url).content
         results = re.compile(
             '<div class="post_thumb".+?href="(.+?)"><h2 class="thumb_title">(.+?)</h2>',
             re.DOTALL).findall(r)
         for link, name in results:
             if cleantitle.get(title) in cleantitle.get(name):
                 return link
         return
     except Exception:
         return
Example #25
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         search_id = title.lower()
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(':', '%3A').replace(',', '%2C').replace('&', '%26').replace("'", '%27').replace(' ', '+').replace('...', ' '))
         search_results = cfScraper.get(url).content
         match = re.compile('<div data-movie-id=.+?href="(.+?)".+?oldtitle="(.+?)"',re.DOTALL).findall(search_results)
         for movie_url, movie_title in match:
             clean_title = cleantitle.get(title)
             movie_title = movie_title.replace('&#8230', ' ').replace('&#038', ' ').replace('&#8217', ' ').replace('...', ' ')
             clean_movie_title = cleantitle.get(movie_title)
             if clean_movie_title in clean_title:
                 return movie_url
         return
     except:
         return
Example #26
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         html = cfScraper.get(url).content
         links = re.compile('id="linkplayer.+?href="(.+?)"',re.DOTALL).findall(html)
         for link in links:
             quality, info = source_utils.get_release_quality(link, link)
             host = link.split('//')[1].replace('www.', '')
             host = host.split('/')[0].split('.')[0].title()
             valid, host = source_utils.is_host_valid(host, hostDict)
             if valid:
                 sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
         return sources
     except:
         return sources
Example #27
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None: return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         clean_title = cleantitle.geturl(url['tvshowtitle']) + '-s%02d' % int(season)
         url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, url['year'])))
         r = cfScraper.get(url).content
         r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         for i in r[0]:
             if i.content == 'Episode %s' % episode:
                 url = i.attrs['href']
         return url
     except:
         return
Example #28
 def __get_base_url(self, fallback):
     try:
         for domain in self.domains:
             try:
                 url = 'https://%s' % domain
                 #result = client.request(url, limit=1, timeout='5')
                 result = cfScraper.get(url, timeout=4).content
                 result = ensure_text(result, errors='ignore')
                 search_n = re.findall('<title>(.+?)</title>', result, re.DOTALL)[0]
                 if result and 'LimeTorrents' in search_n:
                     return url
             except:
                 pass
     except:
         pass
     return fallback
Example #29
    def __get_base_url(self, fallback):
        try:
            for domain in self.domains:
                try:
                    url = 'https://%s' % domain
                    result = cfScraper.get(url, timeout=7).content
                    result = ensure_text(result, errors='ignore')
                    search_n = re.findall('<title>(.+?)</title>', result,
                                          re.DOTALL)[0]
                    if result and '1337x' in search_n:
                        return url
                except Exception:
                    pass
        except Exception:
            pass

        return fallback
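The `__get_base_url` helpers in Examples #22, #28, and #29 share one failover pattern: probe each mirror domain, look for a known marker string in the response, and fall back to a default. A generic sketch of that pattern, assuming a cloudscraper-style session (`domains`, `marker`, and `fallback` are placeholders):

    import re
    import cloudscraper

    scraper = cloudscraper.create_scraper()

    def get_base_url(domains, marker, fallback, timeout=7):
        # Return the first mirror whose <title> contains the marker string.
        for domain in domains:
            try:
                html = scraper.get('https://%s' % domain, timeout=timeout).content
                html = html.decode('utf-8', errors='ignore')
                title = re.findall('<title>(.+?)</title>', html, re.DOTALL)
                if title and marker in title[0]:
                    return 'https://%s' % domain
            except Exception:
                continue
        return fallback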
Example #30
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            search = cleantitle.getsearch(title)
            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % (search.replace(':', ' ').replace(' ', '+'))

            r = cfScraper.get(url).content
            info = re.findall(
                '<div class="boxinfo".+?href="(.+?)".+?<h2>(.+?)</h2>.+?class="year">(.+?)</span>',
                r, re.DOTALL)
            for link, name, r_year in info:
                if cleantitle.get(title) in cleantitle.get(name):
                    if year in str(r_year):
                        return link
            return
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('Movie4kis - Exception: \n' + str(failure))
            return