Example #1
    def _get_sources(self, name, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = self.scraper.get(url, headers=headers).content

            name = client.replaceHTMLCodes(name)
            l = dom_parser.parse_dom(r, 'div', {'class': 'ppu2h'})
            s = ''

            for i in l:
                s += i.content

            # Search the concatenated blocks (s), not just the final loop item.
            urls = re.findall(
                r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
                s,
                flags=re.MULTILINE | re.DOTALL)
            # Keep a link only when none of the unwanted extensions appear.
            urls = [
                i for i in urls
                if not any(x in i for x in ('.rar', '.zip', '.iso', '.idx', '.sub'))
            ]

            for url in urls:
                if url in str(self.sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                quality, info = source_utils.get_release_quality(name, url)

                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        name)[0]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass

                info = ' | '.join(info)

                self.sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
        except:
            source_utils.scraper_error('RAPIDMOVIEZ')
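Example #2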
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = self.scraper.get(url).content
         try:
             v = re.findall('document.write\(Base64.decode\("(.+?)"\)',
                            r)[0]
             b64 = base64.b64decode(v)
             url = client.parseDOM(b64, 'iframe', ret='src')[0]
             try:
                 host = re.findall(
                     '([\w]+[.][\w]+)$',
                     urlparse.urlparse(url.strip().lower()).netloc)[0]
                 host = client.replaceHTMLCodes(host)
                 host = host.encode('utf-8')
                 sources.append({
                     'source': host,
                     'quality': 'SD',
                     'language': 'en',
                     'url': url.replace('\/', '/'),
                     'direct': False,
                     'debridonly': False
                 })
             except:
                 pass
         except:
             pass
         r = client.parseDOM(r, 'div', {'class': 'server_line'})
         r = [(client.parseDOM(i, 'a', ret='href')[0],
               client.parseDOM(i, 'p', attrs={'class':
                                              'server_servername'})[0])
              for i in r]
         if r:
             for i in r:
                 try:
                     host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                     url = i[0]
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     if 'other' in host: continue
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': url.replace('\/', '/'),
                         'direct': False,
                         'debridonly': False
                     })
                 except:
                     pass
         return sources
     except Exception:
         return sources
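Example #3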
    def _get_sources(self, item, hostDict):
        try:
            quality, info = source_utils.get_release_quality(item[0], item[1])
            size = item[2] if item[2] != '0' else item[0]

            try:
                size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', size)[-1]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                size = '%.2f GB' % size
                info.append(size)

            except Exception:
                pass

            data = self.scraper.get(item[1]).content

            try:
                r = client.parseDOM(data, 'li', attrs={'class': 'elemento'})
                r = [(dom_parser2.parse_dom(i, 'a', req='href')[0],
                      dom_parser2.parse_dom(i, 'img', req='alt')[0],
                      dom_parser2.parse_dom(i, 'span', {'class': 'd'})[0]) for i in r]
                urls = [('http:' + i[0].attrs['href'] if not i[0].attrs['href'].startswith('http') else
                         i[0].attrs['href'], i[1].attrs['alt'], i[2].content) for i in r if i[0] and i[1]]

                for url, host, qual in urls:

                    try:
                        if any(x in url for x in ['.rar', '.zip', '.iso', ':Upcoming']): raise Exception()
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        valid, host = source_utils.is_host_valid(host, hostDict)
                        if not valid: continue
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        quality, info = source_utils.get_release_quality(qual, quality)
                        info.append('HEVC')
                        info = ' | '.join(info)
                        self._sources.append(
                            {'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                             'direct': False, 'debridonly': True})
                    except Exception:
                        pass
            except Exception:
                pass

        except BaseException:
            return
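The size-normalisation idiom in this example also appears in Examples #1, #19, and #23: pull a '<number> GB/MB' token out of the release text, then divide megabyte values by 1024. Below is a minimal standalone sketch of the same technique; the helper name parse_size_gb and the sample strings are ours, not from the addons.

import re

def parse_size_gb(text):
    # Take the last size token in the text, as Example #3 does with [-1].
    matches = re.findall(
        r'((?:\d+\.\d+|\d+,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', text)
    if not matches:
        return None
    size = matches[-1]
    div = 1 if size.endswith(('GB', 'GiB')) else 1024  # MB/MiB -> GB
    value = float(re.sub(r'[^0-9.,]', '', size).replace(',', '.')) / div
    return '%.2f GB' % value

print(parse_size_gb('Movie.2019.1080p.x264 [1.4 GB]'))  # 1.40 GB
print(parse_size_gb('Release info: 700 MB, mirror 2'))  # 0.68 GB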
Example #4
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = self.scraper.get(url).content
         r = dom_parser2.parse_dom(r, 'p', {'class': 'server_play'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         r = [(i[0].attrs['href'],
               re.search('/(\w+).html', i[0].attrs['href'])) for i in r
              if i]
         r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
         for i in r:
             try:
                 host = i[1]
                 if str(host) in str(hostDict):
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': i[0].replace('\/', '/'),
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except Exception:
         return sources
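Example #5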
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'details'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}),
                  dom_parser.parse_dom(i, 'span', attrs={'class': 'year'}))
                 for i in r]
            r = [(dom_parser.parse_dom(i[0][0], 'a',
                                       req='href'), i[1][0].content) for i in r
                 if i[0] and i[1]]
            r = [(i[0][0].attrs['href'],
                  client.replaceHTMLCodes(i[0][0].content), i[1]) for i in r
                 if i[0]]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #6
    def __search(self, title, season):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(title)))
            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(title)

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'moviefilm'})
            r = client.parseDOM(r, 'div', attrs={'class': 'movief'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a'))
                 for i in r]
            r = [(i[0][0], i[1][0].lower()) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], i[1], re.findall('(.+?)\s+(?:saison)\s+(\d+)', i[1]))
                 for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], re.findall('\((.+?)\)$', i[1]), i[2]) for i in r]
            r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1], i[3]) for i in r]
            r = [
                i[0] for i in r
                if t == cleantitle.get(i[1]) and int(i[2]) == int(season)
            ][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
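The chained comprehensions above reduce to two regexes: one splits '<title> saison <n>' labels, the other replaces the title with a trailing '(...)' alternative when present. A condensed sketch of just that parsing, with invented labels:

import re

def split_title_season(label):
    m = re.findall(r'(.+?)\s+(?:saison)\s+(\d+)', label)
    title, season = m[0] if m else (label, '0')
    # A trailing parenthesised title replaces the outer one, as above.
    paren = re.findall(r'\((.+?)\)$', title)
    if paren:
        title = paren[0]
    return title, season

print(split_title_season('la casa de papel saison 2'))       # ('la casa de papel', '2')
print(split_title_season('money heist (la casa de papel)'))  # ('la casa de papel', '0')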
Example #7
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            r = self.scraper.get(url, headers={'referer': self.base_link}).content
            links = client.parseDOM(r, 'a', ret='href', attrs={'target': '.+?'})
            links = [x for y, x in enumerate(links) if x not in links[:y]]

            for i in links:
                try:
                    url = i
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['r'][0]
                    url = url.decode('base64')
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        continue
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False,
                                    'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
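The loop above unwraps each proxied link by base64-decoding its r query parameter; url.decode('base64') is the Python 2 spelling. A version-agnostic sketch of that single step, using an invented proxy URL:

import base64

try:                   # Python 2
    from urlparse import urlparse, parse_qs
except ImportError:    # Python 3
    from urllib.parse import urlparse, parse_qs

def unwrap_redirect(link):
    # Grab the 'r' parameter and decode it, mirroring the loop above.
    encoded = parse_qs(urlparse(link).query)['r'][0]
    return base64.b64decode(encoded).decode('utf-8')

# 'aHR0cDovL2V4YW1wbGUuY29tL3ZpZGVv' encodes 'http://example.com/video'
print(unwrap_redirect('http://proxy.invalid/out?r=aHR0cDovL2V4YW1wbGUuY29tL3ZpZGVv'))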
Example #8
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            query = self.tvsearch_link % urllib.quote_plus(
                cleantitle.query(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query.lower())
            result = client.request(query, referer=self.base_link)
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'index_item.+?'})

            result = [(dom.parse_dom(i, 'a', req=['href', 'title'])[0])
                      for i in result if i]
            result = [(
                i.attrs['href']
            ) for i in result if cleantitle.get(tvshowtitle) == cleantitle.get(
                re.sub(
                    '(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                    '',
                    i.attrs['title'],
                    flags=re.I))][0]

            url = client.replaceHTMLCodes(result)
            url = url.encode('utf-8')
            return url
        except Exception:
            return
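The title match above hinges on a single re.sub that cuts everything from the first year, SxxEyy, or 3D token onward before the cleantitle comparison. The same regex in isolation (the sample titles are invented):

import re

TAG = r'(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)'

def base_title(raw):
    return re.sub(TAG, '', raw, flags=re.I)

print(base_title('Some Show (2016) S01E02 720p'))  # 'Some Show ' (cleantitle.get normalises the rest)
print(base_title('Another.Movie.2019.1080p.WEB'))  # 'Another.Movie'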
Example #9
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url'))
            season = data.get('season')
            episode = data.get('episode')

            r = client.request(url)

            if season and episode:
                r = dom_parser.parse_dom(r, 'select', attrs={'id': 'SeasonSelection'}, req='rel')[0]
                r = client.replaceHTMLCodes(r.attrs['rel'])[1:]
                r = urlparse.parse_qs(r)
                r = dict([(i, r[i][0]) if r[i] else (i, '') for i in r])
                r = urlparse.urljoin(self.base_link, self.get_links_epi % (r['Addr'], r['SeriesID'], season, episode))
                r = client.request(r)

            r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'HosterList'})[0]
            r = dom_parser.parse_dom(r, 'li', attrs={'id': re.compile('Hoster_\d+')}, req='rel')
            r = [(client.replaceHTMLCodes(i.attrs['rel']), i.content) for i in r if i[0] and i[1]]
            r = [(i[0], re.findall('class="Named"[^>]*>([^<]+).*?(\d+)/(\d+)', i[1])) for i in r]
            r = [(i[0], i[1][0][0].lower().rsplit('.', 1)[0], i[1][0][2]) for i in r if len(i[1]) > 0]

            for link, hoster, mirrors in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue
                u = urlparse.parse_qs('&id=%s' % link)
                u = dict([(x, u[x][0]) if u[x] else (x, '') for x in u])
                for x in range(0, int(mirrors)):
                    url = self.mirror_link % (u['id'], u['Hoster'], x + 1)
                    if season and episode: url += "&Season=%s&Episode=%s" % (season, episode)
                    try:
                        sources.append(
                            {'source': hoster, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': False,
                             'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources
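Example #10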
def strip_domain(url):
    try:
        if url.lower().startswith('http') or url.startswith('/'):
            url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        return
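strip_domain reduces an absolute link to its path-plus-query so it can later be re-joined against a scraper's base_link. A sketch of just the regex step, with the replaceHTMLCodes and utf-8 encoding calls omitted:

import re

def strip_domain_path(url):
    if url.lower().startswith('http') or url.startswith('/'):
        url = re.findall(r'(?://.+?|)(/.+)', url)[0]
    return url

print(strip_domain_path('https://example.com/watch/123?x=1'))  # /watch/123?x=1
print(strip_domain_path('/already/relative'))                  # /already/relative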
Example #11
	def resolve(self, url):
		try:
			url = json.loads(client.request(url)).get('code')
			url = url.replace('\/', '/')
			url = client.replaceHTMLCodes(url).encode('utf-8')
			if url.startswith('/'): url = 'https:%s' % url
			return url
		except:
			return
Example #12
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            url = urlparse.urljoin(self.base_link,
                                   url) if not url.startswith('http') else url

            result = client.request(url)
            data = re.findall(r'\s*(eval.+?)\s*</script', result, re.DOTALL)[1]
            data = jsunpack.unpack(data).replace('\\', '')

            pattern = '''rtv='(.+?)';var aa='(.+?)';var ba='(.+?)';var ca='(.+?)';var da='(.+?)';var ea='(.+?)';var fa='(.+?)';var ia='(.+?)';var ja='(.+?)';var ka='(.+?)';'''
            links_url = re.findall(pattern, data, re.DOTALL)[0]
            slug = 'slug={}'.format(url.split('/')[-1])
            links_url = self.base_link + [''.join(links_url)][0].replace(
                'slug=', slug)
            links = client.request(links_url)
            links = client.parseDOM(links, 'tbody')

            for i in links:
                try:
                    data = [(client.parseDOM(i, 'a', ret='href')[0],
                             client.parseDOM(i,
                                             'span',
                                             attrs={'class':
                                                    'version_host'})[0])][0]
                    url = urlparse.urljoin(self.base_link, data[0])
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = data[1]
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid:
                        raise Exception()

                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    quality, info = source_utils.get_release_quality(
                        quality, url)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except BaseException:
                    pass

            return sources
        except Exception:
            return sources
Example #13
 def resolve(self, url):
     try:
         r = client.request(url)
         r = dom_parser2.parse_dom(
             r, 'a', req=['href', 'data-episodeid', 'data-linkid'])[0]
         url = r.attrs['href']
         url = client.replaceHTMLCodes(url)
         url = url.encode('utf-8')
         return url
     except:
         return
Example #14
 def getTVShowTranslation(self, thetvdb, lang):
     try:
         url = 'http://thetvdb.com/api/%s/series/%s/%s.xml' % (
             'MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64'), thetvdb, lang)
         r = client.request(url)
         title = client.parseDOM(r, 'SeriesName')[0]
         title = client.replaceHTMLCodes(title)
         title = title.encode('utf-8')
         return title
     except:
         pass
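Example #15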
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s s%02de%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
             data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url)
         r = client.request(url)
         try:
             posts = client.parseDOM(r, 'div', attrs={'class': 'box-info'})
             for post in posts:
                 data = client.parseDOM(post, 'a', ret='href')
                 urls = [i for i in data if '/torrent/' in i]
                 for u in urls:
                     match = '%s %s' % (title, hdlr)
                     match = match.replace('+', '-').replace(' ', '-').replace(':-', '-').replace('---', '-')
                     if match not in u: continue
                     u = self.base_link + u
                     r = client.request(u)
                     r = client.parseDOM(r, 'div', attrs={'class': 'torrent-category-detail clearfix'})
                     for t in r:
                         link = re.findall('href="magnet:(.+?)" onclick=".+?"', t)[0]
                         link = 'magnet:%s' % link
                         link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                         seeds = int(re.compile('<span class="seeds">(.+?)</span>').findall(t)[0])
                         if self.min_seeders > seeds:
                             continue
                         quality, info = source_utils.get_release_quality(link, link)
                         try:
                             size = re.findall('<strong>Total size</strong> <span>(.+?)</span>', t)
                             for size in size:
                                 size = '%s' % size
                                 info.append(size)
                         except BaseException:
                             pass
                         info = ' | '.join(info)
                         sources.append(
                             {'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link, 'info': info,
                              'direct': False, 'debridonly': True})
         except:
             return sources
         return sources
     except:
         return sources
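This scraper and Example #19 normalise magnet links the same way: decode the HTML entities, then cut the tracker list at the first '&tr', keeping only the info-hash and display name. The trimming step on its own, with a dummy hash:

def trim_magnet(link):
    # Everything from the first tracker parameter onward is dropped.
    return link.split('&tr')[0]

magnet = ('magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567'
          '&dn=Example&tr=udp://tracker.invalid:80')
print(trim_magnet(magnet))
# magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567&dn=Example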
Example #16
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            urls = self.search(data['title'], data['year'])

            for url in urls:
                try:
                    link = client.replaceHTMLCodes(url[1])
                    link = link.encode('utf-8')
                    # sources holds dicts, so compare against their url fields
                    if any(link == s['url'] for s in sources): continue
                    if 'snahp' in link:
                        data = client.request(link)
                        data = client.parseDOM(data, 'center')
                        data = [i for i in data if 'Hidden Link' in i][0]
                        link = client.parseDOM(data, 'a', ret='href')[0]
                    if 'google' in link:
                        quality, info2 = source_utils.get_release_quality(
                            url[0], link)
                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })

                    else:
                        host = re.findall(
                            '([\w]+[.][\w]+)$',
                            urlparse.urlparse(link.strip().lower()).netloc)[0]
                        if host in hostDict:
                            host = host.encode('utf-8')
                            quality, info2 = source_utils.get_release_quality(
                                url[0], link)
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'direct': False,
                                'debridonly': False
                            })
                except BaseException:
                    pass
            return sources
        except BaseException:
            return sources
Example #17
 def resolve(self, url):
     try:
         h = urlparse.urlparse(url.strip().lower()).netloc
         r = client.request(url)
         r = r.rsplit('"underplayer"')[0].rsplit("'underplayer'")[0]
         u = re.findall('\'(.+?)\'', r) + re.findall('\"(.+?)\"', r)
         u = [client.replaceHTMLCodes(i) for i in u]
         u = [i for i in u if i.startswith('http') and h not in i]
         url = u[-1].encode('utf-8')
         return url
     except:
         return
Example #18
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         aliases = eval(data['aliases'])
         headers = {}
         if 'tvshowtitle' in data:
             ep = data['episode']
             url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                 self.base_link, cleantitle.geturl(data['tvshowtitle']),
                 int(data['season']), ep)
             # output='geturl' resolves to the final URL (or None on failure)
             url = client.request(url, headers=headers, timeout='10', output='geturl')
             if url is None:
                 url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
         else:
             url = self.searchMovie(data['title'], data['year'], aliases, headers)
             if url is None:
                 url = '%s/film/%s/watching.html?ep=0' % (self.base_link, cleantitle.geturl(data['title']))
         if url is None:
             raise Exception()
         r = client.request(url, headers=headers, timeout='10')
         r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
         if 'tvshowtitle' in data:
             ep = data['episode']
             links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
         else:
             links = client.parseDOM(r, 'a', ret='player-data')
         for link in links:
             if '123movieshd' in link or 'seriesonline' in link:
                 r = client.request(link, headers=headers, timeout='10')
                 r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
                 for i in r:
                     try:
                         sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'],
                                         'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                     except:
                         pass
             else:
                 try:
                     host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                     if host not in hostDict:
                         raise Exception()
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False,
                                     'debridonly': False})
                 except:
                     pass
         return sources
     except:
         return sources
Example #19
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources
            if debrid.status() is False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            query = '%s %s' % (data['title'], data['year'])
            url = self.search_link % urllib.quote(query)
            url = urlparse.urljoin(self.base_link, url).replace('%20', '-')
            html = client.request(url)
            try:
                results = client.parseDOM(html, 'div', attrs={'class': 'ava1'})
            except:
                failure = traceback.format_exc()
                log_utils.log('YIFYDLL - Exception: \n' + str(failure))
                return sources
            for torrent in results:
                link = re.findall(
                    'a data-torrent-id=".+?" href="(magnet:.+?)" class=".+?" title="(.+?)"',
                    torrent, re.DOTALL)
                for link, name in link:
                    link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                    quality, info = source_utils.get_release_quality(
                        name, name)
                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            torrent)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except Exception:
                        pass
                    info = ' | '.join(info)
                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('YIFYDLL - Exception: \n' + str(failure))
            return sources
Example #20
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None:
                return

            url = urlparse.urljoin(self.base_link, url) if url.startswith('/') else url
            url = url.split('online.html')[0]
            url = '%s%s-online.html' % (url, 'season-%01d-episode-%01d' % (int(season), int(episode)))
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except BaseException:
            return
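Example #21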
def parse(url):
    try:
        url = client.replaceHTMLCodes(url)
    except:
        pass
    try:
        url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
    except:
        pass
    try:
        url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
    except:
        pass
    return url
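For illustration, here is what parse() unwraps when a proxy stores the real target in the u parameter; the proxy URL below is invented and the HTML-entity decoding is left out:

try:                   # Python 2
    import urlparse
except ImportError:    # Python 3
    from urllib import parse as urlparse

def unwrap(url):
    # Same cascade as parse() above: try 'u', then 'q', ignore misses.
    for key in ('u', 'q'):
        try:
            url = urlparse.parse_qs(urlparse.urlparse(url).query)[key][0]
        except Exception:
            pass
    return url

print(unwrap('http://proxy.invalid/redirect?u=http%3A%2F%2Fexample.com%2Ffile'))
# http://example.com/file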
Example #22
	def episode(self, url, imdb, tvdb, title, premiered, season, episode):
		try:
			if not url:
				return
			r = client.request(urlparse.urljoin(self.base_link, url))
			r = client.parseDOM(r, 'a', attrs={'class': 'item',
			                                   'href': '[^\'"]*/saison-%s/episode-%s[^\'"]*' % (season, episode)},
			                    ret='href')[0]
			url = re.findall('(?://.+?|)(/.+)', r)[0]
			url = client.replaceHTMLCodes(url)
			url = url.encode('utf-8')
			return url
		except:
			return
Example #23
    def _get_sources(self, url):
        try:
            item = client.request(url[0])
            title = url[1]
            links = dom_parser2.parse_dom(item, 'a', req='href')
            links = [i.attrs['href'] for i in links]
            info = []
            try:
                size = re.findall(
                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', item)[0]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                size = '%.2f GB' % size
                info.append(size)
            except Exception:
                pass
            info = ' | '.join(info)
            for url in links:
                if 'youtube' in url: continue
                # Raising here would abort the whole loop via the outer
                # except and drop the remaining links, so skip instead.
                if any(x in url.lower() for x in ['.rar.', '.zip.', '.iso.']) \
                        or url.lower().endswith(('.rar', '.zip', '.iso')):
                    continue
                if any(x in url.lower() for x in ['youtube', 'sample', 'trailer']):
                    continue
                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid: continue

                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                quality, info2 = source_utils.get_release_quality(title, url)
                if url in str(self._sources): continue

                self._sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
        except Exception:
            pass
Example #24
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = proxy.request(url, 'tv shows')

            links = client.parseDOM(r,
                                    'a',
                                    ret='href',
                                    attrs={'target': '.+?'})
            links = [x for y, x in enumerate(links) if x not in links[:y]]

            for i in links:
                try:
                    url = i
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(
                        urlparse.urlparse(url).query)['r'][0]
                    url = url.decode('base64')
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict: raise Exception()
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #25
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = self.scraper.get(url).content
            r = r.replace('\\"', '"')

            links = dom_parser.parse_dom(r,
                                         'tr',
                                         attrs={'id': 'tablemoviesindex2'})

            for i in links:
                try:
                    host = dom_parser.parse_dom(i, 'img',
                                                req='alt')[0].attrs['alt']
                    host = host.split()[0].rsplit('.', 1)[0].strip().lower()
                    host = host.encode('utf-8')

                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid: continue

                    url = dom_parser.parse_dom(i, 'a',
                                               req='href')[0].attrs['href']
                    url = client.replaceHTMLCodes(url)
                    url = urlparse.urljoin(self.base_link, url)
                    url = url.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #26
 def __search(self, titles, year):
     try:
         query = self.search_link % (urllib.quote_plus(
             cleantitle.query(titles[0] + ' ' + year)))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i]
         r = self.scraper.get(query).content
         r = dom_parser.parse_dom(r,
                                  'figure',
                                  attrs={'class': 'pretty-figure'})
         r = dom_parser.parse_dom(r, 'figcaption')
         for i in r:
             title = client.replaceHTMLCodes(i[0]['title'])
             title = cleantitle.get(title)
             if title in t:
                 x = dom_parser.parse_dom(i, 'a', req='href')
                 return source_utils.strip_domain(x[0][0]['href'])
         return
     except:
         return
Example #27
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            url = urlparse.urljoin(self.base_link, url) if not url.startswith('http') else url

            result = self.scraper.get(url).content
            links = client.parseDOM(result, 'tbody')

            for i in links:
                try:
                    data = [
                        (client.parseDOM(i, 'a', ret='href')[0],
                         client.parseDOM(i, 'span', attrs={'class': 'version_host'})[0])][0]
                    url = urlparse.urljoin(self.base_link, data[0])
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = data[1]
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid:
                        raise Exception()

                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    quality, info = source_utils.get_release_quality(
                        quality, url)

                    sources.append({'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': url,
                                    'direct': False,
                                    'debridonly': False})
                except Exception:
                    pass

            return sources
        except Exception:
            return sources
Example #28
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            url = '%s/serie/%s' % (self.base_link, url)

            r = proxy.request(url, 'tv shows')
            r = client.parseDOM(r, 'li', attrs={'itemprop': 'episode'})

            t = cleantitle.get(title)

            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'span', attrs={'itemprop': 'name'}),
                  re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
            r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2])
                 for i in r if i[1]] + [(i[0], None, i[2])
                                        for i in r if not i[1]]
            r = [(i[0], i[1], i[2][0])
                 for i in r if i[2]] + [(i[0], i[1], None)
                                        for i in r if not i[2]]
            r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

            url = [
                i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]
            ][:1]
            if not url: url = [i for i in r if t == cleantitle.get(i[1])]
            if len(url) > 1 or not url:
                url = [i for i in r if premiered == i[2]]
            if len(url) > 1 or not url: raise Exception()

            url = url[0][0]
            url = proxy.parse(url)

            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #29
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			if not url:
				return sources
			r = client.request(urlparse.urljoin(self.base_link, url))
			r = dom_parser.parse_dom(r, 'div', attrs={'id': 'entries'})
			links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i.content for i in r]))
			links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', req='src')]
			links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]
			for i in links:
				try:
					i = re.sub('\[.+?\]|\[/.+?\]', '', i)
					i = client.replaceHTMLCodes(i)
					valid, host = source_utils.is_host_valid(i, hostDict)
					if not valid: continue
					sources.append({'source': host, 'quality': 'SD', 'language': 'ko', 'url': i, 'direct': False,
					                'debridonly': False})
				except:
					pass
			return sources
		except:
			return sources
Example #30
 def __search(self, title, localtitle, year, content_type):
     try:
         t = cleantitle.get(title)
         tq = cleantitle.get(localtitle)
         y = [
             '%s' % str(year),
             '%s' % str(int(year) + 1),
             '%s' % str(int(year) - 1), '0'
         ]
         query = urlparse.urljoin(self.base_link, self.search_link)
         post = urllib.urlencode({'k': tq})
         r = client.request(query, post=post)
         r = json.loads(r)
         r = [
             i.get('result') for i in r
             if i.get('type', '').encode('utf-8') == content_type
         ]
         r = [(i.get('url'), i.get('originalTitle'), i.get('title'),
               i.get('anneeProduction', 0), i.get('dateStart', 0))
              for i in r]
         r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1] if i[1] else ''),
               re.sub('<.+?>|</.+?>', '', i[2] if i[2] else ''),
               i[3] if i[3] else re.findall('(\d{4})', i[4])[0]) for i in r
              if i[3] or i[4]]
         r = sorted(r, key=lambda i: int(i[3]),
                    reverse=True)  # with year > no year
         r = [
             i[0] for i in r
             if i[3] in y and (t.lower() == cleantitle.get(i[1].lower()) or
                               tq.lower() == cleantitle.query(i[2].lower()))
         ][0]
         url = re.findall('(?://.+?|)(/.+)', r)[0]
         url = client.replaceHTMLCodes(url)
         url = url.encode('utf-8')
         return url
     except:
         return
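Like Example #5, this __search accepts a candidate when its year falls inside a one-year tolerance window ('0' stands for entries with no year at all). The window logic in isolation, with invented results:

def year_window(year):
    y = int(year)
    # Exact year, one year either side, and '0' for undated entries.
    return [str(y), str(y + 1), str(y - 1), '0']

results = [('/old-title', '2008'), ('/new-title', '2019'), ('/undated', '0')]
print([u for u, ry in results if ry in year_window('2018')])
# ['/new-title', '/undated']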