Example no. 1
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         query = '%s %s' % (data['title'], data['year'])
         url = self.search_link % urllib.quote(query)
         url = urlparse.urljoin(self.base_link, url)
         html = client.request(url)
         try:
             results = client.parseDOM(html, 'div', attrs={'class': 'row'})[2]
         except Exception:
             return sources
         items = re.findall('class="browse-movie-bottom">(.+?)</div>\s</div>', results, re.DOTALL)
         if not items:
             return sources
         for entry in items:
             try:
                 try:
                     link, name = \
                         re.findall('<a href="(.+?)" class="browse-movie-title">(.+?)</a>', entry, re.DOTALL)[0]
                     name = client.replaceHTMLCodes(name)
                     if cleantitle.get(name) != cleantitle.get(data['title']):
                         continue
                 except Exception:
                     continue
                 y = entry[-4:]
                 if y != data['year']:
                     continue
                 response = client.request(link)
                 try:
                     entries = client.parseDOM(response, 'div', attrs={'class': 'modal-torrent'})
                     for torrent in entries:
                         link, name = re.findall(
                             'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"',
                             torrent, re.DOTALL)[0]
                         link = 'magnet:%s' % link
                         link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                         quality, info = source_utils.get_release_quality(name, name)
                         try:
                             size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', torrent)[-1]
                             div = 1 if size.endswith(('GB', 'GiB')) else 1024
                             size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                             size = '%.2f GB' % size
                             info.append(size)
                         except Exception:
                             pass
                         info = ' | '.join(info)
                         sources.append(
                             {'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link, 'info': info,
                              'direct': False, 'debridonly': True})
                 except Exception:
                     continue
             except Exception:
                 continue
         return sources
     except Exception:
         return sources
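
The size-normalisation idiom above (find a "<number> GB/MB" token, divide megabytes by 1024) recurs in most of the examples below. A minimal standalone sketch of the same technique; the function name and sample strings are illustrative, not taken from any scraper:

import re

def parse_size_gb(text):
    # Take the last "<number> GB/GiB/MB/MiB" token in free text and
    # normalise it to a "%.2f GB" string; returns None when no size is found.
    matches = re.findall(r'((?:\d+\.\d+|\d+,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', text)
    if not matches:
        return None
    size = matches[-1]
    div = 1 if size.endswith(('GB', 'GiB')) else 1024
    value = float(re.sub(r'[^0-9.,]', '', size).replace(',', '.')) / div
    return '%.2f GB' % value

print(parse_size_gb('Movie.2019.1080p.BluRay.x264 [2.3 GB]'))  # 2.30 GB
print(parse_size_gb('Some.Release.700 MB'))                    # 0.68 GB
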
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         url = urlparse.urljoin(self.base_link, data.get('url'))
         season = data.get('season')
         episode = data.get('episode')
         r = client.request(url)
         if season and episode:
             r = dom_parser.parse_dom(r,
                                      'select',
                                      attrs={'id': 'SeasonSelection'},
                                      req='rel')[0]
             r = client.replaceHTMLCodes(r.attrs['rel'])[1:]
             r = urlparse.parse_qs(r)
             r = dict([(i, r[i][0]) if r[i] else (i, '') for i in r])
             r = urlparse.urljoin(
                 self.base_link, self.get_links_epi %
                 (r['Addr'], r['SeriesID'], season, episode))
             r = client.request(r)
         r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'HosterList'})[0]
         r = dom_parser.parse_dom(r,
                                  'li',
                                  attrs={'id': re.compile('Hoster_\d+')},
                                  req='rel')
         r = [(client.replaceHTMLCodes(i.attrs['rel']), i.content)
              for i in r if i[0] and i[1]]
         r = [(i[0],
               re.findall('class="Named"[^>]*>([^<]+).*?(\d+)/(\d+)', i[1]))
              for i in r]
         r = [(i[0], i[1][0][0].lower().rsplit('.', 1)[0], i[1][0][2])
              for i in r if len(i[1]) > 0]
         for link, hoster, mirrors in r:
             valid, hoster = source_utils.is_host_valid(hoster, hostDict)
             if not valid: continue
             u = urlparse.parse_qs('&id=%s' % link)
             u = dict([(x, u[x][0]) if u[x] else (x, '') for x in u])
             for x in range(0, int(mirrors)):
                 url = self.mirror_link % (u['id'], u['Hoster'], x + 1)
                 if season and episode:
                     url += "&Season=%s&Episode=%s" % (season, episode)
                 try:
                     sources.append({
                         'source': hoster,
                         'quality': 'SD',
                         'language': 'de',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
                 except:
                     pass
         return sources
     except:
         return sources
 def _get_sources(self, item, hostDict):
     try:
         quality, info = source_utils.get_release_quality(item[0], item[1])
         size = item[2] if item[2] != '0' else item[0]
         try:
             size = re.findall(
                 '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                 size)[-1]
             div = 1 if size.endswith(('GB', 'GiB')) else 1024
             size = float(re.sub('[^0-9|/.|/,]', '', size.replace(
                 ',', '.'))) / div
             size = '%.2f GB' % size
             info.append(size)
         except Exception:
             pass
         data = self.scraper.get(item[1]).content
         try:
             r = client.parseDOM(data, 'li', attrs={'class': 'elemento'})
             r = [(dom_parser.parse_dom(i, 'a', req='href')[0],
                   dom_parser.parse_dom(i, 'img', req='alt')[0],
                   dom_parser.parse_dom(i, 'span', {'class': 'd'})[0])
                  for i in r]
             urls = [('http:' + i[0].attrs['href']
                      if not i[0].attrs['href'].startswith('http') else
                      i[0].attrs['href'], i[1].attrs['alt'], i[2].content)
                     for i in r if i[0] and i[1]]
             for url, host, qual in urls:
                 try:
                     if any(x in url
                            for x in ['.rar', '.zip', '.iso', ':Upcoming']):
                         raise Exception()
                     url = client.replaceHTMLCodes(url)
                     url = url.encode('utf-8')
                     valid, host = source_utils.is_host_valid(
                         host, hostDict)
                     if not valid:
                         continue
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     quality, info = source_utils.get_release_quality(
                         qual, quality)
                     info.append('HEVC')
                     info = ' | '.join(info)
                     self._sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'info': info,
                         'direct': False,
                         'debridonly': True
                     })
                 except Exception:
                     pass
         except Exception:
             pass
     except BaseException:
         return
    def _get_sources(self, name, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = self.scraper.get(url, headers=headers).content

            name = client.replaceHTMLCodes(name)
            l = dom_parser.parse_dom(r, 'div', {'class': 'ppu2h'})
            s = ''

            for i in l:
                s += i.content

            urls = re.findall(
                r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
                s,  # search the concatenated block text, not just the last element
                flags=re.MULTILINE | re.DOTALL)
            urls = [
                i for i in urls if not any(
                    x in i for x in ('.rar', '.zip', '.iso', '.idx', '.sub'))
            ]

            for url in urls:
                if url in str(self.sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')

                quality, info = source_utils.get_release_quality(name, url)

                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        name)[0]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except:
                    pass

                info = ' | '.join(info)

                self.sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
        except:
            source_utils.scraper_error('RAPIDMOVIEZ')
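
The URL-harvesting step above reappears in Example no. 8. A self-contained sketch of the corrected version (chaining the extension checks with any() rather than the original or), with placeholder URLs:

import re

URL_RE = re.compile(
    r'(?:http|ftp|https)://[\w_-]+(?:\.[\w_-]+)+[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-]')

def extract_hoster_urls(text):
    # Collect absolute URLs from a text blob, then drop archives and
    # subtitle files; the conditions must be ANDed together.
    urls = URL_RE.findall(text)
    return [u for u in urls
            if not any(x in u for x in ('.rar', '.zip', '.iso', '.idx', '.sub'))]

print(extract_hoster_urls('http://host.example/a.mkv http://host.example/b.rar'))
# ['http://host.example/a.mkv']
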
Example no. 5
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         html = self.scraper.get(url).content
         try:
             v = re.findall('document.write\(Base64.decode\("(.+?)"\)',
                            html)[0]
             b64 = base64.b64decode(v)
             url = client.parseDOM(b64, 'iframe', ret='src')[0]
             try:
                 host = re.findall(
                     '([\w]+[.][\w]+)$',
                     urlparse.urlparse(url.strip().lower()).netloc)[0]
                 host = client.replaceHTMLCodes(host)
                 host = host.encode('utf-8')
                 sources.append({
                     'source': host,
                     'quality': 'SD',
                     'language': 'en',
                     'url': url.replace('\/', '/'),
                     'direct': False,
                     'debridonly': False
                 })
             except:
                 pass
         except:
             pass
         parsed = client.parseDOM(html, 'div', {'class': 'server_line'})
         parsed = [(client.parseDOM(i, 'a', ret='href')[0],
                    client.parseDOM(i,
                                    'p',
                                    attrs={'class':
                                           'server_servername'})[0])
                   for i in parsed]
         if parsed:
             for i in parsed:
                 try:
                     host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                     url = i[0]
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     if 'other' in host: continue
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': url.replace('\/', '/'),
                         'direct': False,
                         'debridonly': False
                     })
                 except:
                     pass
         return sources
     except:
         return sources
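
The Base64 trick above can be exercised outside the addon framework. A sketch that swaps a plain regex in for client.parseDOM; the sample markup is fabricated for the demo:

import re
import base64

def decode_b64_iframe(html):
    # Mirror the document.write(Base64.decode("...")) handling above:
    # grab the encoded blob, decode it, pull the iframe src out of it.
    try:
        blob = re.findall(r'document\.write\(Base64\.decode\("(.+?)"\)', html)[0]
        decoded = base64.b64decode(blob).decode('utf-8')
        return re.findall(r'<iframe[^>]+src="(.+?)"', decoded)[0]
    except IndexError:
        return None

blob = base64.b64encode(b'<iframe src="http://example.com/embed"></iframe>').decode('utf-8')
print(decode_b64_iframe('document.write(Base64.decode("%s"))' % blob))
# http://example.com/embed
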
Example no. 6
def resolve(regex):
	try:
		vanilla = re.compile('(<regex>.+)', re.MULTILINE | re.DOTALL).findall(regex)[0]
		cddata = re.compile('<\!\[CDATA\[(.+?)\]\]>', re.MULTILINE | re.DOTALL).findall(regex)
		for i in cddata:
			regex = regex.replace('<![CDATA[' + i + ']]>', urllib.quote_plus(i))

		regexs = re.compile('(<regex>.+)', re.MULTILINE | re.DOTALL).findall(regex)[0]
		regexs = re.compile('<regex>(.+?)</regex>', re.MULTILINE | re.DOTALL).findall(regexs)
		regexs = [re.compile('<(.+?)>(.*?)</.+?>', re.MULTILINE | re.DOTALL).findall(i) for i in regexs]

		regexs = [dict([(client.replaceHTMLCodes(x[0]), client.replaceHTMLCodes(urllib.unquote_plus(x[1]))) for x in i])
		          for i in regexs]
		regexs = [(i['name'], i) for i in regexs]
		regexs = dict(regexs)

		url = regex.split('<regex>', 1)[0].strip()
		url = client.replaceHTMLCodes(url)
		url = url.encode('utf-8')

		r = getRegexParsed(regexs, url)

		try:
			ln = ''
			ret = r[1]
			listrepeat = r[2]['listrepeat']
			regexname = r[2]['name']

			for obj in ret:
				try:
					item = listrepeat
					for i in range(len(obj) + 1):
						item = item.replace('[%s.param%s]' % (regexname, str(i)), obj[i - 1])

					item2 = vanilla
					for i in range(len(obj) + 1):
						item2 = item2.replace('[%s.param%s]' % (regexname, str(i)), obj[i - 1])

					item2 = re.compile('(<regex>.+?</regex>)', re.MULTILINE | re.DOTALL).findall(item2)
					item2 = [x for x in item2 if not '<name>%s</name>' % regexname in x]
					item2 = ''.join(item2)

					ln += '\n<item>%s\n%s</item>\n' % (item, item2)
				except:
					pass

			return ln
		except:
			pass

		if r[1] is True:
			return r[0]
	except:
		return
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         if debrid.status() is False:
             raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
             if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url)
         r = client.request(url)
         posts = client.parseDOM(r, 'h2')
         hostDict = hostprDict + hostDict
         urls = []
         for item in posts:
             try:
                 item = re.compile('a href="(.+?)"').findall(item)
                 name = item[0]
                 query = query.replace(" ", "-").lower()
                 if query not in name:
                     continue
                 name = client.replaceHTMLCodes(name)
                 quality, info = source_utils.get_release_quality(name, item[0])
                 if any(x in quality for x in ['CAM', 'SD']):
                     continue
                 url = item
                 links = self.links(url)
                 urls += [(i, quality, info) for i in links]
             except:
                 pass
         for item in urls:
             if 'earn-money' in item[0]:
                 continue
             if any(x in item[0] for x in ['.rar', '.zip', '.iso']):
                 continue
             url = client.replaceHTMLCodes(item[0])
             url = url.encode('utf-8')
             valid, host = source_utils.is_host_valid(url, hostDict)
             if not valid:
                 continue
             host = client.replaceHTMLCodes(host)
             host = host.encode('utf-8')
             sources.append({'source': host, 'quality': item[1], 'language': 'en', 'url': url, 'direct': False, 'debridonly': True})
         return sources
     except:
         return sources
Example no. 8
	def _get_sources(self, name, url):
		try:
			headers = {'User-Agent': client.agent()}
			r = self.scraper.get(url, headers=headers).content

			name = client.replaceHTMLCodes(name)
			if name.startswith('['):
				name = name.split(']')[1]
			name = name.strip().replace(' ', '.')

			l = dom_parser.parse_dom(r, 'div', {'class': 'ppu2h'})
			if l == []:
				return
			s = ''
			for i in l:
				s += i.content

			urls = re.findall(r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''', s, flags=re.MULTILINE|re.DOTALL)  # s holds every block; i.content was only the last
			urls = [i for i in urls if not any(x in i for x in ('.rar', '.zip', '.iso', '.idx', '.sub'))]

			for url in urls:
				if url in str(self.sources):
					continue

				valid, host = source_utils.is_host_valid(url, self.hostDict)
				if not valid:
					continue
				host = client.replaceHTMLCodes(host)
				host = host.encode('utf-8')

				quality, info = source_utils.get_release_quality(name, url)

				try:
					size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', name)[0]
					dsize, isize = source_utils._size(size)
					info.insert(0, isize)
				except:
					dsize = 0

				fileType = source_utils.getFileType(name)
				info.append(fileType)
				info = ' | '.join(info) if fileType else info[0]

				self.sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
		except:
			source_utils.scraper_error('RAPIDMOVIEZ')
			pass
Example no. 9
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0] + ' ' + year)))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r,
                                     'figure',
                                     attrs={'class': 'pretty-figure'})
            r = dom_parser.parse_dom(r, 'figcaption')

            for i in r:
                title = client.replaceHTMLCodes(i[0]['title'])
                title = cleantitle.get(title)

                if title in t:
                    x = dom_parser.parse_dom(i, 'a', req='href')
                    return source_utils.strip_domain(x[0][0]['href'])

            return
        except:
            return
Example no. 10
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         query = self.search_link % quote_plus(title)
         query = urljoin(self.base_link, query.lower())
         result = client.request(query, referer=self.base_link)
         result = client.parseDOM(result,
                                  'div',
                                  attrs={'class': 'index_item.+?'})
         result = [(dom.parse_dom(i, 'a', req=['href', 'title'])[0])
                   for i in result if i]
         if not result:
             return
         result = [(
             i.attrs['href']
         ) for i in result if cleantitle.get(title) == cleantitle.get(
             re.sub(
                 '(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                 '',
                 i.attrs['title'],
                 flags=re.I))][0]
         url = client.replaceHTMLCodes(result)
         try:
             url = url.encode('utf-8')
         except:
             pass
         return url
     except:
         source_utils.scraper_error('PRIMEWIRE')
         return
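
The re.sub in the list comprehension above truncates a listing title at the first year / SxxExx / 3D tag so that only the bare title reaches cleantitle.get(); the same regex reappears in the movie() and tvshow() variants later in this collection. In isolation (sample titles invented):

import re

TAG_RE = re.compile(r'(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)', re.I)

def strip_title_tags(title):
    # Drop the year/episode/3D marker and everything after it, then trim.
    return TAG_RE.sub('', title).strip()

print(strip_title_tags('The Movie (2019) 1080p BluRay'))  # The Movie
print(strip_title_tags('Show.S01E02.720p.WEB'))           # Show
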
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [
                '%s' % str(year),
                '%s' % str(int(year) + 1),
                '%s' % str(int(year) - 1), '0'
            ]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'details'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'title'}),
                  dom_parser.parse_dom(i, 'span', attrs={'class': 'year'}))
                 for i in r]
            r = [(dom_parser.parse_dom(i[0][0], 'a',
                                       req='href'), i[1][0].content) for i in r
                 if i[0] and i[1]]
            r = [(i[0][0].attrs['href'],
                  client.replaceHTMLCodes(i[0][0].content), i[1]) for i in r
                 if i[0]]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # prefer entries that have a year over the '0' fallback
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y][0]

            return source_utils.strip_domain(r)
        except:
            return
Example no. 12
    def _get_sources(self, url):
        try:
            item = client.request(url[0])
            title = url[1]
            links = dom_parser2.parse_dom(item, 'a', req='href')
            links = [i.attrs['href'] for i in links]
            info = []
            try:
                size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', item)[0]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                size = '%.2f GB' % size
                info.append(size)
            except Exception:
                pass
            info = ' | '.join(info)
            for url in links:
                if 'youtube' in url: continue
                if any(x in url.lower() for x in ['.rar.', '.zip.', '.iso.']) or any(
                        url.lower().endswith(x) for x in ['.rar', '.zip', '.iso']): raise Exception()

                if any(x in url.lower() for x in ['youtube', 'sample', 'trailer']): raise Exception()
                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid: continue

                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                quality, info2 = source_utils.get_release_quality(title, url)
                if url in str(self._sources): continue

                self._sources.append(
                    {'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
                     'debridonly': True})
        except Exception:
            pass
    def movie(self, imdb, title, localtitle, aliases, year):
        query = self.moviesearch_link % urllib.quote_plus(title)
        query = urlparse.urljoin(self.base_link, query.lower())
        result = self.scraper.get(query, headers={
            'referer': self.base_link
        }).content

        result = client.parseDOM(result,
                                 'div',
                                 attrs={'class': 'index_item.+?'})

        result = [(dom.parse_dom(i, 'a', req=['href', 'title'])[0])
                  for i in result if i]
        result = [
            (i.attrs['href']) for i in result
            if cleantitle.get(title) == cleantitle.get(
                re.sub(
                    '(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                    '',
                    i.attrs['title'],
                    flags=re.I))
        ][0]
        url = client.replaceHTMLCodes(result)
        url = url.encode('utf-8')
        return url
Example no. 14
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            url = self.__search(data['tvshowtitle'], season)
            if not url and data['tvshowtitle'] != data['localtvshowtitle']: url = self.__search(data['localtvshowtitle'], season)
            if not url: return

            print urlparse.urljoin(self.base_link, url)

            r = client.request(urlparse.urljoin(self.base_link, url))

            r = client.parseDOM(r, 'div', attrs={'class': 'keremiya_part'})
            r = re.compile('(<a.+?/a>)', re.DOTALL).findall(''.join(r))
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span')) for i in r]
            r = [(i[0][0], re.findall('(\d+)', i[1][0])) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [i[0] for i in r if len(i[1]) > 0 and int(i[1][0]) == int(episode)][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
    def __search(self, title, season):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(title)))
            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(title)

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'moviefilm'})
            r = client.parseDOM(r, 'div', attrs={'class': 'movief'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a'))
                 for i in r]
            r = [(i[0][0], i[1][0].lower()) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], i[1], re.findall('(.+?)\s+(?:saison)\s+(\d+)', i[1]))
                 for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], re.findall('\((.+?)\)$', i[1]), i[2]) for i in r]
            r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1], i[3]) for i in r]
            r = [
                i[0] for i in r
                if t == cleantitle.get(i[1]) and int(i[2]) == int(season)
            ][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example no. 16
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            tv_maze = tvmaze.tvMaze()
            tvshowtitle = tv_maze.showLookup('thetvdb', tvdb)
            tvshowtitle = tvshowtitle['name']

            t = cleantitle.get(tvshowtitle)

            q = urlparse.urljoin(self.base_link, self.search_link)
            q = q % urllib.quote_plus(tvshowtitle)

            r = client.request(q)

            r = client.parseDOM(r, 'ul', attrs={'class': 'items'})
            r = client.parseDOM(r, 'li')
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'a', ret='title'), re.findall('\d{4}', i))
                 for i in r]
            r = [(i[0][0], i[1][0], i[2][-1]) for i in r
                 if i[0] and i[1] and i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
            r = r[0][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('GoGoAnime - Exception: \n' + str(failure))
            return
Example no. 17
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            r = self.scraper.get(url, headers={'referer': self.base_link}).content
            links = client.parseDOM(r, 'a', ret='href', attrs={'target': '.+?'})
            links = [x for y, x in enumerate(links) if x not in links[:y]]

            for i in links:
                try:
                    url = i
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['r'][0]
                    url = url.decode('base64')
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        continue
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False,
                                    'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            query = self.tvsearch_link % urllib.quote_plus(
                cleantitle.query(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query.lower())
            result = client.request(query, referer=self.base_link)
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'index_item.+?'})

            result = [(dom.parse_dom(i, 'a', req=['href', 'title'])[0])
                      for i in result if i]
            result = [(
                i.attrs['href']
            ) for i in result if cleantitle.get(tvshowtitle) == cleantitle.get(
                re.sub(
                    '(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                    '',
                    i.attrs['title'],
                    flags=re.I))][0]

            url = client.replaceHTMLCodes(result)
            url = url.encode('utf-8')
            return url
        except Exception:
            return
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = self.scraper.get(url).content
         r = dom_parser2.parse_dom(r, 'p', {'class': 'server_play'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         r = [(i[0].attrs['href'],
               re.search('/(\w+).html', i[0].attrs['href'])) for i in r
              if i]
         r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
         for i in r:
             try:
                 host = i[1]
                 if str(host) in str(hostDict):
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': i[0].replace('\/', '/'),
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except Exception:
         return sources
Example no. 20
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = r.replace('\\"', '"')

            links = dom_parser.parse_dom(r, 'tr', attrs={'id': 'tablemoviesindex2'})

            for i in links:
                try:
                    host = dom_parser.parse_dom(i, 'img', req='alt')[0].attrs['alt']
                    host = host.split()[0].rsplit('.', 1)[0].strip().lower()
                    host = host.encode('utf-8')

                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid: continue

                    url = dom_parser.parse_dom(i, 'a', req='href')[0].attrs['href']
                    url = client.replaceHTMLCodes(url)
                    url = urlparse.urljoin(self.base_link, url)
                    url = url.encode('utf-8')

                    sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example no. 21
def strip_domain(url):
	try:
		if url.lower().startswith('http') or url.startswith('/'):
			url = re.findall('(?://.+?|)(/.+)', url)[0]
		url = client.replaceHTMLCodes(url)
		url = url.encode('utf-8')
		return url
	except:
		return
 def resolve(self, url):
     try:
         url = json.loads(client.request(url)).get('code')
         url = url.replace('\/', '/')
         url = client.replaceHTMLCodes(url).encode('utf-8')
         if url.startswith('/'): url = 'https:%s' % url
         return url
     except:
         return
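
strip_domain above reduces an absolute URL to its path; resolve then does the reverse for scheme-relative codes. A quick standalone sketch of the path-stripping regex (without the client.replaceHTMLCodes entity cleanup):

import re

def strip_domain_plain(url):
    # Keep only the path-and-query part of an absolute or rooted URL.
    if url.lower().startswith('http') or url.startswith('/'):
        return re.findall('(?://.+?|)(/.+)', url)[0]
    return url

print(strip_domain_plain('https://example.com/watch/123?x=1'))  # /watch/123?x=1
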
Example no. 23
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            if 'tvshowtitle' in data:
                url = self.searchShow(data['tvshowtitle'], data['season'])
            else:
                url = self.searchMovie(data['title'], data['year'])

            if url is None:
                return sources

            r = self.scraper.get(url, params={'link_web': self.base_link}).content
            quality = client.parseDOM(r, 'span', attrs={'class': 'quality'})[0]
            quality = source_utils.check_sd_url(quality)
            r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})

            if 'tvshowtitle' in data:
                ep = data['episode']
                links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
            else:
                links = client.parseDOM(r, 'a', ret='player-data')

            for link in links:
                if '123movieshd' in link or 'seriesonline' in link:
                    r = self.scraper.get(link, data={'link_web': self.base_link}).content  # follow the player link, not the page url
                    r = re.findall('(https:.*?redirector.*?)[\'\"]', r)

                    for i in r:
                        try:
                            sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'],
                                            'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                        except:
                            traceback.print_exc()
                            pass
                else:
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
                        if host not in hostDict:
                            continue
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')

                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'info': [],
                                        'direct': False, 'debridonly': False})
                    except:
                        pass
            return sources
        except:
            traceback.print_exc()
            return sources
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            url = urlparse.urljoin(self.base_link,
                                   url) if not url.startswith('http') else url

            result = client.request(url)
            data = re.findall(r'\s*(eval.+?)\s*</script', result, re.DOTALL)[1]
            data = jsunpack.unpack(data).replace('\\', '')

            pattern = '''rtv='(.+?)';var aa='(.+?)';var ba='(.+?)';var ca='(.+?)';var da='(.+?)';var ea='(.+?)';var fa='(.+?)';var ia='(.+?)';var ja='(.+?)';var ka='(.+?)';'''
            links_url = re.findall(pattern, data, re.DOTALL)[0]
            slug = 'slug={}'.format(url.split('/')[-1])
            links_url = self.base_link + [''.join(links_url)][0].replace(
                'slug=', slug)
            links = client.request(links_url)
            links = client.parseDOM(links, 'tbody')

            for i in links:
                try:
                    data = [(client.parseDOM(i, 'a', ret='href')[0],
                             client.parseDOM(i,
                                             'span',
                                             attrs={'class':
                                                    'version_host'})[0])][0]
                    url = urlparse.urljoin(self.base_link, data[0])
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = data[1]
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid:
                        raise Exception()

                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    quality, info = source_utils.get_release_quality(
                        quality, url)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except BaseException:
                    pass

            return sources
        except Exception:
            return sources
Example no. 25
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            r = self.scraper.get(url).content
            try:
                v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
                b64 = base64.b64decode(v)
                url = client.parseDOM(b64, 'iframe', ret='src')[0]
                try:
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if valid:
                        sources.append(
                            {'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'),
                             'direct': False, 'debridonly': False})
                except:
                    pass
            except:
                pass
            r = client.parseDOM(r, 'div', {'class': 'server_line'})
            r = [(client.parseDOM(i, 'a', ret='href')[0],
                  client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
            if r:
                for i in r:
                    try:
                        host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                        url = i[0]
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        valid, host = source_utils.is_host_valid(host, hostDict)
                        if valid:
                            sources.append(
                                {'source': host, 'quality': 'SD', 'language': 'en', 'url': url.replace('\/', '/'),
                                 'direct': False, 'debridonly': False})
                    except:
                        pass
            return sources
        except Exception:
            return sources
Example no. 26
 def getTVShowTranslation(self, thetvdb, lang):
     try:
         url = 'http://thetvdb.com/api/%s/series/%s/%s.xml' % (
             'MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64'), thetvdb, lang)
         r = client.request(url)
         title = client.parseDOM(r, 'SeriesName')[0]
         title = client.replaceHTMLCodes(title)
         title = title.encode('utf-8')
         return title
     except:
         pass
Example no. 27
 def resolve(self, url):
     try:
         r = client.request(url)
         r = dom_parser2.parse_dom(
             r, 'a', req=['href', 'data-episodeid', 'data-linkid'])[0]
         url = r.attrs['href']
         url = client.replaceHTMLCodes(url)
         url = url.encode('utf-8')
         return url
     except:
         return
Example no. 28
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []
			if url is None:
				return sources
			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			aliases = eval(data['aliases'])
			headers = {}
			if 'tvshowtitle' in data:
				ep = data['episode']
				url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
					self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
				url = client.request(url, headers=headers, timeout='10', output='geturl')  # output='geturl' returns the final URL; keep it in url so the fallback below can fire
				if url is None:
					url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
			else:
				url = self.searchMovie(data['title'], data['year'], aliases, headers)
				if url is None:
					url = '%s/film/%s/watching.html?ep=0' % (self.base_link, cleantitle.geturl(data['title']))
			if url is None:
				raise Exception()
			r = client.request(url, headers=headers, timeout='10')
			r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
			if 'tvshowtitle' in data:
				ep = data['episode']
				links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
			else:
				links = client.parseDOM(r, 'a', ret='player-data')
			for link in links:
				if '123movieshd' in link or 'seriesonline' in link:
					r = client.request(link, headers=headers, timeout='10')
					r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
					for i in r:
						try:
							sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'],
							                'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
						except:
							pass
				else:
					try:
						host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(link.strip().lower()).netloc)[0]
						if host not in hostDict:
							raise Exception()
						host = client.replaceHTMLCodes(host)
						host = host.encode('utf-8')
						sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False,
						                'debridonly': False})
					except:
						pass
			return sources
		except:
			return sources
 def resolve(self, url):
     try:
         h = urlparse.urlparse(url.strip().lower()).netloc
         r = client.request(url, timeout='10')
         r = r.rsplit('"underplayer"')[0].rsplit("'underplayer'")[0]
         u = re.findall('\'(.+?)\'', r) + re.findall('\"(.+?)\"', r)
         u = [client.replaceHTMLCodes(i) for i in u]
         u = [i for i in u if i.startswith('http') and h not in i]
         url = u[-1].encode('utf-8')
         return url
     except:
         return
Example no. 30
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            urls = self.search(data['title'], data['year'])

            for url in urls:
                try:
                    link = client.replaceHTMLCodes(url[1])
                    link = link.encode('utf-8')
                    if link in str(sources): continue  # match the dedupe idiom used elsewhere
                    if 'snahp' in link:
                        data = client.request(link)
                        data = client.parseDOM(data, 'center')
                        data = [i for i in data if 'Hidden Link' in i][0]
                        link = client.parseDOM(data, 'a', ret='href')[0]
                    if 'google' in link:
                        quality, info2 = source_utils.get_release_quality(
                            url[0], link)
                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })

                    else:
                        host = re.findall(
                            '([\w]+[.][\w]+)$',
                            urlparse.urlparse(link.strip().lower()).netloc)[0]
                        if host in hostDict:
                            host = host.encode('utf-8')
                            quality, info2 = source_utils.get_release_quality(
                                url[0], link)
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'direct': False,
                                'debridonly': False
                            })
                except BaseException:
                    pass
            return sources
        except BaseException:
            return sources