Example #1
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)

            if 'tv-series' not in query:
                links = client.parseDOM(r, 'tbody')
                links = client.parseDOM(links, 'a', ret='href')
            else:
                links = client.parseDOM(r,
                                        'ul',
                                        attrs={'class': 'collapsible'})[0]
                pattern = r'href="#">.+?%d\s*<span class="right lmn-num-of-ep">(.+?)</table></div>' % self.ep
                links = re.findall(pattern, links)
                links = client.parseDOM(links, 'a', ret='href')

            for url in links:
                try:
                    if 'liomenoi' in url:
                        url = re.findall('liomenoi.+?link=(.+?)&title', url)[0]
                        url = base64.b64decode(url).decode('utf-8')  # b64decode returns bytes on Py3
                    if 'target' in url: continue

                    if 'redvid' in url:
                        data = client.request(url)
                        url = client.parseDOM(data, 'iframe', ret='src')[0]

                    if any(x in url for x in [
                            '.online', 'xrysoi.se', 'filmer', '.bp',
                            '.blogger', 'youtu'
                    ]):
                        continue
                    if 'crypt' in url:
                        # the crypt URL itself is kept; only the host label is extracted
                        host = re.findall(r'embed/(.+?)/', url)[0]
                    else:
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue
                    quality = 'SD'
                    lang, info = 'gr', 'SUB'

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': lang,
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass
            return sources
        except:
            return sources
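
A note on the liomenoi branch above: the site wraps outbound links in a redirector whose link= parameter is base64-encoded. A minimal standalone sketch of just that decode step (the URL shape is hypothetical, stdlib only):

import base64
import re

sample = 'https://liomenoi.example/redirect?link=aHR0cHM6Ly9ob3N0LmV4YW1wbGUvdmlkLzE=&title=foo'
encoded = re.findall(r'liomenoi.+?link=(.+?)&title', sample)[0]
print(base64.b64decode(encoded).decode('utf-8'))  # https://host.example/vid/1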
Example #2
 def _get_items(self, url):
     try:
         headers = {'User-Agent': client.agent()}
         r = client.request(url, headers=headers)
         posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
         posts = client.parseDOM(posts, 'tr')
         for post in posts:
             data = dom.parse_dom(post, 'a', req='href')[1]
             link = urlparse.urljoin(self.base_link, data.attrs['href'])
             name = data.content
             t = name.split(self.hdlr)[0]
             if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(
                     self.title):
                 continue
             try:
                 y = re.findall(
                     r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                     name, re.I)[-1].upper()
             except BaseException:
                 y = re.findall(
                     r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                     re.I)[-1].upper()
             if y != self.hdlr: continue
             try:
                 size = re.findall(
                     '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                     post)[0]
                 dsize, isize = utils._size(size)
             except BaseException:
                 dsize, isize = 0, ''
             self.items.append((name, link, isize, dsize))
         return self.items
     except BaseException:
         return self.items
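
utils._size is not shown in these snippets; from how (dsize, isize) is consumed it appears to turn a string like '712.5 MiB' into a numeric size plus a display label. A hedged stand-in for local testing (assumed semantics, GB as the unit):

import re

def size_sketch(txt):
    # stand-in for utils._size (assumed): returns (size in GB as float, display label)
    num, unit = re.findall(r'([\d,.]+)\s*(GiB|MiB|GB|MB)', txt)[0]
    value = float(num.replace(',', ''))
    if unit in ('MiB', 'MB'):
        value /= 1024.0
    return value, '%.2f GB' % value

print(size_sketch('712.5 MiB'))  # (0.6958..., '0.70 GB')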
Example #3
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link,
                                   self.search_link % cleantitle.geturl(title))
            # r = client.request(url, headers=headers, timeout='10')
            r = cfScraper.get(url).content

            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='oldtitle'))
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and (year == i[2])
                ][0]
            except:
                url = None
                pass
            if url is None:
                url = [
                    i[0] for i in results if self.matchAlias(i[1], aliases)
                ][0]
            url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('series95 - Exception: \n' + str(failure))
            return
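
The two-stage selection above (exact year match first, then alias-only fallback) is the heart of searchMovie. The same logic as a self-contained toy, with made-up data standing in for the scraped (href, oldtitle) pairs:

import re

results = [('/movie/heat-2013', 'Heat (2013)'), ('/movie/heat-1995', 'Heat (1995)')]
year = '1995'
parsed = [(href, title, re.findall(r'\((\d{4})', title)) for href, title in results]
try:
    url = [h for h, t, y in parsed if y and y[0] == year][0]
except IndexError:
    url = parsed[0][0]  # no year agreed; fall back to the first title match
print(url)  # /movie/heat-1995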
Example #4
    def _get_items(self, url):
        items = []
        try:
            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = client.parseDOM(r, 'tr', attrs={'class': 't-row'})
            posts = [i for i in posts if 'racker:' not in i]
            for post in posts:
                data = client.parseDOM(post, 'a', ret='href')
                url = [i for i in data if 'magnet:' in i][0]
                name = client.parseDOM(post, 'a', ret='title')[0]
                t = name.split(self.hdlr)[0]

                if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(self.title): continue

                try:
                    y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                if y != self.hdlr: continue

                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''

                items.append((name, url, isize, dsize))
            return items
        except:
            log_utils.log('glodls2_exc', 1)
            return items
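
The nested findall used by these _get_items methods pulls the last SxxExx (or season-only) token out of a release name; isolated, without the surrounding scraping:

import re

name = 'Some.Show.S03E07.720p.WEB.x264-GROUP'
tag = re.findall(r'[\.\(\[\s_-](S\d+E\d+|S\d+)[\.\)\]\s_-]', name, re.I)[-1].upper()
print(tag)  # S03E07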
Example #5
    def __search(self, titles, year, content):
        try:

            query = [self.search_link % (urllib.quote_plus(cleantitle.getsearch(i))) for i in titles]

            query = [urlparse.urljoin(self.base_link, i) for i in query]

            t = [cleantitle.get(i) for i in set(titles) if i] #cleantitle.get(titles[0])

            for u in query:
                try:
                    r = client.request(u)

                    r = client.parseDOM(r, 'div', attrs={'class': 'tab-content clearfix'})

                    if content == 'movies':
                        r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
                    else:
                        r = client.parseDOM(r, 'div', attrs={'id': 'series'})

                    r = [dom_parser2.parse_dom(i, 'figcaption') for i in r]
                    data = [(i[0].attrs['title'], dom_parser2.parse_dom(i[0].content, 'a', req='href')) for i in r if i]
                    data = [i[1][0].attrs['href'] for i in data if cleantitle.get(i[0]) in t]
                    if data: return source_utils.strip_domain(data[0])
                    else:
                        url = [dom_parser2.parse_dom(i[0].content, 'a', req='href') for i in r]
                        data = client.request(url[0][0].attrs['href'])
                        data = re.findall(r'<h1><a.+?">(.+?)\((\d{4})\).*?</a></h1>', data, re.DOTALL)[0]
                        if titles[0] in data[0] and year == data[1]: return source_utils.strip_domain(url[0][0].attrs['href'])
                except:
                    pass

            return
        except:
            return
Example #6
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if not url:
             return
         page = client.request(url)
         items = client.parseDOM(page,
                                 'div',
                                 attrs={'class': 'season-table-row'})
         for item in items:
             try:
                 url = re.compile('<a href="(.+?)">',
                                  re.DOTALL).findall(item)[0]
             except:
                 pass
             sea = client.parseDOM(item,
                                   'div',
                                   attrs={'class':
                                          'season-table-season'})[0]
             epi = client.parseDOM(item,
                                   'div',
                                   attrs={'class': 'season-table-ep'})[0]
             if cleantitle.get(season) in cleantitle.get(
                     sea) and cleantitle.get(episode) in cleantitle.get(
                         epi):
                 url = self.base_link + url
                 return url
         return
     except:
         failure = traceback.format_exc()
         log_utils.log('watchseriestv - Exception: \n' + str(failure))
         return
Example #7
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         hostDict = hostprDict + hostDict
         #headers = {'Referer': url}
         r = cfScraper.get(url).content
         u = client.parseDOM(r,
                             "span",
                             attrs={"class": "movie_version_link"})
         for t in u:
             match = client.parseDOM(t, 'a', ret='data-href')
             for url in match:
                 if url in str(sources):
                     continue
                 quality, info = source_utils.get_release_quality(url, url)
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
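
The `if url in str(sources)` test dedupes by substring-searching the repr of the whole list, which is O(n) per link and can false-positive when one URL is a prefix of another. A set of seen URLs is the usual alternative:

seen = set()
for url in ['http://h/a', 'http://h/a', 'http://h/b']:
    if url in seen:
        continue
    seen.add(url)
    print(url)  # prints http://h/a then http://h/b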
Example #8
    def searchShow(self, title, season, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            search = '%s Season %01d' % (title, int(season))
            url = urlparse.urljoin(
                self.base_link, self.search_link % cleantitle.geturl(search))
            # r = client.request(url, headers=headers, timeout='10')
            r = cfScraper.get(url).content

            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d+)', i[1]))  # \d+ so seasons 10+ match
                 for i in r]
            r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
            url = [
                i[0] for i in r
                if self.matchAlias(i[2][0], aliases) and i[2][1] == season
            ][0]
            url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('series94 - Exception: \n' + str(failure))
            return
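
The '(.*?)\s+-\s+Season\s+(\d+)' pattern splits a listing title into show name and season number (the original single-digit \d missed season 10 and later). A quick check:

import re

print(re.findall(r'(.*?)\s+-\s+Season\s+(\d+)', 'Breaking Bad - Season 5'))
# [('Breaking Bad', '5')]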
Example #9
    def searchMovie(self, title, year, aliases):
        try:
            #title = cleantitle.normalize(title)
            url = urljoin(self.base_link,
                          self.search_link % cleantitle.geturl(title))
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and (year == i[2])
                ][0]
            except:
                url = None
                pass

            if url is None:
                url = [
                    i[0] for i in results if self.matchAlias(i[1], aliases)
                ][0]

            url = urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            log_utils.log('123movies2 exception', 1)
            return
Example #10
	def get_sources(self, url):
		try:
			r = client.request(url)
			if not r:
				return
			div = client.parseDOM(r, 'div', attrs={'id': 'div2child'})

			for row in div:
				row = client.parseDOM(row, 'div', attrs={'class': 'resultdivbotton'})  # parse within this container, not the whole page
				if not row:
					return

				for post in row:
					hash = re.findall('<div id="hideinfohash.+?" class="hideinfohash">(.+?)<', post, re.DOTALL)[0]
					name = re.findall('<div id="hidename.+?" class="hideinfohash">(.+?)<', post, re.DOTALL)[0]
					name = unquote_plus(name)
					name = source_utils.clean_name(self.title, name)
					if source_utils.remove_lang(name, self.episode_title):
						continue

					url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)

					if url in str(self.sources):
						continue

					if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
						continue

					# filter for episode multi packs (ex. S01E01-E17 is also returned in query)
					if self.episode_title:
						if not source_utils.filter_single_episodes(self.hdlr, name):
							continue

					try:
						seeders = int(re.findall('<div class="resultdivbottonseed">([0-9]+|[0-9]+,[0-9]+)<', post, re.DOTALL)[0].replace(',', ''))
						if self.min_seeders > seeders:
							continue
					except:
						seeders = 0
						pass

					quality, info = source_utils.get_release_quality(name, url)

					try:
						size = re.findall('<div class="resultdivbottonlength">(.+?)<', post)[0]
						dsize, isize = source_utils._size(size)
						info.insert(0, isize)
					except:
						dsize = 0
						pass

					info = ' | '.join(info)

					self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
													'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
		except:
			source_utils.scraper_error('IDOPE')
			pass
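
A magnet URI needs only the 40-hex info hash; the dn= display name is optional and is usually URL-quoted. Minimal construction with a placeholder hash:

from urllib.parse import quote_plus

info_hash = 'c12fe1c06bba254a9dc9f519b335aa7c1367a88a'  # placeholder, not a real torrent
name = 'Some.Movie.2020.1080p.WEB'
print('magnet:?xt=urn:btih:%s&dn=%s' % (info_hash, quote_plus(name)))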
Example #11
    def sources(self, url, hostDict, hostprDict):
        self.sources = []
        try:
            scraper = cfscrape.create_scraper()

            if url is None:
                return self.sources

            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']
            self.year = data['year']

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub('[^A-Za-z0-9\s\.-]+', '', query)

            urls = []
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            urls.append(url)
            # urls.append('%s%s' % (url, '&page=2')) # next page seems broken right now
            # urls.append('%s%s' % (url, '&page=3'))
            # log_utils.log('urls = %s' % urls, log_utils.LOGDEBUG)

            links = []
            for x in urls:
                r = scraper.get(x).content
                if not r:
                    continue
                rows = client.parseDOM(r, 'tr', attrs={'class': 'tlr'})
                rows += client.parseDOM(r, 'tr', attrs={'class': 'tlz'})
                links.extend(rows)

            threads = []
            for link in links:
                threads.append(workers.Thread(self.get_sources, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('EXTRATORRENT')
            return self.sources
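
Nearly every sources() here opens with the same idiom: parse_qs yields {key: [values]} and a dict comprehension flattens it to one value per key. Standalone:

from urllib.parse import parse_qs

raw = 'tvshowtitle=Some+Show&season=1&episode=2&year=2020'
data = parse_qs(raw)
data = dict((k, v[0]) if v else (k, '') for k, v in data.items())
print(data['tvshowtitle'], 'S%02dE%02d' % (int(data['season']), int(data['episode'])))
# Some Show S01E02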
Example #12
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []

            if url is None:
                return self._sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')

            hdlr = 'S%02d' % (int(data['season'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = client.request(url)

            posts = client.parseDOM(r, 'figure')

            items = []
            for post in posts:
                try:
                    tit = client.parseDOM(post, 'img', ret='title')[0]
                    tit = client.replaceHTMLCodes(tit)
                    t = tit.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and')
                    if cleantitle.get(t) != cleantitle.get(title):
                        continue

                    if hdlr not in tit:
                        continue

                    url = client.parseDOM(post, 'a', ret='href')[0]

                    items.append((url, tit))

                except:
                    pass

            threads = []
            for i in items:
                threads.append(workers.Thread(self._get_sources, i[0], i[1], hostDict, hostprDict))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self._sources

        except:
            return self._sources
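
workers.Thread is not shown in these snippets; it appears to be a thin wrapper over threading.Thread (an assumption). The start-all/join-all fan-out in plain stdlib terms:

import threading

def get_sources_stub(url, title):
    print('would scrape', url, title)  # stands in for self._get_sources

items = [('/post/1', 'A.Movie.2020'), ('/post/2', 'A.Movie.2020.1080p')]
threads = [threading.Thread(target=get_sources_stub, args=i) for i in items]
[t.start() for t in threads]
[t.join() for t in threads]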
Example #13
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if debrid.status() is False:
                return sources

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))\
                                       if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub(u'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query).lower()

            url = urljoin(self.base_link, self.search_link % query)

            #r = client.request(url)
            #r = requests.get(url).content
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace').replace('&nbsp;', ' ')
            r = client.parseDOM(r, 'div', attrs={'class': 'col s12'})
            posts = client.parseDOM(r, 'div')[1:]
            posts = [i for i in posts if 'magnet/' in i]
            for post in posts:

                links = client.parseDOM(post, 'a', ret='href')[0]
                # lstrip('magnet/') stripped a character set and could eat leading hash digits
                url = 'magnet:?xt=urn:btih:' + links.split('magnet/')[-1]
                try:
                    name = client.parseDOM(post, 'a', ret='title')[0]
                    if query not in cleantitle.get_title(name): continue
                except:
                    name = ''

                quality, info = source_utils.get_release_quality(name, name)
                try:
                    size = re.findall(r'<b class="cpill .+?-pill">(.+?)</b>', post)[0]
                    dsize, isize = source_utils._size(size)
                except:
                    dsize, isize = 0.0, ''

                info.insert(0, isize)

                info = ' | '.join(info)

                sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                'direct': False, 'debridonly': True, 'size': dsize, 'name': name})

            return sources
        except:
            log_utils.log('bt4g3 - Exception', 1)
            return sources
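
The split above replaces the original lstrip('magnet/'), which silently corrupted hashes: lstrip treats its argument as a character set, not a prefix, so leading hash digits drawn from m/a/g/n/e/t were stripped too:

print('magnet/abcdef0123'.lstrip('magnet/'))        # bcdef0123 -- leading 'a' eaten
print('magnet/abcdef0123'.split('magnet/', 1)[-1])  # abcdef0123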
Example #14
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            self.hostDict = hostDict + hostprDict
            if url is None:
                return sources
            if debrid.status() is False: return
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(quote(query))
                url = urljoin(self.base_link, url)
            else:
                url = self.moviesearch.format(quote(query))
                url = urljoin(self.base_link, url)

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                link = client.parseDOM(post, 'a', ret='href')[0]
                hash = re.findall(r'(\w{40})', link, re.I)
                if hash:
                    url = 'magnet:?xt=urn:btih:' + hash[0]
                    name = link.split('title=')[1]
                    t = name.split(self.hdlr)[0]
                    if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(self.title): continue
                    try:
                        y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                    except:
                        y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                    if y != self.hdlr: continue
                    quality, info = source_utils.get_release_quality(name, name)
                    try:
                        size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                        dsize, isize = source_utils._size(size)
                    except:
                        dsize, isize = 0.0, ''
                    info.insert(0, isize)
                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
                                    'debridonly': True, 'size': dsize, 'name': name})
            return sources
        except:
            log_utils.log('lime0 - Exception', 1)
            return sources
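
Here the magnet is rebuilt from any 40-character hex run found in the torrent link. Isolated, with a made-up link (the original's \w{40} also accepts underscores and other word characters; [a-f0-9]{40} is the stricter form):

import re

link = '/torrent/C12FE1C06BBA254A9DC9F519B335AA7C1367A88A?title=Some.Show.S01E02.720p'
found = re.findall(r'([a-f0-9]{40})', link, re.I)
if found:
    print('magnet:?xt=urn:btih:' + found[0])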
Example #15
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostDict + hostprDict
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         aliases = eval(data['aliases'])  # aliases arrives as a repr'd list; ast.literal_eval would be safer
         headers = {}
         if 'tvshowtitle' in data:
             ep = data['episode']
             url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                 self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), ep)
             # r = client.request(url, headers=headers, timeout='10', output='geturl')
             r = cfScraper.get(url).content
             if url is None:  # never true as written; the fallback relied on output='geturl' returning None
                 url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
         else:
             url = self.searchMovie(data['title'], data['year'], aliases, headers)
             if url is None:
                 url = '%s/film/%s/watching.html?ep=0' % (self.base_link, cleantitle.geturl(data['title']))
         if url is None:
             raise Exception()
         # r = client.request(url, headers=headers, timeout='10')
         r = cfScraper.get(url).content
         r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
         if 'tvshowtitle' in data:
             ep = data['episode']
             links = client.parseDOM(r, 'a', attrs={'episode-data': ep}, ret='player-data')
         else:
             links = client.parseDOM(r, 'a', ret='player-data')
         for link in links:
             link = "https:" + link if not link.startswith('http') else link
             if '123movieshd' in link or 'seriesonline' in link:
                 # r = client.request(link, headers=headers, timeout='10')
                 r = cfScraper.get(link).content
                 r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
                 for i in r:
                     sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
             else:
                 valid, host = source_utils.is_host_valid(link, hostDict)
                 if valid:
                     quality, info = source_utils.get_release_quality(link, link)
                     if 'load.php' not in link:
                         sources.append({'source': host, 'quality': quality, 'language': 'en', 'info': info, 'url': link, 'direct': False, 'debridonly': False})
         return sources
     except:
         return sources
Example #16
 def _search(self, title, year, aliases, headers):
     try:
         q = urlparse.urljoin(
             self.base_link, self.search_link %
             urllib.quote_plus(cleantitle.getsearch(title)))
         r = client.request(q)
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-img'})
         r = zip(client.parseDOM(r, 'a', ret='href'),
                 client.parseDOM(r, 'img', ret='alt'))
         url = [
             i for i in r if cleantitle.get(title) == cleantitle.get(i[1])
             and year in i[1]
         ][0][0]
         return url
     except:
         pass
Example #17
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         query = self.tvsearch_link % quote_plus(
             cleantitle.query(tvshowtitle))
         query = urljoin(self.base_link, query.lower())
         result = client.request(query, referer=self.base_link)
         result = client.parseDOM(result,
                                  'div',
                                  attrs={'class': 'index_item.+?'})
         result = [(dom.parse_dom(i, 'a', req=['href', 'title'])[0])
                   for i in result if i]
         if not result:
             return
         result = [(
             i.attrs['href']
         ) for i in result if cleantitle.get(tvshowtitle) == cleantitle.get(
             re.sub(
                 '(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                 '',
                 i.attrs['title'],
                 flags=re.I))]
         if not result: return
         else: result = result[0]
         url = client.replaceHTMLCodes(result)
         try:
             url = url.encode('utf-8')
         except:
             pass
         return url
     except:
         source_utils.scraper_error('PRIMEWIRE')
         return
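
The re.sub in tvshow() strips everything from the first year/SxxExx/3D token onward so what remains can be compared via cleantitle.get. On its own:

import re

t = re.sub(r'(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)', '',
           'The Show (2019) S01E01 HDTV', flags=re.I)
print(repr(t))  # 'The Show '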
Example #18
    def resolve(self, url):
        if 'putlockers' in url:
            try:
                r = self.s.get(url, headers=self.ua).text
                v = re.findall(r'document.write\(Base64.decode\("(.+?)"\)',
                               r)[0]
                b64 = base64.b64decode(v).decode('utf-8')  # v.decode('base64') was Py2-only
                url = client.parseDOM(b64, 'iframe', ret='src')[0]
            except:
                r = self.s.get(url, headers=self.ua)
                r = client.parseDOM(r, 'div', attrs={'class': 'player'})
                url = client.parseDOM(r, 'a', ret='href')[0]

            return url
        else:
            return url
Example #19
    def _get_sources(self, item):
        try:
            name = item[0]
            quality, info = source_utils.get_release_quality(name, item[1])
            info.insert(0, item[2])
            data = cfScraper.get(item[1]).content
            data = ensure_text(data, errors='replace')
            data = client.parseDOM(data, 'a', ret='href')
            url = [i for i in data if 'magnet:' in i][0]
            url = url.split('&tr')[0]
            info = ' | '.join(info)

            self._sources.append({
                'source': 'Torrent',
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True,
                'size': item[3],
                'name': name
            })
        except:
            log_utils.log('1337x_exc1', 1)
            pass
Example #20
    def __search(self, titles, year):
        try:
            tit = [i.split(':')[0] for i in titles]
            query = [
                self.search_link %
                (urllib.quote_plus(cleantitle.getsearch(i + ' ' + year)))
                for i in tit
            ]
            query = [urlparse.urljoin(self.base_link, i) for i in query]
            t = [cleantitle.get(i) for i in set(titles) if i]
            for u in query:
                try:
                    r = client.request(u)
                    r = client.parseDOM(r,
                                        'div',
                                        attrs={'class': 'card-content'})
                    r = dom_parser2.parse_dom(r, 'a')
                    r = [(i.attrs['href'], i.content) for i in r if i]
                    r = [(i[0], i[1]) for i in r
                         if year == re.findall('(\d{4})', i[1], re.DOTALL)[0]]
                    if len(r) == 1: return source_utils.strip_domain(r[0][0])
                    else:
                        r = [(i[0]) for i in r if cleantitle.get(i[1]) in t]
                        return source_utils.strip_domain(r[0])

                except:
                    pass

            return
        except:
            return
Example #21
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None: return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']
            year = data['year']
            if int(year) > 1970:  # the site appears to index only pre-1970 titles
                return sources

            scrape = title.lower().replace(' ', '+')

            #start_url = self.search_link %(self.goog,scrape,year)
            start_url = self.base_link + self.search_link % scrape

            html = client.request(start_url)
            posts = client.parseDOM(html, 'div', attrs={'class': 'post'})
            for post in posts:
                url = client.parseDOM(post, 'a', ret='href')[0]
                if self.base_link in url:
                    if 'webcache' in url:
                        continue
                    if cleantitle.geturl(title) in url:
                        html2 = client.request(url)
                        chktitle = re.compile('<title.+?>(.+?)</title>',
                                              re.DOTALL).findall(html2)[0]
                        if title in chktitle and year in chktitle:
                            links = client.parseDOM(html2, 'source', ret='src')
                            for link in links:
                                sources.append({
                                    'source': 'direct',
                                    'quality': 'SD',
                                    'language': 'en',
                                    'url': link,
                                    'info': '',
                                    'direct': True,
                                    'debridonly': False
                                })
            return sources
        except:
            log_utils.log('BNWM1 - Exception', 1)
            return sources
Example #22
    def episodeAbsoluteNumber(self, thetvdb, season, episode):
        try:
            # .decode() needed on Py3, where b64decode returns bytes
            url = 'https://thetvdb.com/api/%s/series/%s/default/%01d/%01d' % (base64.b64decode('Sk1DTzhMUUhJWFg3NkNHTg==').decode('utf-8'), thetvdb, int(season), int(episode))
            return int(client.parseDOM(requests.get(url, timeout=15, verify=True).content, 'absolute_number')[0])
        except:
            pass

        return episode
Example #23
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if not url:
             return
         page = client.request(url)
         items = client.parseDOM(page, 'div', attrs={'class': 'season-table-row'})
         for item in items:
             try:
                 url = re.compile('<a href="(.+?)">', re.DOTALL).findall(item)[0]
             except:
                 pass
             sea = client.parseDOM(item, 'div', attrs={'class': 'season-table-season'})[0]
             epi = client.parseDOM(item, 'div', attrs={'class': 'season-table-ep'})[0]
             if cleantitle.get(season) in cleantitle.get(sea) and cleantitle.get(episode) in cleantitle.get(epi):
                 url = self.base_link + url
                 return url
         return
     except:
         return
Example #24
    def getTVShowTranslation(self, thetvdb, lang):
        try:
            url = 'https://thetvdb.com/api/%s/series/%s/%s.xml' % (base64.b64decode('Sk1DTzhMUUhJWFg3NkNHTg==').decode('utf-8'), thetvdb, lang)
            r = requests.get(url, timeout=15, verify=True).content
            title = client.parseDOM(r, 'SeriesName')[0]
            title = client.replaceHTMLCodes(title)
            title = control.six_encode(title)

            return title
        except:
            pass
Example #25
    def links(self, url):
        urls = []
        try:
            if url is None:
                return

            r = client.request(url)
            r = client.parseDOM(r, 'div', attrs={'class': 'entry'})
            r = client.parseDOM(r, 'a', ret='href')
            r1 = [i for i in r if 'money' in i][0]
            r = client.request(r1)
            r = client.parseDOM(r, 'div', attrs={'id': 'post-\d+'})[0]

            if 'enter the password' in r:
                plink = client.parseDOM(r, 'form', ret='action')[0]

                post = {'post_password': '******', 'Submit': 'Submit'}
                send_post = client.request(plink, post=post, output='cookie')
                link = client.request(r1, cookie=send_post)
            else:
                link = client.request(r1)

            link = re.findall('<strong>Single(.+?)</tr', link, re.DOTALL)[0]
            link = client.parseDOM(link, 'a', ret='href')
            for i in link:
                if 'earn-money-onlines.info' in i:
                    trim = i.replace('protector1.php', 'protector.php')
                    r = client.request(trim)
                    filter_links = re.compile(
                        '<center> <a href="(.+?)"').findall(r)
                    for f in filter_links:
                        if any(x in f for x in ['uptobox', 'clicknupload']):
                            continue
                        urls.append(f)
                else:
                    urls.append(i)

            return urls
        except Exception:
            pass
Example #26
    def _get_items(self, url):
        try:
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                data = dom.parse_dom(post, 'a', req='href')[1]
                link = urljoin(self.base_link, data.attrs['href'])
                name = data.content
                t = name.split(self.hdlr)[0]

                if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(
                        self.title):
                    continue

                try:
                    y = re.findall(
                        r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                        name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall(
                        r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                        re.I)[-1].upper()
                if y != self.hdlr:
                    continue

                try:
                    size = re.findall(
                        '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        post)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''

                self.items.append((name, link, isize, dsize))
            return self.items
        except:
            log_utils.log('1337x_exc0', 1)
            return self.items
Example #27
    def resolve(self, url):
        if 'putlockerfree' in url:
            try:
                r = client.request(url)
                r = six.ensure_text(r, errors='replace')
                try:
                    v = re.findall('document.write\(Base64.decode\("(.+?)"\)',
                                   r)[0]
                    v = v.encode('utf-8')
                    b64 = base64.b64decode(v)
                    b64 = six.ensure_text(b64, errors='ignore')
                    url = client.parseDOM(b64, 'iframe', ret='src')[0]
                    url = url.replace('///', '//')
                except:
                    u = client.parseDOM(r, 'div', attrs={'class': 'player'})
                    url = client.parseDOM(u, 'a', ret='href')[0]
            except:
                log_utils.log('plockersR Exception', 1)

            return url
        else:
            return url
Example #28
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if not url:
                return

            self.ep = int(episode)
            r = client.request(urlparse.urljoin(self.base_link, url))
            r = client.parseDOM(r, 'a', ret='href')
            s = '/season/%d/' % int(season)
            r = [i for i in r if s in i]

            return source_utils.strip_domain(r[0])
        except:
            return
Example #29
 def searchShow(self, title, season, aliases):
     try:
         #title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urljoin(self.base_link,
                       self.search_link % cleantitle.geturl(search))
         r = cfScraper.get(url).content
         r = ensure_text(r, errors='ignore')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'),
                 client.parseDOM(r, 'a', ret='title'))
          r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d+)', i[1]))  # \d+ so seasons 10+ match
               for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [
             i[0] for i in r
             if self.matchAlias(i[2][0], aliases) and i[2][1] == season
         ][0]
         url = urljoin(self.base_link, '%s/watching.html' % url)
         return url
     except:
         log_utils.log('123movies1 exception', 1)
         return
Example #30
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         tvshowtitle = cleantitle.geturl(tvshowtitle)
         url = self.base_link + self.search_link % (tvshowtitle.replace(' ', '+').replace('-', '+').replace('++', '+'))
         page = client.request(url)
         items = client.parseDOM(page, 'div', attrs={'class': 'content-left'})
         for item in items:
             match = re.compile('<a href="(.+?)">', re.DOTALL).findall(item)
             for url in match:
                 if cleantitle.get(tvshowtitle) in cleantitle.get(url):
                     url = self.base_link + url
                     return url
         return
     except:
         return