Example #1
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Resolve the site watch-page URL for *title*.

     Searches the site and returns the absolute '/watch/...' URL of the
     first result whose cleaned title contains the cleaned *title*, or
     None when nothing matches or the request fails.
     """
     try:
         search_id = cleantitle.getsearch(title)
         url = urlparse.urljoin(self.base_link, self.search_link)
         # The site's search endpoint expects '+'-separated terms; drop colons first.
         url = url % (search_id.replace(':', ' ').replace(' ', '+'))
         search_results = client.request(url)
         match = re.compile('<a href="/watch/(.+?)" title="(.+?)">', re.DOTALL).findall(search_results)
         for row_url, row_title in match:
             row_url = self.base_link + '/watch/%s' % row_url
             if cleantitle.get(title) in cleantitle.get(row_title):
                 return row_url
         return
     except Exception:
         # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
         # are not swallowed; scrapers deliberately fail soft otherwise.
         return
Example #2
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the site URL for *title*, found by searching with the IMDb id.

     Returns the href of the first search result whose cleaned title
     contains the cleaned *title*, or None on no match / failure.
     """
     try:
         query = cleantitle.getsearch(imdb)
         search_url = urlparse.urljoin(self.base_link, self.search_link)
         search_url = search_url % (query.replace(':', ' ').replace(' ', '+'))
         html = cfScraper.get(search_url).content
         pattern = re.compile(
             '<div class="post_thumb".+?href="(.+?)"><h2 class="thumb_title">(.+?)</h2>',
             re.DOTALL)
         for result_url, result_title in pattern.findall(html):
             if cleantitle.get(title) in cleantitle.get(result_title):
                 return result_url
         return
     except Exception:
         return
Example #3
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Search the site's RSS-style feed for *title* released in *year*.

     Returns the <link> of the first <item> whose cleaned <title>
     contains the cleaned *title* and mentions *year*, or None.
     """
     try:
         search_id = cleantitle.getsearch(title)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(' ', '+').replace('-', '+').replace('++', '+'), year)
         headers = {'User-Agent': self.User_Agent}
         search_results = requests.get(url, headers=headers, timeout=10).content
         # .content is bytes on Python 3; decode so the str regexes below work.
         if isinstance(search_results, bytes):
             search_results = search_results.decode('utf-8', errors='ignore')
         items = re.compile('<item>(.+?)</item>', re.DOTALL).findall(search_results)
         for item in items:
             match = re.compile('<title>(.+?)</title>.+?<link>(.+?)</link>', re.DOTALL).findall(item)
             for row_title, row_url in match:
                 if cleantitle.get(title) in cleantitle.get(row_title):
                     if year in str(row_title):
                         return row_url
         return
     except Exception:
         # Narrowed from a bare 'except:'; scraper fails soft on any error.
         return
Example #4
0
    def sources(self, url, hostDict, hostprDict):
        """Collect hoster links for the movie/episode described by *url*.

        *url* is a urlencoded query string (title/year and, for TV, the
        season/episode).  Returns a list of source dicts with the keys
        source/quality/language/url/direct/debridonly; an empty list on
        any failure.
        """
        try:
            sources = []

            if url is None:
                return sources

            host_dict = hostprDict + hostDict

            data = parse_qs(url)
            # parse_qs maps each key to a list; keep only the first value.
            data = {i: data[i][0] if data[i] else '' for i in data}

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            # .get() avoids an UnboundLocalError if a TV query arrives
            # without season/episode keys.
            season = data.get('season')
            episode = data.get('episode')
            year = data['year']

            # 'extended' output also yields response headers and cookies,
            # which the site requires on the follow-up search requests.
            r = client.request(self.base_link, output='extended', timeout='10')
            cookie = r[3]
            headers = r[2]
            result = r[0]
            headers['Cookie'] = cookie

            query = urljoin(
                self.base_link,
                self.search_link % quote_plus(cleantitle.getsearch(title)))
            query2 = urljoin(self.base_link,
                             self.search_link % quote_plus(title).lower())
            r = client.request(query, headers=headers, XHR=True)
            # A near-empty payload means no hits; retry with the raw title.
            if len(r) < 20:
                r = client.request(query2, headers=headers, XHR=True)
            r = json.loads(r)['content']
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))

            if 'tvshowtitle' in data:
                # The site names seasons both 'season1' and 'season01'.
                cltitle = cleantitle.get(title + 'season' + season)
                cltitle2 = cleantitle.get(title + 'season%02d' % int(season))
                r = [
                    i for i in r if cltitle == cleantitle.get(i[1])
                    or cltitle2 == cleantitle.get(i[1])
                ]
                vurl = '%s%s-episode-%s' % (self.base_link, str(
                    r[0][0]).replace('/info', ''), episode)
                vurl2 = None

            else:
                cltitle = cleantitle.getsearch(title)
                cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                r = [
                    i for i in r if cltitle2 == cleantitle.getsearch(i[1])
                    or cltitle == cleantitle.getsearch(i[1])
                ]
                # Movies are listed as episode 0 or, on some pages, episode 1.
                vurl = '%s%s-episode-0' % (self.base_link, str(
                    r[0][0]).replace('/info', ''))
                vurl2 = '%s%s-episode-1' % (self.base_link, str(
                    r[0][0]).replace('/info', ''))

            r = client.request(vurl, headers=headers)
            headers['Referer'] = vurl

            slinks = client.parseDOM(r,
                                     'div',
                                     attrs={'class': 'anime_muti_link'})
            slinks = client.parseDOM(slinks, 'li', ret='data-video')
            if len(slinks) == 0 and vurl2 is not None:
                # Movie fallback: try the '-episode-1' variant.
                r = client.request(vurl2, headers=headers)
                headers['Referer'] = vurl2
                slinks = client.parseDOM(r,
                                         'div',
                                         attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
            # Some data-video attributes are protocol-relative ('//host/...').
            slinks = [
                slink
                if slink.startswith('http') else 'https:{0}'.format(slink)
                for slink in slinks
            ]

            for url in slinks:
                url = client.replaceHTMLCodes(url)
                valid, host = source_utils.is_host_valid(url, host_dict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': '720p',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except Exception:
            # Narrowed from a bare 'except:'; log and return whatever was
            # collected so one bad source never breaks the whole scrape.
            log_utils.log('gowatchseries3 - Exception', 1)
            return sources
Example #5
0
 def _search(self, title, year, aliases, headers):
     """Return the site URL whose cleaned title equals *title* and whose
     alt text mentions *year*; None when nothing matches or the request fails.
     """
     try:
         q = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
         r = client.request(q)
         r = client.parseDOM(r, 'div', attrs={'class':'ml-img'})
         r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'img', ret='alt'))
         # Exact (cleaned) title match; [0][0] raises IndexError on no match,
         # which the handler below treats as "not found".
         url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and year in i[1]][0][0]
         return url
     except Exception:
         # Narrowed from a bare 'except:'; no match or network failure -> None.
         pass
Example #6
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape hoster links for the item described by *url*.

        TV episodes: find the show page, then links tagged 'SxxExx'.
        Movies: follow '/download.php?...link=' redirects, which are often
        base64-encoded.  Returns a list of source dicts; an empty list on
        failure (the exception is logged).
        """
        try:
            sources = []

            data = parse_qs(url)
            # parse_qs maps each key to a list; keep only the first value.
            data = {i: data[i][0] if data[i] else '' for i in data}

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            url = urljoin(
                self.base_link,
                self.search_link % quote_plus(cleantitle.query(title)))
            headers = {'User-Agent': self.User_Agent}

            if 'tvshowtitle' in data:
                html = cfScraper.get(url, headers=headers).content
                html = ensure_str(html)

                match = re.compile(
                    'class="post-item.+?href="(.+?)" title="(.+?)"',
                    re.DOTALL).findall(html)
                for url, item_name in match:
                    if cleantitle.getsearch(title).lower(
                    ) in cleantitle.getsearch(item_name).lower():
                        # Episode links are tagged 'SxxExx' (zero-padded).
                        sea_epi = 'S%02dE%02d' % (int(data['season']),
                                                  int(data['episode']))

                        result = cfScraper.get(url,
                                               headers=headers,
                                               timeout=10).content
                        # ensure_str for Py3 bytes, matching the search
                        # request above (the original skipped this here).
                        result = ensure_str(result)
                        ep_links = re.compile('href="(.+?)"',
                                              re.DOTALL).findall(result)
                        for ep_url in ep_links:
                            if sea_epi in ep_url:
                                if '1080p' in ep_url:
                                    qual = '1080p'
                                elif '720p' in ep_url:
                                    qual = '720p'
                                elif '480p' in ep_url:
                                    qual = '480p'
                                else:
                                    qual = 'SD'

                                sources.append({
                                    'source': 'CDN',
                                    'quality': qual,
                                    'language': 'en',
                                    'url': ep_url,
                                    'direct': False,
                                    'debridonly': False
                                })
            else:
                html = requests.get(url, headers=headers).text
                match = re.compile(
                    '<div class="thumbnail".+?href="(.+?)" title="(.+?)"',
                    re.DOTALL).findall(html)

                for url, item_name in match:
                    if cleantitle.getsearch(title).lower(
                    ) in cleantitle.getsearch(item_name).lower():
                        if '1080' in url:
                            quality = '1080p'
                        elif '720' in url:
                            quality = '720p'
                        else:
                            quality = 'SD'

                        result = requests.get(url, headers=headers,
                                              timeout=10).text
                        links = re.compile('href="/download.php.+?link=(.+?)"',
                                           re.DOTALL).findall(result)

                        # Loop-invariant: build the combined host list once.
                        _hostDict = hostDict + hostprDict
                        for link in links:
                            if 'server=' in link:
                                continue
                            try:
                                # Redirect targets are usually base64-encoded.
                                link = base64.b64decode(link)
                                link = ensure_str(link)
                            except Exception:
                                pass
                            try:
                                host = link.split('//')[1].replace(
                                    'www.', '')
                                host = host.split('/')[0].lower()
                            except Exception:
                                # Unparseable link: skip it rather than reuse
                                # the previous iteration's host (original bug).
                                continue
                            valid, host = source_utils.is_host_valid(
                                host, _hostDict)
                            if not valid:
                                continue
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'direct': False,
                                'debridonly': False
                            })

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('ExtraMovies - Exception: \n' + str(failure))
            return sources