Example #1
    def __search(self, titles, year):
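        # Query the site's JSON search endpoint for each candidate title,
        # match results on release year, then fall back to a cleaned-title match.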
        try:
            tit = [i.split(':')[0] for i in titles]
            query = [
                self.search_link % (urllib.quote_plus(cleantitle.getsearch(i)))
                for i in tit
            ]
            query = [urlparse.urljoin(self.base_link, i) for i in query]
            t = [cleantitle.get(i) for i in set(titles) if i]
            for u in query:
                try:
                    r = client.request(u)
                    r = json.loads(r)
                    r = [(r[i]['url'], r[i]['title'], r[i]['extra']) for i in r
                         if i]
                    r = [(i[0], i[1]) for i in r if i[2]['date'] == year]
                    if len(r) == 1:
                        return source_utils.strip_domain(r[0][0])
                    r = [i[0] for i in r if cleantitle.get(i[1]) in t]
                    return source_utils.strip_domain(r[0])

                except Exception:
                    pass

            return
        except Exception:
            return
Example #2
    def __search(self, titles, year, content):
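        # Search the site's HTML results, scoped to the movies or series tab,
        # matching on figcaption titles; failing that, open the first result
        # and verify its <h1> title and year.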
        try:

            query = [self.search_link % (urllib.quote_plus(cleantitle.getsearch(i))) for i in titles]

            query = [urlparse.urljoin(self.base_link, i) for i in query]

            t = [cleantitle.get(i) for i in set(titles) if i]

            for u in query:
                try:
                    r = client.request(u)

                    r = client.parseDOM(r, 'div', attrs={'class': 'tab-content clearfix'})

                    if content == 'movies':
                        r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
                    else:
                        r = client.parseDOM(r, 'div', attrs={'id': 'series'})

                    r = [dom_parser2.parse_dom(i, 'figcaption') for i in r]
                    data = [(i[0].attrs['title'], dom_parser2.parse_dom(i[0].content, 'a', req='href')) for i in r if i]
                    data = [i[1][0].attrs['href'] for i in data if cleantitle.get(i[0]) in t]
                    if data: return source_utils.strip_domain(data[0])
                    else:
                        url = [dom_parser2.parse_dom(i[0].content, 'a', req='href') for i in r]
                        data = client.request(url[0][0].attrs['href'])
                        data = re.findall(r'<h1><a.+?">(.+?)\((\d{4})\).*?</a></h1>', data, re.DOTALL)[0]
                        if titles[0] in data[0] and year == data[1]:
                            return source_utils.strip_domain(url[0][0].attrs['href'])
                except Exception:
                    pass

            return
        except Exception:
            return
Example #3
    def __search(self, titles, year):
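        # Search with '<title> <year>', parse the card-content anchors and
        # match results on year first, then on cleaned title.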
        try:
            tit = [i.split(':')[0] for i in titles]
            query = [
                self.search_link %
                (urllib.quote_plus(cleantitle.getsearch(i + ' ' + year)))
                for i in tit
            ]
            query = [urlparse.urljoin(self.base_link, i) for i in query]
            t = [cleantitle.get(i) for i in set(titles) if i]
            for u in query:
                try:
                    r = client.request(u)
                    r = client.parseDOM(r,
                                        'div',
                                        attrs={'class': 'card-content'})
                    r = dom_parser2.parse_dom(r, 'a')
                    r = [(i.attrs['href'], i.content) for i in r if i]
                    r = [(i[0], i[1]) for i in r
                         if year == re.findall(r'(\d{4})', i[1], re.DOTALL)[0]]
                    if len(r) == 1:
                        return source_utils.strip_domain(r[0][0])
                    r = [i[0] for i in r if cleantitle.get(i[1]) in t]
                    return source_utils.strip_domain(r[0])

                except Exception:
                    pass

            return
        except Exception:
            return
Example #4
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
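     # Build the set of candidate show titles (including any aliases) and
     # delegate the actual lookup to _getSearchData.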
     try:
         lowerTitle = tvshowtitle.lower()
         possibleTitles = set(
             (lowerTitle, cleantitle.getsearch(lowerTitle)) +
             tuple((alias['title'].lower()
                    for alias in aliases) if aliases else ()))
         return self._getSearchData(lowerTitle,
                                    possibleTitles,
                                    year,
                                    self._createSession(),
                                    isMovie=False)
     except Exception:
         log_utils.log('PrimewireGR - Exception', 1)
         return
Example #5
 def movie(self, imdb, title, localtitle, aliases, year):
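     # Search the site and return the first /watch/ result whose cleaned
     # title contains the cleaned query title.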
     try:
         search_id = cleantitle.getsearch(title)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(':', ' ').replace(' ', '+'))
         search_results = client.request(url)
         match = re.compile('<a href="/watch/(.+?)" title="(.+?)">',
                            re.DOTALL).findall(search_results)
         for row_url, row_title in match:
             row_url = self.base_link + '/watch/%s' % row_url
             if cleantitle.get(title) in cleantitle.get(row_title):
                 return row_url
         return
     except Exception:
         return
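Example #6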
 def movie(self, imdb, title, localtitle, aliases, year):
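     # Search the site by IMDb id and return the first post whose cleaned
     # thumb title contains the cleaned movie title.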
     try:
         search = cleantitle.getsearch(imdb)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search.replace(':', ' ').replace(' ', '+'))
         r = cfScraper.get(url).content
         matches = re.compile(
             '<div class="post_thumb".+?href="(.+?)"><h2 class="thumb_title">(.+?)</h2>',
             re.DOTALL).findall(r)
         for row_url, row_title in matches:
             if cleantitle.get(title) in cleantitle.get(row_title):
                 return row_url
         return
     except Exception:
         return
Example #7
 def _search(self, title, year, aliases, headers):
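     # Pair each search result's href with its poster's alt text; return the
     # first link whose cleaned title matches exactly and whose text contains the year.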
     try:
         q = urlparse.urljoin(
             self.base_link, self.search_link %
             urllib.quote_plus(cleantitle.getsearch(title)))
         r = client.request(q)
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-img'})
         r = zip(client.parseDOM(r, 'a', ret='href'),
                 client.parseDOM(r, 'img', ret='alt'))
         url = [
             i for i in r if cleantitle.get(title) == cleantitle.get(i[1])
             and year in i[1]
         ][0][0]
         return url
     except Exception:
         pass
Example #8
 def movie(self, imdb, title, localtitle, aliases, year):
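     # Query the site's RSS-style search feed and return the <link> of the
     # first <item> whose <title> matches the movie title and year.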
     try:
         search_id = cleantitle.getsearch(title)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(' ', '+').replace('-', '+').replace('++', '+'), year)
         headers = {'User-Agent': self.User_Agent}
         search_results = requests.get(url, headers=headers, timeout=10).content
         items = re.compile('<item>(.+?)</item>', re.DOTALL).findall(search_results)
         for item in items:
             match = re.compile('<title>(.+?)</title>.+?<link>(.+?)</link>', re.DOTALL).findall(item)
             for row_title, row_url in match:
                 if cleantitle.get(title) in cleantitle.get(row_title):
                     if year in str(row_title):
                         return row_url
         return
     except Exception:
         return
Example #9
    def movie(self, imdb, title, localtitle, aliases, year):
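        # Search the site (via cfScraper) and return the first boxinfo result
        # whose title and year both match.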
        try:
            search = cleantitle.getsearch(title)
            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % (search.replace(':', ' ').replace(' ', '+'))

            r = cfScraper.get(url).content
            info = re.findall(
                '<div class="boxinfo".+?href="(.+?)".+?<h2>(.+?)</h2>.+?class="year">(.+?)</span>',
                r, re.DOTALL)
            for link, name, r_year in info:
                if cleantitle.get(title) in cleantitle.get(name):
                    if year in str(r_year):
                        return link
            return
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('Movie4kis - Exception: \n' + str(failure))
            return
Example #10
 def movie(self, imdb, title, localtitle, aliases, year):
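     # Search via cfScraper (to get past Cloudflare) and return the first
     # /watch/ result whose cleaned title contains the cleaned query title.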
     try:
         search_id = cleantitle.getsearch(title)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(' ', '+'))
         search_results = cfScraper.get(url).content
         log_utils.log('fmovies0 - search_results: \n' +
                       str(search_results))
         match = re.compile(r'<a href="/watch/(.+?)" title="(.+?)">',
                            re.DOTALL).findall(search_results)
         for row_url, row_title in match:
             row_url = self.base_link + '/watch/%s' % row_url
             if cleantitle.get(title) in cleantitle.get(row_title):
                 return row_url
         return
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('fmovies0 - Exception: \n' + str(failure))
         return
Example #11
    def sources(self, url, hostDict, hostprDict):
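        # Turn a search URL into playable sources. TV: find the show page, then
        # collect links tagged SxxEyy and grade quality from the URL. Movies:
        # decode the base64 download links and validate each host.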
        try:
            sources = []

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            url = urljoin(
                self.base_link,
                self.search_link % quote_plus(cleantitle.query(title)))
            headers = {'User-Agent': self.User_Agent}

            if 'tvshowtitle' in data:
                html = cfScraper.get(url, headers=headers).content
                html = ensure_str(html)

                match = re.compile(
                    'class="post-item.+?href="(.+?)" title="(.+?)"',
                    re.DOTALL).findall(html)
                for url, item_name in match:
                    if cleantitle.getsearch(title).lower() in cleantitle.getsearch(item_name).lower():
                        season_url = '%02d' % int(data['season'])
                        episode_url = '%02d' % int(data['episode'])
                        sea_epi = 'S%sE%s' % (season_url, episode_url)

                        result = cfScraper.get(url,
                                               headers=headers,
                                               timeout=10).content
                        Regex = re.compile('href="(.+?)"',
                                           re.DOTALL).findall(result)
                        for ep_url in Regex:
                            if sea_epi in ep_url:
                                if '1080p' in ep_url:
                                    qual = '1080p'
                                elif '720p' in ep_url:
                                    qual = '720p'
                                elif '480p' in ep_url:
                                    qual = '480p'
                                else:
                                    qual = 'SD'

                                sources.append({
                                    'source': 'CDN',
                                    'quality': qual,
                                    'language': 'en',
                                    'url': ep_url,
                                    'direct': False,
                                    'debridonly': False
                                })
            else:
                html = requests.get(url, headers=headers).text
                match = re.compile(
                    '<div class="thumbnail".+?href="(.+?)" title="(.+?)"',
                    re.DOTALL).findall(html)

                for url, item_name in match:
                    if cleantitle.getsearch(title).lower() in cleantitle.getsearch(item_name).lower():
                        if '1080' in url:
                            quality = '1080p'
                        elif '720' in url:
                            quality = '720p'
                        else:
                            quality = 'SD'

                        result = requests.get(url, headers=headers,
                                              timeout=10).text
                        Regex = re.compile('href="/download.php.+?link=(.+?)"',
                                           re.DOTALL).findall(result)

                        for link in Regex:
                            if 'server=' not in link:
                                try:
                                    link = base64.b64decode(link)
                                    link = ensure_str(link)
                                except Exception:
                                    pass
                                try:
                                    host = link.split('//')[1].replace('www.', '')
                                    host = host.split('/')[0].lower()
                                except Exception:
                                    # skip the link if no hostname can be extracted
                                    continue
                                _hostDict = hostDict + hostprDict
                                valid, host = source_utils.is_host_valid(
                                    host, _hostDict)
                                if not valid:
                                    continue
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': link,
                                    'direct': False,
                                    'debridonly': False
                                })

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('ExtraMovies - Exception: \n' + str(failure))
            return sources
Example #12
    def sources(self, url, hostDict, hostprDict):
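        # Episode scraper: query the site's XHR search API (JSON), confirm the
        # show via its IMDb id, find the SxxEyy episode page and collect the
        # data-actuallink hosters.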
        sources = []
        try:
            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle']
            hdlr = 's%02de%02d' % (int(data['season']), int(data['episode']))
            query = quote_plus(cleantitle.getsearch(title))
            surl = urljoin(self.base_link, self.search_link % query)

            r = client.request(surl, XHR=True)
            r = json.loads(r)
            r = r['series']

            for i in r:
                tit = i['value']

                if cleantitle.get(title) != cleantitle.get(tit):
                    continue
                slink = i['seo']
                slink = urljoin(self.base_link, slink)
                r = client.request(slink)

                if data['imdb'] not in r:
                    continue

                # use a fresh name so the query dict in `data` isn't clobbered
                blocks = client.parseDOM(r, 'div', {'class': r'el-item\s*'})

                epis = [
                    client.parseDOM(i, 'a', ret='href')[0] for i in blocks if i
                ]
                epis = [i for i in epis if hdlr in i.lower()][0]

                r = client.request(epis)
                links = client.parseDOM(r, 'a', ret='data-actuallink')

                for url in links:
                    try:
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid:
                            continue
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'info': '',
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
                    except Exception:
                        source_utils.scraper_error('WATCHEPISODES')
                        return sources
            return sources
        except Exception:
            source_utils.scraper_error('WATCHEPISODES')
            return sources
Example #13
    def sources(self, url, hostDict, hostprDict):
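        # Query the site's XHR search endpoint, build the episode URL from the
        # matched show/movie page and pull hosters from the anime_muti_link
        # block, resolving vidcloud embeds to direct files.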
        try:
            sources = []

            if url is None:
                return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                if 'season' in data:
                    season = data['season']
                if 'episode' in data:
                    episode = data['episode']
                year = data['year']

                r = client.request(self.base_link,
                                   output='extended',
                                   timeout='10')
                cookie = r[4]
                headers = r[3]
                headers['Cookie'] = cookie

                query = urlparse.urljoin(
                    self.base_link, self.search_link %
                    urllib.quote_plus(cleantitle.getsearch(title)))
                r = client.request(query, headers=headers, XHR=True)
                r = json.loads(r)['content']
                r = zip(client.parseDOM(r, 'a', ret='href'),
                        client.parseDOM(r, 'a'))

                if 'tvshowtitle' in data:
                    cltitle = cleantitle.get(title + 'season' + season)
                    cltitle2 = cleantitle.get(title +
                                              'season%02d' % int(season))
                    r = [
                        i for i in r if cltitle == cleantitle.get(i[1])
                        or cltitle2 == cleantitle.get(i[1])
                    ]
                    vurl = '%s%s-episode-%s' % (self.base_link, str(
                        r[0][0]).replace('/info', ''), episode)
                    vurl2 = None
                else:
                    cltitle = cleantitle.getsearch(title)
                    cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                    r = [
                        i for i in r if cltitle2 == cleantitle.getsearch(i[1])
                        or cltitle == cleantitle.getsearch(i[1])
                    ]
                    vurl = '%s%s-episode-0' % (self.base_link, str(
                        r[0][0]).replace('/info', ''))
                    vurl2 = '%s%s-episode-1' % (self.base_link, str(
                        r[0][0]).replace('/info', ''))

                r = client.request(vurl, headers=headers)
                headers['Referer'] = vurl

                slinks = client.parseDOM(r,
                                         'div',
                                         attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
                if not slinks and vurl2 is not None:
                    r = client.request(vurl2, headers=headers)
                    headers['Referer'] = vurl2
                    slinks = client.parseDOM(
                        r, 'div', attrs={'class': 'anime_muti_link'})
                    slinks = client.parseDOM(slinks, 'li', ret='data-video')

                for slink in slinks:
                    try:
                        if 'vidcloud.icu/streaming.php' in slink:
                            r = client.request('https:%s' % slink,
                                               headers=headers)
                            clinks = re.findall(r'sources:\[(.*?)\]', r)[0]
                            clinks = re.findall(
                                r'file:\s*\'(http[^\']+)\',label:\s*\'(\d+)',
                                clinks)
                            for clink in clinks:
                                q = source_utils.label_to_quality(clink[1])
                                sources.append({
                                    'source': 'cdn',
                                    'quality': q,
                                    'language': 'en',
                                    'url': clink[0],
                                    'direct': True,
                                    'debridonly': False
                                })
                        else:
                            valid, hoster = source_utils.is_host_valid(
                                slink, hostDict)
                            if valid:
                                sources.append({
                                    'source': hoster,
                                    'quality': 'SD',
                                    'language': 'en',
                                    'url': slink,
                                    'direct': False,
                                    'debridonly': False
                                })
                    except Exception:
                        pass

            return sources
        except Exception:
            return sources
    def sources(self, url, hostDict, hostprDict):
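        # Variant of the scraper above: same XHR search and anime_muti_link
        # scrape, with a plain-title fallback query and no vidcloud resolving.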
        try:
            sources = []

            if url is None:
                return sources

            host_dict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            if 'season' in data:
                season = data['season']
            if 'episode' in data:
                episode = data['episode']
            year = data['year']

            r = client.request(self.base_link, output='extended', timeout='10')
            cookie = r[3]
            headers = r[2]
            result = r[0]
            headers['Cookie'] = cookie

            query = urljoin(
                self.base_link,
                self.search_link % quote_plus(cleantitle.getsearch(title)))
            query2 = urljoin(self.base_link,
                             self.search_link % quote_plus(title).lower())
            r = client.request(query, headers=headers, XHR=True)
            if len(r) < 20:
                r = client.request(query2, headers=headers, XHR=True)
            r = json.loads(r)['content']
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))

            if 'tvshowtitle' in data:
                cltitle = cleantitle.get(title + 'season' + season)
                cltitle2 = cleantitle.get(title + 'season%02d' % int(season))
                r = [
                    i for i in r if cltitle == cleantitle.get(i[1])
                    or cltitle2 == cleantitle.get(i[1])
                ]
                vurl = '%s%s-episode-%s' % (self.base_link, str(
                    r[0][0]).replace('/info', ''), episode)
                vurl2 = None

            else:
                cltitle = cleantitle.getsearch(title)
                cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                r = [
                    i for i in r if cltitle2 == cleantitle.getsearch(i[1])
                    or cltitle == cleantitle.getsearch(i[1])
                ]
                vurl = '%s%s-episode-0' % (self.base_link, str(
                    r[0][0]).replace('/info', ''))
                vurl2 = '%s%s-episode-1' % (self.base_link, str(
                    r[0][0]).replace('/info', ''))

            r = client.request(vurl, headers=headers)
            headers['Referer'] = vurl

            slinks = client.parseDOM(r,
                                     'div',
                                     attrs={'class': 'anime_muti_link'})
            slinks = client.parseDOM(slinks, 'li', ret='data-video')
            if len(slinks) == 0 and vurl2 is not None:
                r = client.request(vurl2, headers=headers)
                headers['Referer'] = vurl2
                slinks = client.parseDOM(r,
                                         'div',
                                         attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
            slinks = [
                slink
                if slink.startswith('http') else 'https:{0}'.format(slink)
                for slink in slinks
            ]

            for url in slinks:
                url = client.replaceHTMLCodes(url)
                valid, host = source_utils.is_host_valid(url, host_dict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': '720p',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except Exception:
            log_utils.log('gowatchseries3 - Exception', 1)
            return sources