예제 #1
0
    def searchMovie(self, title, year, aliases, headers):
        """Probe candidate movie URLs built from each alias title.

        Tries '<base>/full-movie/<alias>' first, then falls back to
        '<base>/full-movie/<alias>-<year>'. Returns the first URL that
        resolves to something other than the site root, else None.

        Fix: `url` is now initialized before the loops — previously an
        empty `aliases` list left it unbound, and the NameError at the
        `if url is None` check was silently swallowed by the except.
        """
        try:
            url = None
            for alias in aliases:
                url = '%s/full-movie/%s' % (self.base_link,
                                            cleantitle.geturl(alias['title']))
                url = client.request(url,
                                     headers=headers,
                                     output='geturl',
                                     timeout='10')
                if url is not None and url != self.base_link:
                    break
            if url is None:
                # second pass: retry every alias with the year suffixed
                for alias in aliases:
                    url = '%s/full-movie/%s-%s' % (self.base_link,
                                                   cleantitle.geturl(
                                                       alias['title']), year)
                    url = client.request(url,
                                         headers=headers,
                                         output='geturl',
                                         timeout='10')
                    if url is not None and url != self.base_link:
                        break

            return url
        except:
            log_utils.log('cartoonhd - Exception', 1)
            return
예제 #2
0
    def searchMovie(self, title, year, aliases):
        """Search the site for *title*; prefer an alias match with exact year.

        Returns the matched result's watching.html URL, or None on failure.
        Fixes: `url == None` → `url is None`; raw regex string; removed a
        redundant `pass` after the fallback assignment.
        """
        try:
            #title = cleantitle.normalize(title)
            url = urljoin(self.base_link,
                          self.search_link % cleantitle.geturl(title))
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            # (href, display title, any '(YYYY' year captured from the title)
            results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
            try:
                # first pass: alias match AND exact year match
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and (year == i[2])
                ][0]
            except:
                url = None

            if url is None:
                # fallback: first alias match regardless of year
                url = [
                    i[0] for i in results if self.matchAlias(i[1], aliases)
                ][0]

            url = urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            log_utils.log('123movies2 exception', 1)
            return
예제 #3
0
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Build the show page URL by appending the cleaned title to base_link."""
     try:
         slug = cleantitle.geturl(tvshowtitle)
         return self.base_link + slug
     except:
         return
예제 #4
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the movie search URL built from the cleaned title."""
     try:
         # collapse any doubled dash left over from slugification
         slug = cleantitle.geturl(title).replace('--', '-')
         return self.base_link + (self.search_link % slug)
     except:
         return
예제 #5
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the movie page URL built from the cleaned title."""
     try:
         slug = cleantitle.geturl(title)
         return self.base_link + (self.search_movie % slug)
     except:
         return
예제 #6
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the '/movie/<slug>-<year>' URL for this title."""
     try:
         slug = cleantitle.geturl(title)
         return '%s/movie/%s-%s' % (self.base_link, slug, year)
     except:
         return
예제 #7
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Build the urlencoded episode descriptor, including a search URL.

        Fixes: the 'content' value was misspelled 'episdoe' → 'episode';
        a dead re-parse of `url` (its result was immediately overwritten
        by the dict literal) was removed.
        """
        try:
            if url is None: return
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            tvshowtitle = data['tvshowtitle']
            year = data['year']

            # query like 'show+title+s01e02'
            query = '%s+s%02de%02d' % (cleantitle.geturl(tvshowtitle).replace(
                '-', '+'), int(season), int(episode))
            url2 = urlparse.urljoin(self.base_link, self.search_link % (query))
            url = {
                'imdb': imdb,
                'title': title,
                'year': year,
                'url': url2,
                'content': 'episode',  # was 'episdoe' (typo)
                'tvshowtitle': tvshowtitle,
                'season': season,
                'episode': episode,
                'premiered': premiered
            }
            url = urllib.urlencode(url)
            return url
        except:
            return
예제 #8
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the search URL for this title and year."""
     try:
         slug = cleantitle.geturl(title)
         return urlparse.urljoin(self.base_link, (self.search_link % (slug, year)))
     except:
         return
예제 #9
0
    def searchMovie(self, title, year, aliases, headers):
        """Search the site for a movie matching title/aliases; prefer exact year.

        Returns the matched result's watching.html URL, or None on failure.
        """
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link,
                                   self.search_link % cleantitle.geturl(title))
            # r = client.request(url, headers=headers, timeout='10')
            r = cfScraper.get(url).content  # Cloudflare-aware fetch of the results page

            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='oldtitle'))
            # each entry: (href, display title, any '(YYYY' year found in the title)
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                # first pass: alias match AND exact year match
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and (year == i[2])
                ][0]
            except:
                url = None
                pass
            if url is None:
                # fallback: first alias match regardless of year
                url = [
                    i[0] for i in results if self.matchAlias(i[1], aliases)
                ][0]
            url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            return
예제 #10
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Return the movie search URL for the cleaned title.

     Fix: removed a leftover debug `print(url)`.
     """
     try:
         mtitle = cleantitle.geturl(title)
         url = self.base_link + self.search_link % mtitle
         return url
     except:
         return
예제 #11
0
    def sources(self, url, hostDict, hostprDict):
        """Search posts matching title + season/episode (or year), then scrape
        each matching post for sources on a worker thread.

        Results accumulate in self._sources via self._get_sources.
        """
        try:
            self._sources = []
            if url is None: return self._sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            # hdlr: 'sXXeYY' token for episodes, otherwise the release year
            hdlr = 's%02de%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            # strip characters the search endpoint cannot handle
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query = self.search_link % cleantitle.geturl(query)
            url = urljoin(self.base_link, query)
            r = client.request(url)
            posts = dom_parser2.parse_dom(r, 'div', {'class': 'eTitle'})
            posts = [
                dom_parser2.parse_dom(i.content, 'a', req='href')
                for i in posts if i
            ]
            # (href, anchor text stripped of HTML tags)
            posts = [(i[0].attrs['href'], re.sub('<.+?>', '', i[0].content))
                     for i in posts if i]
            # keep posts whose cleaned title (before the hdlr token) matches
            # the requested title and that actually contain the hdlr token
            posts = [
                (i[0], i[1]) for i in posts
                if (cleantitle.get_simple(i[1].split(hdlr)[0]) ==
                    cleantitle.get(title) and hdlr.lower() in i[1].lower())
            ]
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in posts:
                threads.append(workers.Thread(self._get_sources, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            # NOTE(review): the joins above already block until every thread
            # finishes, so this poll loop should never find a live thread —
            # looks redundant; confirm before removing
            alive = [x for x in threads if x.is_alive() == True]
            while alive:
                alive = [x for x in threads if x.is_alive() == True]
                time.sleep(0.1)
            return self._sources
        except Exception:
            return self._sources
예제 #12
0
 def movie(self, imdb, title, localtitle, aliases, year):
     """Encode movie metadata (plus a prebuilt search URL) as a query string."""
     try:
         q = '%s+%s' % (cleantitle.geturl(title).replace('-', '+'), year)
         search_url = urlparse.urljoin(self.base_link, self.search_link % q)
         payload = {
             'imdb': imdb,
             'title': title,
             'year': year,
             'url': search_url,
             'content': 'movie'
         }
         return urllib.urlencode(payload)
     except:
         return
예제 #13
0
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     """Locate the episode link on the season's page.

     Returns the matched episode href; if no anchor matches, the season
     search URL itself is returned (original behavior preserved).
     Fix: identity comparison `url is None` instead of `url == None`.
     """
     try:
         if url is None: return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         clean_title = cleantitle.geturl(url['tvshowtitle']) + '-s%02d' % int(season)
         url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, url['year'])))
         r = cfScraper.get(url).content
         r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         for i in r[0]:
             # match the anchor whose text is exactly 'Episode <n>'
             if i.content == 'Episode %s' % episode:
                 url = i.attrs['href']
         return url
     except:
         return
예제 #14
0
 def searchShow(self, title, season, episode, aliases, headers):
     """Probe episode URLs for each alias title until one resolves.

     Bug fix: the loop iterated over `aliases` but always built the URL
     from the original `title`, so every attempt was identical; each
     alias's own title is now used (matching the sibling searchMovie).
     `url` is also initialized up front so an empty alias list returns
     None instead of raising NameError.
     """
     try:
         url = None
         for alias in aliases:
             url = '%s/tv-show/%s/season/%01d/episode/%01d' % (
                 self.base_link, cleantitle.geturl(alias['title']), int(season),
                 int(episode))
             url = client.request(url,
                                  headers=headers,
                                  output='geturl',
                                  timeout='10')
             if url is not None and url != self.base_link:
                 break
         return url
     except:
         log_utils.log('cartoonhd - Exception', 1)
         return
예제 #15
0
 def searchShow(self, title, season, aliases):
     """Search for '<title> Season <n>' and return its watching.html URL.

     Fix: the season capture was widened from a single digit `(\\d)` to
     `(\\d+)` so seasons 10 and above can match; raw strings for regexes.
     """
     try:
         #title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urljoin(self.base_link, self.search_link % cleantitle.geturl(search))
         r = cfScraper.get(url).content
         r = ensure_text(r, errors='ignore')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
         # each entry: (href, display title, [(show title, season number)])
         r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
         url = urljoin(self.base_link, '%s/watching.html' % url)
         return url
     except:
         log_utils.log('123movies1 exception', 1)
         return
예제 #16
0
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Search the site for the show and return the first matching result URL."""
     try:
         tvshowtitle = cleantitle.geturl(tvshowtitle)
         # collapse spaces/dashes into '+' separators for the search query
         query = tvshowtitle.replace(' ', '+').replace('-', '+').replace('++', '+')
         search_url = self.base_link + self.search_link % query
         page = client.request(search_url)
         link_re = re.compile('<a href="(.+?)">', re.DOTALL)
         for block in client.parseDOM(page, 'div', attrs={'class': 'content-left'}):
             for href in link_re.findall(block):
                 # accept the first link whose cleaned path contains the cleaned title
                 if cleantitle.get(tvshowtitle) in cleantitle.get(href):
                     return self.base_link + href
         return
     except:
         failure = traceback.format_exc()
         log_utils.log('watchseriestv - Exception: \n' + str(failure))
         return
예제 #17
0
    def searchShow(self, title, season, aliases, headers):
        """Search for '<title> Season <n>' and return its watching.html URL."""
        try:
            title = cleantitle.normalize(title)
            search = '%s Season %01d' % (title, int(season))
            url = urlparse.urljoin(
                self.base_link, self.search_link % cleantitle.geturl(search))
            # r = client.request(url, headers=headers, timeout='10')
            r = cfScraper.get(url).content  # Cloudflare-aware fetch

            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            # NOTE(review): (\d) captures a single digit only — seasons >= 10
            # can never match here; confirm whether that is intentional
            r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1]))
                 for i in r]
            r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
            # pick the first result whose captured show title matches an alias
            # and whose captured season equals the requested one
            url = [
                i[0] for i in r
                if self.matchAlias(i[2][0], aliases) and i[2][1] == season
            ][0]
            url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            return
예제 #18
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape direct <source> video links for (pre-1970) titles.

        Fixes: `url == None` → `url is None`; raw string for the <title>
        regex. Behavior otherwise unchanged.
        """
        sources = []
        try:
            if url is None: return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title']
            year = data['year']
            # NOTE(review): this scraper serves only titles from 1970 or
            # earlier — anything newer is skipped; confirm that is intended
            if int(year) > 1970:
                return sources

            scrape = title.lower().replace(' ', '+')

            #start_url = self.search_link %(self.goog,scrape,year)
            start_url = self.base_link + self.search_link % scrape

            html = client.request(start_url)
            posts = client.parseDOM(html, 'div', attrs={'class': 'post'})
            for post in posts:
                url = client.parseDOM(post, 'a', ret='href')[0]
                if self.base_link in url:
                    if 'webcache' in url:
                        continue
                    if cleantitle.geturl(title) in url:
                        html2 = client.request(url)
                        chktitle = re.compile(r'<title.+?>(.+?)</title>', re.DOTALL).findall(html2)[0]
                        # confirm both title and year appear in the page <title>
                        if title in chktitle and year in chktitle:
                            links = client.parseDOM(html2, 'source', ret='src')
                            for link in links:
                                sources.append({'source': 'direct', 'quality': 'SD', 'language': 'en', 'url': link, 'info': '', 'direct': True, 'debridonly': False})
            return sources
        except:
            log_utils.log('BNWM1 - Exception', 1)
            return sources
예제 #19
0
    def sources(self, url, hostDict, hostprDict):
        """Collect download/stream hoster links for the title encoded in *url*."""
        sources = []
        try:
            if url is None: return
            data = parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
            title = data['title']
            year = data['year']

            tit = cleantitle.geturl(title + ' ' + year)
            query = urljoin(self.base_link, tit)

            r = client.request(query, referer=self.base_link, redirect=True)
            # sanity check: the landed page must mention the expected IMDb id
            if not data['imdb'] in r:
                return sources

            links = []

            try:
                # links from the "download" tab (followed one page deeper)
                down = client.parseDOM(r, 'div', attrs={'id':
                                                        'tab-download'})[0]
                down = client.parseDOM(down, 'a', ret='href')[0]
                data = client.request(down)
                frames = client.parseDOM(data,
                                         'div',
                                         attrs={'class': 'single-link'})
                frames = [
                    client.parseDOM(i, 'a', ret='href')[0] for i in frames if i
                ]
                for i in frames:
                    links.append(i)

            except Exception:
                pass
            try:
                # embedded iframe sources from the "stream" tab
                streams = client.parseDOM(r, 'div', attrs={'id':
                                                           'tab-stream'})[0]
                streams = re.findall(r'''iframe src=(.+?) frameborder''',
                                     streams.replace('&quot;', ''),
                                     re.I | re.DOTALL)
                for i in streams:
                    links.append(i)
            except Exception:
                pass

            for url in links:
                try:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        # not a free host; check the premium (debrid) list
                        valid, host = source_utils.is_host_valid(
                            url, hostprDict)
                        if not valid:
                            continue
                        else:
                            rd = True
                    else:
                        rd = False
                    #quality, _ = source_utils.get_release_quality(url, url)
                    quality = '720p'
                    host = client.replaceHTMLCodes(host)
                    host = ensure_text(host)
                    # rd marks debrid-only hosts
                    if rd:
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': True
                        })
                    else:
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
                except Exception:
                    pass
            return sources
        except:
            log_utils.log('filmxy', 1)
            return sources
예제 #20
0
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Return the cleaned show title as the show 'url' token."""
     try:
         return cleantitle.geturl(tvshowtitle)
     except:
         return
예제 #21
0
 def sources(self, url, hostDict, hostprDict):
     """Resolve player links for the movie/episode described by *url*
     (a urlencoded metadata query string)."""
     try:
         sources = []
         if url is None:
             return sources
         hostDict = hostDict + hostprDict
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         # NOTE(review): eval on serialized aliases — assumes the query
         # string is trusted (built by this addon); verify upstream
         aliases = eval(data['aliases'])
         headers = {}
         if 'tvshowtitle' in data:
             ep = data['episode']
             # try the direct season URL first
             url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                 self.base_link, cleantitle.geturl(
                     data['tvshowtitle']), int(data['season']), ep)
             # r = client.request(url, headers=headers, timeout='10', output='geturl')
             r = cfScraper.get(url).content
             if url is None:
                 url = self.searchShow(data['tvshowtitle'], data['season'],
                                       aliases, headers)
         else:
             url = self.searchMovie(data['title'], data['year'], aliases,
                                    headers)
             if url is None:
                 # fall back to the direct film URL
                 url = '%s/film/%s/watching.html?ep=0' % (
                     self.base_link, cleantitle.geturl(data['title']))
         if url is None:
             raise Exception()
         # r = client.request(url, headers=headers, timeout='10')
         r = cfScraper.get(url).content
         r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
         if 'tvshowtitle' in data:
             ep = data['episode']
             # only anchors belonging to the requested episode
             links = client.parseDOM(r,
                                     'a',
                                     attrs={'episode-data': ep},
                                     ret='player-data')
         else:
             links = client.parseDOM(r, 'a', ret='player-data')
         for link in links:
             link = "https:" + link if not link.startswith('http') else link
             if '123movieshd' in link or 'seriesonline' in link:
                 # these hosts embed google-video redirector URLs directly
                 # r = client.request(link, headers=headers, timeout='10')
                 r = cfScraper.get(link).content
                 r = re.findall('(https:.*?redirector.*?)[\'\"]', r)
                 for i in r:
                     sources.append({
                         'source':
                         'gvideo',
                         'quality':
                         directstream.googletag(i)[0]['quality'],
                         'language':
                         'en',
                         'url':
                         i,
                         'direct':
                         True,
                         'debridonly':
                         False
                     })
             else:
                 valid, host = source_utils.is_host_valid(link, hostDict)
                 if valid:
                     quality, info = source_utils.get_release_quality(
                         link, link)
                     if 'load.php' not in link:
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'info': info,
                             'url': link,
                             'direct': False,
                             'debridonly': False
                         })
         return sources
     except:
         return sources
예제 #22
0
    def sources(self, url, hostDict, hostprDict):
        """Resolve player links for the item described by *url*
        (a urlencoded metadata query string)."""
        sources = []
        try:

            if url == None: return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            # NOTE(review): eval on serialized aliases — assumes the query
            # string is trusted (built by this addon); verify upstream
            aliases = eval(data['aliases'])

            if 'tvshowtitle' in data:
                ep = data['episode']
                # try the direct season URL first
                url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                    self.base_link, cleantitle.geturl(
                        data['tvshowtitle']), int(data['season']), ep)
                r = client.request(url, timeout='10', output='geturl')

                # NOTE(review): url was just assigned a non-empty string above,
                # so this None-check can never trigger — likely meant to test r
                if url == None:
                    url = self.searchShow(data['tvshowtitle'], data['season'],
                                          aliases)

            else:
                url = self.searchMovie(data['title'], data['year'], aliases)

            if url == None: raise Exception()

            r = cfScraper.get(url).content
            r = ensure_text(r, errors='ignore')
            r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
            if 'tvshowtitle' in data:
                ep = data['episode']
                # only anchors belonging to the requested episode
                links = client.parseDOM(r,
                                        'a',
                                        attrs={'episode-data': ep},
                                        ret='player-data')
            else:
                links = client.parseDOM(r, 'a', ret='player-data')

            for link in links:
                try:
                    if link.startswith('//'):
                        link = 'https:' + link
                    # host = last two dotted labels of the link's netloc
                    host = re.findall('([\w]+[.][\w]+)$',
                                      urlparse(link.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    if 'load.php' not in link:
                        sources.append({
                            'source': host,
                            'quality': '720p',
                            'language': 'en',
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            log_utils.log('123movies0 exception', 1)
            return sources