Example No. 1
    def searchMovie(self, title, year, aliases, headers):
        try:
            for alias in aliases:
                url = '%s/full-movie/%s' % (self.base_link,
                                            cleantitle.geturl(alias['title']))
                url = client.request(url,
                                     headers=headers,
                                     output='geturl',
                                     timeout='10')
                if not url == None and url != self.base_link: break
            if url == None:
                for alias in aliases:
                    url = '%s/full-movie/%s-%s' % (self.base_link,
                                                   cleantitle.geturl(
                                                       alias['title']), year)
                    url = client.request(url,
                                         headers=headers,
                                         output='geturl',
                                         timeout='10')
                    if not url == None and url != self.base_link: break

            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('CartoonHD - Exception: \n' + str(failure))
            return
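
Every snippet in this listing builds URL slugs with cleantitle.geturl(...). The helper itself is not shown here; as a rough, hypothetical stand-in (not the addon's actual implementation), it behaves like a slugifier along these lines:

import re

def geturl(title):
    # Hypothetical approximation of cleantitle.geturl: lower-case the title,
    # drop punctuation and join the words with hyphens, e.g.
    # 'The Dark Knight' -> 'the-dark-knight'.
    if title is None:
        return None
    title = title.lower()
    title = re.sub(r'[^a-z0-9\s-]', '', title)
    return re.sub(r'\s+', '-', title.strip())

Several of the examples below then call .replace('-', '+') on the slug when the target site expects a plus-separated search query rather than a hyphenated path segment.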
Example No. 2
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         clean_title = cleantitle.geturl(title)
         url = urlparse.urljoin(self.base_link, (self.search_link %(clean_title,year)))
         return url
     except:
         return
Example No. 3
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         tvshowtitle = cleantitle.geturl(tvshowtitle)
         url = tvshowtitle
         return url
     except:
         return
Example No. 4
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         url = cleantitle.geturl(tvshowtitle)
         url = url.replace('-', '+')
         return url
     except:
         return
Example No. 5
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url == None: return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         # store premiered/season/episode alongside the decoded query data
         url['premiered'], url['season'], url['episode'] = premiered, season, episode
         try:
             clean_title = cleantitle.geturl(
                 url['tvshowtitle']) + '-season-%d' % int(season)
             search_url = urlparse.urljoin(
                 self.base_link,
                 self.search_link % clean_title.replace('-', '+'))
             search_results = self.scraper.get(search_url).content
             parsed = client.parseDOM(search_results, 'div',
                                      {'id': 'movie-featured'})
             parsed = [(client.parseDOM(i, 'a', ret='href'),
                        re.findall('<b><i>(.+?)</i>', i)) for i in parsed]
             parsed = [
                 (i[0][0], i[1][0]) for i in parsed
                 if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)
             ]
             url = parsed[0][0]
         except:
             pass
         # the season page lists its episode links inside the '#details' block
         data = self.scraper.get(url).content
         data = client.parseDOM(data, 'div', attrs={'id': 'details'})
         data = zip(client.parseDOM(data, 'a'),
                    client.parseDOM(data, 'a', ret='href'))
         url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
         return url[0][1]
     except:
         return
Example No. 6
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         title = cleantitle.geturl(title).replace('--', '-')
         url = self.base_link + self.search_link % title
         return url
     except:
         return
Example No. 7
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link,
                                   self.search_link % cleantitle.geturl(title))
            r = client.request(url, headers=headers, timeout='15')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                # keep results that include a year, then prefer an exact alias + year match
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and (year == i[2])
                ][0]
            except:
                url = None

            if url == None:
                # fall back to the first result that matches an alias, ignoring the year
                url = [
                    i[0] for i in results if self.matchAlias(i[1], aliases)
                ][0]

            url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            return
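
Examples 7 and 14 rely on a matchAlias helper defined elsewhere on the scraper class; its implementation is not part of this listing. A minimal sketch of what such a method might look like, assuming aliases is the list of {'title': ...} dicts iterated in Example 1 and reusing the cleantitle.get normaliser the other examples compare titles with:

 def matchAlias(self, title, aliases):
     try:
         # hypothetical sketch: treat the scraped title as a match if it
         # normalises to the same string as any known alias title
         return any(cleantitle.get(alias['title']) == cleantitle.get(title)
                    for alias in aliases)
     except Exception:
         return False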
Example No. 8
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         title = cleantitle.geturl(title)
         url = self.base_link + self.search_link % (title, year)
         return url
     except:
         return
Example No. 9
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         title = cleantitle.geturl(title)
         title = title.replace('-', '+')
         query = '%s+%s' % (title, year)
         url = self.base_link % query
         return url
     except:
         return
Example No. 10
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         clean_title = cleantitle.geturl(title).replace('-', '+')
         url = urlparse.urljoin(self.base_link,
                                self.search_link % clean_title)
         url = {'url': url, 'title': title, 'year': year}
         url = urllib.urlencode(url)
         return url
     except:
         return
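
Example 10 does not return a plain URL: it packs the search URL, title and year into a query string with urllib.urlencode, and the episode() snippets elsewhere in this listing (Examples 5, 13, 15 and 20) show the matching decode step via urlparse.parse_qs. A minimal round trip under Python 2, which is what these snippets target (the field values below are made up):

import urllib
import urlparse

query = urllib.urlencode({'url': '/search/some+title', 'title': 'Some Title', 'year': '2005'})

data = urlparse.parse_qs(query)
data = dict((i, data[i][0]) for i in data)  # same unpacking used in the episode() examples
assert data['title'] == 'Some Title' and data['year'] == '2005'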
Example No. 11
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = urlparse.urljoin(
             self.base_link,
             self.search_link % cleantitle.geturl(title).replace('-', '+'))
         r = client.request(url, cookie='check=2')
         m = dom_parser.parse_dom(r, 'div', attrs={'class': 'masonry'})
         m = dom_parser.parse_dom(m, 'a', req='href')
         m = [(i.attrs['href']) for i in m if i.content == title]
         url = urlparse.urljoin(self.base_link, m[0])
         return url
     except:
         return
Example No. 12
 def searchShow(self, title, season, episode, aliases, headers):
     try:
         for alias in aliases:
             # build the URL from each alias title in turn, as searchMovie does
             url = '%s/show/%s/season/%01d/episode/%01d' % (
                 self.base_link, cleantitle.geturl(alias['title']),
                 int(season), int(episode))
             url = client.request(url,
                                  headers=headers,
                                  output='geturl',
                                  timeout='10')
             if not url == None and url != self.base_link: break
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('CartoonHD - Exception: \n' + str(failure))
         return
Example No. 13
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url == None: return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         clean_title = cleantitle.geturl(url['tvshowtitle'])+'-s%02d' % int(season)
         url = urlparse.urljoin(self.base_link, (self.search_link %(clean_title,url['year'])))
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         for i in r[0]:
             if i.content == 'Episode %s'%episode:
                 url = i.attrs['href']
         return url
     except:
         return
Example No. 14
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(search))
         r = client.request(url, headers=headers, timeout='15')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
         url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('Series9 - Exception: \n' + str(failure))
         return  
Example No. 15
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url == None: return
         data = urlparse.parse_qs(url)
         data = dict((i, data[i][0]) for i in data)
         url = urlparse.urljoin(
             self.base_link, self.search_link %
             cleantitle.geturl(data['tvshowtitle']).replace('-', '+'))
         r = client.request(url, cookie='check=2')
         m = dom_parser.parse_dom(r, 'div', attrs={'class': 'masonry'})
         m = dom_parser.parse_dom(m, 'a', req='href')
         m = [(i.attrs['href']) for i in m
              if i.content == data['tvshowtitle']]
         query = '%s/season-%s/episode-%s/' % (m[0], season, episode)
         url = urlparse.urljoin(self.base_link, query)
         return url
     except:
         return
Example No. 16
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         clean_title = cleantitle.geturl(title).replace('-', '+')
         url = urlparse.urljoin(self.base_link,
                                (self.search_link % clean_title))
         r = self.scraper.get(url).content
         r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         r = [(i[0].attrs['href'],
               re.search('Release:\s*(\d+)', i[0].content)) for i in r if i]
         r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
         r = [(i[0], i[1]) for i in r if i[1] == year]
         if r:
             url = r[0][0]
             return url
         else:
             return
     except Exception:
         return
Example No. 17
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            clean_title = cleantitle.geturl(title)
            search_url = urlparse.urljoin(
                self.base_link,
                self.search_link % clean_title.replace('-', '+'))
            r = cache.get(client.request, 1, search_url)
            r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  re.findall('.+?elease:\s*(\d{4})</',
                             i), re.findall('<b><i>(.+?)</i>', i)) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r
                 if (cleantitle.get(i[2][0]) == cleantitle.get(title)
                     and i[1][0] == year)]
            url = r[0][0]

            return url
        except Exception:
            return
Example No. 18
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         clean_title = cleantitle.geturl(title)
         search_url = urlparse.urljoin(
             self.base_link,
             self.search_link % clean_title.replace('-', '+'))
         search_results = self.scraper.get(search_url).content
         parsed = client.parseDOM(search_results, 'div',
                                  {'id': 'movie-featured'})
         parsed = [(client.parseDOM(i, 'a', ret='href'),
                    re.findall('.+?elease:\s*(\d{4})</',
                               i), re.findall('<b><i>(.+?)</i>', i))
                   for i in parsed]
         parsed = [(i[0][0], i[1][0], i[2][0]) for i in parsed
                   if (cleantitle.get(i[2][0]) == cleantitle.get(title)
                       and i[1][0] == year)]
         url = parsed[0][0]
         return url
     except:
         return
Example No. 19
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         clean_title = cleantitle.geturl(title)
         search_url = urlparse.urljoin(
             self.base_link,
             self.search_link % clean_title.replace('-', '+'))
         r = cache.get(client.request, 1, search_url)
         r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
         r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
               re.findall('status-year">(\d{4})</div', i.content,
                          re.DOTALL)[0]) for i in r if i]
         r = [(i[0][0].attrs['href'],
               re.findall('(.+?)</b><br', i[0][0].content,
                          re.DOTALL)[0], i[1]) for i in r if i]
         r = [(i[0], i[1], i[2]) for i in r if (
             cleantitle.get(i[1]) == cleantitle.get(title) and i[2] == year)
              ]
         url = r[0][0]
         return url
     except Exception:
         return
Example No. 20
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            # store premiered/season/episode alongside the decoded query data
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(
                    url['tvshowtitle']) + '-season-%d' % int(season)
                search_url = urlparse.urljoin(
                    self.base_link,
                    self.search_link % clean_title.replace('-', '+'))
                r = cache.get(client.request, 1, search_url)
                r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
                r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
                      dom_parser2.parse_dom(i,
                                            'div',
                                            attrs={'class': 'status'})[0])
                     for i in r if i]
                r = [(i[0][0].attrs['href'],
                      re.findall('(.+?)</b><br', i[0][0].content,
                                 re.DOTALL)[0],
                      re.findall('(\d+)', i[1].content)[0]) for i in r if i]
                r = [(i[0], i[1].split(':')[0], i[2]) for i in r
                     if (cleantitle.get(i[1].split(':')[0]) == cleantitle.get(
                         url['tvshowtitle']) and i[2] == str(int(season)))]
                url = r[0][0]
            except:
                pass
            # the season page lists its episode links inside the '#details' block
            data = client.request(url)
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'),
                       client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
Example No. 21
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                if 'tvshowtitle' in data:
                    url = '%s/episode/%s-s%02de%02d/' % (
                        self.base_link, cleantitle.geturl(data['tvshowtitle']),
                        int(data['season']), int(data['episode']))
                    year = re.findall('(\d{4})', data['premiered'])[0]

                    url = client.request(url, output='geturl')
                    if url == None: raise Exception()

                    r = client.request(url)

                    y = client.parseDOM(r, 'span', attrs={'class': 'date'})
                    y += [
                        i for i in client.parseDOM(
                            r, 'div', attrs={'class': 'metadatac'})
                        if 'date' in i
                    ]
                    y = re.findall('(\d{4})', y[0])[0]
                    if not y == year: raise Exception()

                else:
                    url = '%s/movie/%s-%s/' % (
                        self.base_link, cleantitle.geturl(
                            data['title']), data['year'])

                    url = client.request(url, output='geturl')
                    if url == None: raise Exception()

                    r = client.request(url)

            else:
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url)

            links = client.parseDOM(r, 'iframe', ret='src')

            for link in links:
                try:
                    valid, hoster = source_utils.is_host_valid(link, hostDict)
                    if not valid: continue
                    urls, host, direct = source_utils.check_directstreams(
                        link, hoster)
                    for x in urls:
                        # bump the reported quality when the URL itself hints at HD/1080p
                        if x['quality'] == 'SD':
                            try:
                                if 'HDTV' in x['url'] or '720' in x['url']:
                                    x['quality'] = 'HD'
                                if '1080' in x['url']: x['quality'] = '1080p'
                            except:
                                pass
                        # one source entry per resolved stream URL
                        sources.append({
                            'source': host,
                            'quality': x['quality'],
                            'language': 'en',
                            'url': x['url'],
                            'direct': direct,
                            'debridonly': False
                        })
                except:
                    pass
            return sources
        except:
            return sources
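
For reference, each entry appended to the sources list in Examples 21 and 23 is a plain dict with the same six keys; an illustrative entry (the values here are made up):

example_source = {
    'source': 'gvideo',       # host name, or 'gvideo' for direct Google video links
    'quality': 'HD',          # 'SD', 'HD', '1080p', ...
    'language': 'en',
    'url': 'https://example.com/stream',
    'direct': True,           # True when the URL is a direct stream rather than a hoster page
    'debridonly': False,
}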
Example No. 22
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = cleantitle.geturl(title)
         return url
     except:
         return
Example No. 23
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                ep = data['episode']
                url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                    self.base_link, cleantitle.geturl(
                        data['tvshowtitle']), int(data['season']), ep)
                # resolve the guessed URL; fall back to a title search if it fails
                url = client.request(url,
                                     headers=headers,
                                     timeout='10',
                                     output='geturl')

                if url == None:
                    url = self.searchShow(data['tvshowtitle'], data['season'],
                                          aliases, headers)

            else:
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            if url == None: raise Exception()

            r = client.request(url, headers=headers, timeout='10')
            r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
            if 'tvshowtitle' in data:
                ep = data['episode']
                links = client.parseDOM(r,
                                        'a',
                                        attrs={'episode-data': ep},
                                        ret='player-data')
            else:
                links = client.parseDOM(r, 'a', ret='player-data')

            for link in links:
                if '123movieshd' in link or 'seriesonline' in link:
                    r = client.request(link, headers=headers, timeout='10')
                    r = re.findall('(https:.*?redirector.*?)[\'\"]', r)

                    for i in r:
                        try:
                            sources.append({
                                'source': 'gvideo',
                                'quality': directstream.googletag(i)[0]['quality'],
                                'language': 'en',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })
                        except:
                            pass
                else:
                    try:
                        host = re.findall(
                            '([\w]+[.][\w]+)$',
                            urlparse.urlparse(link.strip().lower()).netloc)[0]
                        if not host in hostDict: raise Exception()
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')

                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        pass

            return sources
        except:
            return sources