Example #1
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.getsearch(titles[0])))

            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(titles[0])
            scraper = cfscrape.create_scraper()
            data = scraper.get(query).content
            #data = client.request(query, referer=self.base_link)
            data = client.parseDOM(data, 'div', attrs={'class': 'result-item'})
            r = dom_parser.parse_dom(data, 'div', attrs={'class': 'title'})
            r = zip(
                dom_parser.parse_dom(r, 'a'),
                dom_parser.parse_dom(data, 'span', attrs={'class': 'year'}))

            url = []
            for i in range(len(r)):
                title = cleantitle.get(r[i][0][1])
                title = re.sub(r'(\d+p|4k|3d|hd|season\d+)', '', title)
                y = r[i][1][1]
                link = r[i][0][0]['href']
                if 'season' in title: continue
                if t == title and y == year:
                    if 'season' in link:
                        # a season link is returned on its own immediately
                        url.append(source_utils.strip_domain(link))
                        return url[0]
                    else:
                        # plain matches are collected and returned as a list
                        url.append(source_utils.strip_domain(link))

            return url
        except:
            return
Example #2
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         self.basetester()
         url = urlparse.urljoin(
             self.base_link,
             self.search_link % cleantitle.geturl(title).replace('-', '+'))
         r = client.request(url, cookie='check=2')
         m = dom_parser.parse_dom(r, 'div', attrs={'class': 'masonry'})
         m = dom_parser.parse_dom(m, 'a', req='href')
         m = [(i.attrs['href']) for i in m if i.content == title]
         url = urlparse.urljoin(self.base_link, m[0])
         return url
     except:
         return
Example #3
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources
        
            hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
            result = client.request(url, timeout=10)
            
            dom = dom_parser.parse_dom(result, 'a', req='data-video')
            urls = [i.attrs['data-video'] if i.attrs['data-video'].startswith('https') else 'https:' + i.attrs['data-video'] for i in dom]

            for url in urls:
                dom = []
                if 'vidnode.net' in url:
                    result = client.request(url, timeout=10)
                    dom = dom_parser.parse_dom(result, 'source', req=['src','label'])
                    dom = [(i.attrs['src'] if i.attrs['src'].startswith('https') else 'https:' + i.attrs['src'], i.attrs['label']) for i in dom if i]
                elif 'ocloud.stream' in url:
                    result = client.request(url, timeout=10)
                    base = re.findall('<base href="([^"]+)">', result)[0]
                    hostDict += [base]
                    dom = dom_parser.parse_dom(result, 'a', req=['href','id'])
                    dom = [(i.attrs['href'].replace('./embed',base+'embed'), i.attrs['id']) for i in dom if i]
                    dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]                        
                if dom:                
                    try:
                        for r in dom:
                            valid, hoster = source_utils.is_host_valid(r[0], hostDict)

                            if not valid: continue
                            quality = source_utils.label_to_quality(r[1])
                            urls, host, direct = source_utils.check_directstreams(r[0], hoster)
                            for x in urls:
                                # only direct streams report a size; guard so 'size' is always bound
                                size = source_utils.get_size(x['url']) if direct else None
                                if size: sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False, 'info': size})
                                else: sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                    except: pass
                else:
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    try:
                        url.decode('utf-8')
                        sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                    except:
                        pass
            return sources
        except:
            return sources
Example #4
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources
            url = urlparse.urljoin(self.base_link, url)
            for i in range(3):
                result = client.request(url, timeout=10)
                if result is not None: break
            
            dom = dom_parser.parse_dom(result, 'div', attrs={'class':'links', 'id': 'noSubs'})
            result = dom[0].content
            
            links = re.compile('<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch',re.DOTALL).findall(result)         
            for link in links:  # could be capped, e.g. links[:5]
                try:
                    url2 = urlparse.urljoin(self.base_link, link[1])
                    for i in range(2):
                        result2 = client.request(url2, timeout=3)
                        if result2 is not None: break
                    r = re.compile('href="([^"]+)"\s+class="action-btn').findall(result2)[0]
                    valid, hoster = source_utils.is_host_valid(r, hostDict)
                    if not valid: continue
                    #log_utils.log('JairoxDebug1: %s - %s' % (url2,r), log_utils.LOGDEBUG)
                    urls, host, direct = source_utils.check_directstreams(r, hoster)
                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                    
                except:
                    #traceback.print_exc()
                    pass           
                    
            #log_utils.log('JairoxDebug2: %s' % (str(sources)), log_utils.LOGDEBUG)
            return sources
        except:
            return sources
Example #5
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources
            r = client.request(url, cookie='check=2')

            m = dom_parser.parse_dom(r, 'table', attrs={'class':
                                                        'show_links'})[0]
            # crude scrape of (host label, href) pairs out of the show_links table
            links = re.findall('k">(.*?)<.*?f="(.*?)"', m.content)
            for link in links:
                try:
                    sources.append({
                        'source': link[0],
                        'quality': 'SD',
                        'language': 'en',
                        'url': link[1],
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #6
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None: return
         data = urlparse.parse_qs(url)
         data = dict((i, data[i][0]) for i in data)
         url = urlparse.urljoin(
             self.base_link, self.search_link %
             cleantitle.geturl(data['tvshowtitle']).replace('-', '+'))
         r = client.request(url, cookie='check=2')
         m = dom_parser.parse_dom(r, 'div', attrs={'class': 'masonry'})
         m = dom_parser.parse_dom(m, 'a', req='href')
         m = [(i.attrs['href']) for i in m
              if i.content == data['tvshowtitle']]
         query = '%s/season-%s/episode-%s/' % (m[0], season, episode)
         url = urlparse.urljoin(self.base_link, query)
         return url
     except:
         return
Example #7
def parseDOM(html, name='', attrs=None, ret=False):
    # thin wrapper over dom_parser.parse_dom: attr values are anchored as
    # regexes; returns matched attribute values when ret is set, else contents
    if attrs:
        attrs = dict((key, re.compile(value + ('$' if value else '')))
                     for key, value in attrs.iteritems())
    results = dom_parser.parse_dom(html, name, attrs, ret)
    if ret:
        results = [result.attrs[ret.lower()] for result in results]
    else:
        results = [result.content for result in results]
    return results
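A minimal usage sketch for this wrapper (the HTML input is hypothetical; it assumes dom_parser.parse_dom behaves as in the other examples):

    html = '<div class="quality">HD</div><div class="quality">SD</div>'
    parseDOM(html, 'div', attrs={'class': 'quality'})               # -> ['HD', 'SD']
    parseDOM(html, 'div', attrs={'class': 'quality'}, ret='class')  # -> ['quality', 'quality']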
Example #8
    def searchShow(self, title, season, year, aliases, headers):
        try:
            clean_title = cleantitle.geturl(title).replace('-','+')

            url = urlparse.urljoin(self.base_link, self.search_link % ('%s+Season+%01d' % (clean_title, int(season))))
            r = self.scraper.get(url).content

            r = client.parseDOM(r, 'div', attrs={'class': 'list_movies'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href']) for i in r if '%s - Season %01d' % (title, int(season)) in i.content]

            return r[0]
        except:
            return
Example #9
    def searchMovie(self, title, year, aliases, headers):
        try:
            clean_title = cleantitle.geturl(title).replace('-','+')

            url = urlparse.urljoin(self.base_link, self.search_link % clean_title)
            r = self.scraper.get(url).content

            r = client.parseDOM(r, 'div', attrs={'class': 'list_movies'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [i.attrs['href'] for i in r if i.content == '%s (%s)' % (title, year)]

            return r[0]
        except:
            return
Example #10
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         t = cleantitle.geturl(title).replace('-', '+')
         p = urllib.urlencode({'keyword': t, 'id': 1})
         r = client.request(self.search_link, post=p, XHR=True)
         try:
             r = json.loads(r)
         except:
             r = None
         r = dom_parser.parse_dom(r['content'],
                                  'a',
                                  attrs={'class': 'ss-title'})
         url = '%s%s-e0.html' % (self.base_link, r[0].attrs['href'].replace(
             'serie', 'episode'))
         return url
     except:
         return
Example #11
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            url = urlparse.urljoin(self.base_link, url)
            result = client.request(url)

            title = cleantitle.get(title)
            premiered = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(premiered)[0]
            premiered = '%s/%s/%s' % (premiered[2], premiered[1], premiered[0])
            items = dom_parser.parse_dom(result, 'a', attrs={'itemprop':'url'})

            url = [i.attrs['href'] for i in items if bool(re.compile('<span\s*>%s<.*?itemprop="episodeNumber">%s<\/span>' % (season,episode)).search(i.content))][0]
            
            url = url.encode('utf-8')
            return url
        except:
            return
Example #12
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None: return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         q = cleantitle.geturl(url['tvshowtitle']).replace('-', '+')
         t = q + '+season+%s' % season
         p = urllib.urlencode({'keyword': t, 'id': 1})
         r = client.request(self.search_link, post=p, XHR=True)
         try:
             r = json.loads(r)
         except:
             r = None
         r = dom_parser.parse_dom(r['content'],
                                  'a',
                                  attrs={'class': 'ss-title'})
         url = '%s%s-e%s.html' % (self.base_link,
                                  r[0].attrs['href'].replace(
                                      'serie', 'episode'), episode)
         return url
     except:
         return
Example #13
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            # if (self.user != '' and self.password != ''): #raise Exception()

                # login = urlparse.urljoin(self.base_link, '/login.html')

                # post = urllib.urlencode({'username': self.user, 'password': self.password, 'submit': 'Login'})

                # cookie = client.request(login, post=post, output='cookie', close=False)

                # r = client.request(login, post=post, cookie=cookie, output='extended')

                # headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
            # else:
                # headers = {}


            headers = {'User-Agent': client.randomagent()}
            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                year = data['year']
                def searchname(r):
                    # keep candidates whose cleaned slug matches the title;
                    # returns the first matching link, or [] when nothing matches
                    r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                    r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
                    r = [] if r == [] else [i[0] for i in r][0]
                    return r
                
                if 'tvshowtitle' in data:
                    link = urlparse.urljoin(self.base_link, 'tvshow-%s.html' %title[0].upper())
                    r = client.request(link, headers=headers)
                    pages = dom_parser.parse_dom(r, 'span', attrs={'class': 'break-pagination-2'})
                    pages = dom_parser.parse_dom(pages, 'a', req='href')
                    pages = [(i.attrs['href']) for i in pages]
                    if pages == []:
                        r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                        r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                        r = searchname(r)
                    else:
                        for page in pages:
                            link = urlparse.urljoin(self.base_link, page)
                            r = client.request(link, headers=headers)
                            r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                            r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                            r = searchname(r)
                            if r != []: break
                else:
                    link = urlparse.urljoin(self.base_link, 'movies-%s.html' %title[0].upper())
                    r = client.request(link, headers=headers)
                    pages = dom_parser.parse_dom(r, 'span', attrs={'class': 'break-pagination-2'})
                    pages = dom_parser.parse_dom(pages, 'a', req='href')
                    pages = [(i.attrs['href']) for i in pages]
                    if pages == []:
                        r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                        r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]
                        r = searchname(r)
                    else:
                        for page in pages:
                            link = urlparse.urljoin(self.base_link, page)
                            r = client.request(link, headers=headers)
                            r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                            r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]
                            r = searchname(r)
                            if r != []: break
                        
                    

                # leaving the old search in, in case streamlord re-enables searching on the site
                # query = urlparse.urljoin(self.base_link, self.search_link)

                # post = urllib.urlencode({'searchapi2': title})

                # r = client.request(query, post=post, headers=headers)

                # if 'tvshowtitle' in data:
                    # r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                    # r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                # else:
                    # r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                    # r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]

                # r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                # r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
                # r = [i[0] for i in r][0]

                u = urlparse.urljoin(self.base_link, r)
                for i in range(3):
                    r = client.request(u, headers=headers)
                    if 'failed' not in r: break

                if 'season' in data and 'episode' in data:
                    r = re.findall('(episode-.+?-.+?\d+.+?\d+-\d+.html)', r)
                    r = [i for i in r if '-s%02de%02d-' % (int(data['season']), int(data['episode'])) in i.lower()][0]

                    r = urlparse.urljoin(self.base_link, r)

                    r = client.request(r, headers=headers)

            else:
                r = urlparse.urljoin(self.base_link, url)

                r = client.request(r, headers=headers)



            quality = 'HD' if '-movie-' in r else 'SD'

            try:
                f = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', r)[0]
                f = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', f)[0]

                u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % f, r)[0]
                u = re.findall('\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)', u)[0]

                a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], r)[0]
                b = client.parseDOM(r, 'span', {'id': u[2]})[0]

                url = u[0] + a + b
                url = url.replace('"', '').replace(',', '').replace('\/', '/')
                url += '|' + urllib.urlencode(headers)
            except:
                try:
                    url = r = jsunpack.unpack(r)
                    url = url.replace('"', '')
                except:
                    url = re.findall(r'sources[\'"]\s*:\s*\[.*?file[\'"]\s*:\s*(\w+)\(\).*function\s+\1\(\)\s*\{\s*return\([\'"]([^\'"]+)',r,re.DOTALL)[0][1]

            sources.append({'source': 'cdn', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False, 'autoplay': True})

            return sources
        except:
            return sources
Example #14
    def movie(self, imdb, title, localtitle, aliases, year):
        '''
        Takes movie information and returns a set of name/value pairs, encoded
        as url params. These params include ts
        (a unique identifier, used to grab sources) and a list of source ids

        Keyword arguments:

        imdb -- string - imdb movie id
        title -- string - name of the movie
        localtitle -- string - regional title of the movie
        year -- string - year the movie was released

        Returns:

        url -- string - url encoded params

        '''
        try:
            clean_title = cleantitle.geturl(title).replace('-', '+')
            query = (self.search_path % clean_title)
            url = urlparse.urljoin(self.base_link, query)

            search_response = client.request(url)

            r = client.parseDOM(search_response,
                                'div',
                                attrs={'class': 'row movie-list'})[0]

            r = dom_parser.parse_dom(r, 'a', req='href')
            url = [(i.attrs['href']) for i in r
                   if cleantitle.get(title) in cleantitle.get(i.content)][0]

            r = client.request(url)
            quality = client.parseDOM(r, 'span', attrs={'class': 'quality'})[0]
            r = client.parseDOM(r, 'div', attrs={'class': 'mt row'})[0]
            sources_list = []
            try:
                if client.parseDOM(r, 'div', ret='data-streamgo')[0]:
                    sources_list.append(
                        'https://streamgo.me/player/%s' %
                        client.parseDOM(r, 'div', ret='data-streamgo')[0])
            except Exception:
                pass
            try:
                if client.parseDOM(r, 'div', ret='data-server_openload')[0]:
                    sources_list.append(
                        'https://openload.co/embed/%s' % client.parseDOM(
                            r, 'div', ret='data-server_openload')[0])
            except Exception:
                pass
            data = {
                'imdb': imdb,
                'title': title,
                'localtitle': localtitle,
                'year': year,
                'quality': quality,
                'sources': sources_list
            }
            url = urllib.urlencode(data)

            return url

        except:
            return
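A hypothetical call and how its return value unpacks (the imdb id, titles, and the `scraper` instance are illustrative; the parse_qs pattern mirrors Examples #6 and #15):

    url = scraper.movie('tt0111161', 'The Shawshank Redemption',
                        'The Shawshank Redemption', [], '1994')
    data = urlparse.parse_qs(url)
    data = dict((i, data[i][0]) for i in data)
    # data['quality'] and data['sources'] now feed the sources() call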
Example #15
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])  # eval of a url param; ast.literal_eval would be safer
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            mid = re.findall('-(\d+)', url)[-1]

            try:
                headers = {'Referer': url}
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = client.request(u, headers=headers, XHR=True)
                r = json.loads(r)['html']
                r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                ids = client.parseDOM(r, 'li', ret='data-id')
                servers = client.parseDOM(r, 'li', ret='data-server')
                labels = client.parseDOM(r, 'a', ret='title')
                r = zip(ids, servers, labels)
                u = urlparse.urljoin(self.base_link, self.info_link % mid)
                quality = client.request(u, headers=headers)
                quality = dom_parser.parse_dom(quality,
                                               'div',
                                               attrs={'class': 'jtip-quality'
                                                      })[0].content
                if quality == "HD":
                    quality = "720p"
                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?',
                                            eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            if eid[1] != '6':
                                url = urlparse.urljoin(
                                    self.base_link, self.embed_link % eid[0])
                                link = client.request(url)
                                link = json.loads(link)['src']
                                valid, host = source_utils.is_host_valid(
                                    link, hostDict)
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': link,
                                    'info': [],
                                    'direct': False,
                                    'debridonly': False
                                })
                            else:
                                url = urlparse.urljoin(
                                    self.base_link,
                                    self.token_link % (eid[0], mid))
                                script = client.request(url)
                                if '$_$' in script:
                                    params = self.uncensored1(script)
                                elif script.startswith(
                                        '[]') and script.endswith('()'):
                                    params = self.uncensored2(script)
                                elif '_x=' in script:
                                    x = re.search('''_x=['"]([^"']+)''',
                                                  script).group(1)
                                    y = re.search('''_y=['"]([^"']+)''',
                                                  script).group(1)
                                    params = {'x': x, 'y': y}
                                else:
                                    raise Exception()

                                u = urlparse.urljoin(
                                    self.base_link, self.source_link %
                                    (eid[0], params['x'], params['y']))
                                r = client.request(u, XHR=True)
                                url = json.loads(r)['playlist'][0]['sources']
                                url = [i['file'] for i in url if 'file' in i]
                                url = [directstream.googletag(i) for i in url]
                                url = [i[0] for i in url if i]

                                for s in url:
                                    if 'lh3.googleusercontent.com' in s['url']:
                                        s['url'] = directstream.googleredirect(
                                            s['url'])

                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': s['quality'],
                                        'language': 'en',
                                        'url': s['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
Example #16
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        '''
        Takes episode information, finds the ts and the list of sources,
        encodes them as name/value pairs, and returns a string of url params

        Keyword arguments:

        url -- string - url params
        imdb -- string - imdb tv show id
        tvdb -- string - tvdb tv show id
        title -- string - episode title
        premiered -- string - date the episode aired (format: year-month-day)
        season -- string - the episode's season
        episode -- string - the episode number

        Returns:

        url -- string - url encoded params

        '''
        try:
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)

            clean_title = cleantitle.geturl(data['tvshowtitle']).replace(
                '-', '+')
            query = (self.search_path % clean_title)
            url = urlparse.urljoin(self.base_link, query)

            search_response = client.request(url)

            r = client.parseDOM(search_response,
                                'div',
                                attrs={'class': 'row movie-list'})[0]

            r = dom_parser.parse_dom(r, 'a', req='href')
            url = [(i.attrs['href']) for i in r if '%s - Season %01d' %
                   (data['tvshowtitle'], int(season)) in i.content][0]

            r = client.request(url)
            r = client.parseDOM(r, 'div', attrs={'id': 'player'})[0]

            url = client.parseDOM(r, 'a', ret='href')[0]
            film_response = client.request(url)

            servers = client.parseDOM(film_response,
                                      'div',
                                      attrs={'id': 'servers'})[0]
            r = dom_parser.parse_dom(servers, 'a', req='title')

            url = [(i) for i in r
                   if 'Episode %02d' % (int(episode)) in i.attrs['title']]
            sources_list = []

            for i in url:
                try:
                    if i.attrs['data-streamgo']:
                        sources_list.append('https://streamgo.me/player/%s' %
                                            i.attrs['data-streamgo'])
                except Exception:
                    pass
                try:
                    if i.attrs['data-openload']:
                        sources_list.append('https://openload.co/embed/%s' %
                                            i.attrs['data-openload'])
                except Exception:
                    pass
            quality = client.parseDOM(film_response,
                                      'span',
                                      attrs={'class': 'quality'})[0]

            data.update({
                'title': title,
                'premiered': premiered,
                'season': season,
                'episode': episode,
                'quality': quality,
                'sources': sources_list
            })

            url = urllib.urlencode(data)

            return url

        except:
            return
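As with the movie variant, a hypothetical episode call unpacks the same way (ids and date are illustrative; show_url would come from the scraper's tvshow() method, which is not shown here):

    url = scraper.episode(show_url, 'tt0944947', '121361', 'Winter Is Coming',
                          '2011-04-17', '1', '1')
    data = dict((i, v[0]) for i, v in urlparse.parse_qs(url).iteritems())
    # data carries 'quality' plus the collected 'sources' list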
Example #17
 def resolve(self, url):
     # follow the first link under the video player to the hoster url
     r = client.request(url)
     r = dom_parser.parse_dom(r, 'div', {'class': 'link_under_video'})
     r = dom_parser.parse_dom(r, 'a', req='href')
     return r[0].attrs['href']
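Taken together, the examples trace one scraper flow: movie()/episode() build a url string, sources() expands it into candidate streams, and resolve() turns a chosen stream into a playable link. A minimal sketch under that assumption (the source class name and hostDict contents are hypothetical):

    scraper = source()  # hypothetical scraper class exposing the methods above
    url = scraper.movie('tt0111161', 'The Shawshank Redemption',
                        'The Shawshank Redemption', [], '1994')
    srcs = scraper.sources(url, ['openload.co', 'streamgo.me'], [])
    if srcs:
        link = scraper.resolve(srcs[0]['url'])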