Example #1
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            headers = {'User-Agent': client.randomagent()}
            html = client.request(url, headers=headers)

            Links = re.compile('id="link_.+?target="_blank" id="(.+?)"',
                               re.DOTALL).findall(html)
            for vid_url in Links:
                quality, info = source_utils.get_release_quality(
                    vid_url, vid_url)
                host = vid_url.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': vid_url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('SolarMovie - Exception: \n' + str(failure))
            return sources
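Every example on this page builds its request headers around client.randomagent(), which returns a random desktop-browser User-Agent string so repeated requests don't all present the same fingerprint. The helper itself isn't shown here; a minimal sketch of the idea, using only the standard library (the UA strings are illustrative, not the real module's pool):

import random

def randomagent():
    # Illustrative pool; the real client module ships a much longer list.
    agents = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1 Safari/605.1.15',
        'Mozilla/5.0 (X11; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0',
    ]
    return random.choice(agents)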
Example #2
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            try:
                import xbmc
                ip = xbmc.getIPAddress()
            except:
                ip = 'London'

            referer = 'http://www.einthusan.com/movies/watch.php?id=%s' % url

            agent = client.randomagent()

            headers = {'User-Agent': agent, 'Referer': referer}

            url = 'http://cdn.einthusan.com/geturl/%s/hd/%s/' % (url, ip)

            url = client.request(url, headers=headers)

            url += '|%s' % urllib.urlencode({'User-agent': agent})

            sources.append({'source': 'einthusan', 'quality': 'HD', 'provider': 'Einthusan', 'url': url, 'direct': True, 'debridonly': False})
            return sources
        except:
            return sources
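The url += '|%s' % urllib.urlencode(...) step above follows the Kodi convention of appending request headers to a playable URL after a pipe character, encoded as query pairs. A small illustration of what the player ends up receiving (Python 2 style to match the examples; the CDN URL is made up, and pair order may vary):

import urllib

stream = 'http://cdn.example.com/video.mp4'
headers = {'User-Agent': 'Mozilla/5.0', 'Referer': 'http://example.com/'}
play_url = stream + '|' + urllib.urlencode(headers)
# e.g. 'http://cdn.example.com/video.mp4|User-Agent=Mozilla%2F5.0&Referer=http%3A%2F%2Fexample.com%2F'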
Example #3
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            session = self._createSession(randomagent())

            lowerTitle = title.lower()
            stringConstant, searchHTML = self._getSearch(lowerTitle, session)

            possibleTitles = set(
                (lowerTitle,) + tuple((alias['title'].lower() for alias in aliases) if aliases else ())
            )
            soup = BeautifulSoup(searchHTML, 'html.parser', parse_only=SoupStrainer('div', recursive=False))
            for div in soup:
                if div.span and (year in div.span.text) and (div.a.text.lower() in possibleTitles):
                    return {
                        'type': 'movie',
                        'pageURL': self.BASE_URL + div.a['href'],
                        'sConstant': stringConstant,
                        'UA': session.headers['User-Agent'],
                        'cookies': session.cookies.get_dict()
                    }
            return None  # No results found.
        except:
            self._logException()
            return None
Example #4
def sucuri(url, timeout):
    try:
        h = client.randomagent()

        r = client.request(url,
                           headers={'User-Agent': h},
                           timeout=timeout,
                           error=True)

        s = re.compile("S\s*=\s*'([^']+)").findall(r)[0]
        s = base64.b64decode(s)
        s = s.replace(' ', '')
        s = re.sub('String\.fromCharCode\(([^)]+)\)', r'chr(\1)', s)
        s = re.sub('\.slice\((\d+),(\d+)\)', r'[\1:\2]', s)
        s = re.sub('\.charAt\(([^)]+)\)', r'[\1]', s)
        s = re.sub('\.substr\((\d+),(\d+)\)', r'[\1:\1+\2]', s)
        s = re.sub(';location.reload\(\);', '', s)
        s = re.sub(r'\n', '', s)
        s = re.sub(r'document\.cookie', 'cookie', s)

        cookie = ''
        exec(s)
        c = re.compile('([^=]+)=(.*)').findall(cookie)[0]
        c = '%s=%s' % (c[0], c[1])

        return {'User-Agent': h, 'Cookie': c}
    except:
        pass
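sucuri() defeats a Sucuri "JavaScript required" interstitial: it base64-decodes the inline script, regex-rewrites JavaScript idioms (slice, charAt, substr, String.fromCharCode) into Python slicing and chr(), then execs the result to recover the cookie a browser would have set. A toy version of the rewrite-and-exec step, on a fabricated challenge string:

import re

# Fabricated fragment in the shape the decoded Sucuri payload takes.
js = "t = 'abcdef'; cookie = t.slice(0,3) + String.fromCharCode(120);"

py = js.replace(' ', '')
py = re.sub(r'String\.fromCharCode\(([^)]+)\)', r'chr(\1)', py)
py = re.sub(r'\.slice\((\d+),(\d+)\)', r'[\1:\2]', py)

cookie = ''
exec(py)        # rebinds cookie, just as the real helper does
print(cookie)   # -> 'abcx'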
Example #5
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if not url:
                return sources

            ref = urlparse.urljoin(self.base_link, url)
            url = urlparse.urljoin(self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0])

            headers = {'Referer': ref, 'User-Agent': client.randomagent()}

            result = client.request(url, headers=headers, post='')
            result = base64.decodestring(result)
            result = json.loads(result).get('playinfo', [])

            if isinstance(result, basestring):
                result = result.replace('embed.html', 'index.m3u8')

                base_url = re.sub('index\.m3u8\?token=[\w\-]+[^/$]*', '', result)

                r = client.request(result, headers=headers)
                r = [(i[0], i[1]) for i in re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', r, re.DOTALL) if i]
                r = [(source_utils.label_to_quality(i[0]), i[1] + source_utils.append_headers(headers)) for i in r]
                r = [{'quality': i[0], 'url': base_url+i[1]} for i in r]
                for i in r: sources.append({'source': 'CDN', 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': True, 'debridonly': False})
            elif result:
                result = [i.get('link_mp4') for i in result]
                result = [i for i in result if i]
                for i in result:
                    try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'de', 'url': i, 'direct': True, 'debridonly': False})
                    except: pass

            return sources
        except:
            return sources
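The m3u8 branch above pairs each variant's vertical resolution with the URL on the following line of the master playlist. The same regex on a toy playlist (note it relies on at least one attribute following RESOLUTION, as real playlists have):

import re

m3u8 = ('#EXTM3U\n'
        '#EXT-X-STREAM-INF:BANDWIDTH=800000,RESOLUTION=1280x720,CODECS="avc1"\n'
        'hls/720/index.m3u8?token=abc\n'
        '#EXT-X-STREAM-INF:BANDWIDTH=2400000,RESOLUTION=1920x1080,CODECS="avc1"\n'
        'hls/1080/index.m3u8?token=abc\n')

pairs = re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', m3u8, re.DOTALL)
# -> [('720', 'hls/720/index.m3u8?token=abc'), ('1080', 'hls/1080/index.m3u8?token=abc')]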
Example #6
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            year = url['year']
            h = {'User-Agent': client.randomagent()}
            title = cleantitle.geturl(url['title']).replace('-', '+')
            url = urlparse.urljoin(self.base_link, self.search_link % title)
            r = requests.get(url, headers=h)
            r = BeautifulSoup(r.text, 'html.parser').find('div', {'class': 'item'})
            r = r.find('a')['href']
            r = requests.get(r, headers=h)
            r = BeautifulSoup(r.content, 'html.parser')
            quality = r.find('span', {'class': 'calidad2'}).text
            url = r.find('div', {'class':'movieplay'}).find('iframe')['src']
            if quality not in ['1080p', '720p']:
                quality = 'SD'

            valid, host = source_utils.is_host_valid(url, hostDict)
            sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            return sources
        except:
            print("Unexpected error in Furk Script: check_api", sys.exc_info()[0])
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return sources
Example #7
    def episode(self, data, imdb, tvdb, title, premiered, season, episode):
        try:
            session = self._createSession(randomagent())

            # Search with the TV show name and season number string.
            lowerTitle = data
            stringConstant, searchHTML = self._getSearch(
                lowerTitle + ' ' + season, session)

            soup = BeautifulSoup(searchHTML, 'html.parser')
            for div in soup.findAll('div', recursive=False):
                resultName = div.a.text.lower()
                if lowerTitle in resultName and season in resultName:
                    return {
                        'type': 'episode',
                        'episode': episode,
                        'pageURL': self.BASE_URL + div.a['href'],
                        'sConstant': stringConstant,
                        'UA': session.headers['User-Agent'],
                        'cookies': session.cookies.get_dict()
                    }
            return None  # No results found.
        except:
            self._logException()
            return None
Example #8
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
#           search_id = title.lower().replace(':', ' ').replace('-', ' ') # see __init__

#           start_url = urlparse.urljoin(self.base_link, (self.search_link % (search_id.replace(' ','%20'))))         
            start_url = urlparse.urljoin(self.base_link, self.search_link % title.replace(' ', '%20'))

            headers = {'User-Agent': client.randomagent()}
            html = client.request(start_url, headers=headers)

            match = re.compile('<span class="name"><a title="(.+?)" href="(.+?)".+?title="(.+?)"', re.DOTALL).findall(html)
            for name, item_url, link_year in match:
                if year in link_year:
                    if cleantitle.get(title) in cleantitle.get(name):
                        return item_url
            return
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMovie - Exception: \n' + str(failure))
            return

    # The attribute values referenced above are set in the provider's __init__
    # (the commented-out lines above point there as well):
    def __init__(self):
        self.domains = ['solarmoviez.to', 'solarmoviez.ru']
        self.base_link = 'https://solarmoviez.ru'
        self.search_link = '/movie/search/%s.html'
        self.info_link = '/ajax/movie_info/%s.html?is_login=false'
        self.server_link = '/ajax/v4_movie_episodes/%s'
        self.embed_link = '/ajax/movie_embed/%s'
        self.token_link = '/ajax/movie_token?eid=%s&mid=%s'
        self.source_link = '/ajax/movie_sources/%s?x=%s&y=%s'
Example #9
def get2(url, check, headers=None, data=None):
    if headers is None:
        headers = {
            'User-Agent': client.randomagent(),
        }

    # Try the URL directly first.
    try:
        request = urllib2.Request(url, headers=headers, data=data)
        html = urllib2.urlopen(request, timeout=10).read()
        if check in str(html): return html
    except:
        pass

    # Fall back to fetching through a proxy.
    try:
        new_url = get_proxy_url() % urllib.quote_plus(url)
        headers['Referer'] = 'http://%s/' % urlparse.urlparse(new_url).netloc
        request = urllib2.Request(new_url, headers=headers)
        response = urllib2.urlopen(request, timeout=10)
        html = response.read()
        response.close()
        if check in html: return html
    except:
        pass

    # Second proxy attempt, in case the first one failed.
    try:
        new_url = get_proxy_url() % urllib.quote_plus(url)
        headers['Referer'] = 'http://%s/' % urlparse.urlparse(new_url).netloc
        request = urllib2.Request(new_url, headers=headers)
        html = urllib2.urlopen(request, timeout=10).read()
        if check in html: return html
    except:
        pass

    return
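get2() assumes a get_proxy_url() helper that returns a format string with one %s slot for the quoted target URL; that helper isn't shown on this page. A hypothetical stand-in makes the flow concrete (the proxy host is invented):

import urllib

def get_proxy_url():
    # Hypothetical mirror endpoint; the real helper presumably knows working proxies.
    return 'https://proxy.example.com/fetch?u=%s'

new_url = get_proxy_url() % urllib.quote_plus('http://target.example.com/page')
# -> 'https://proxy.example.com/fetch?u=http%3A%2F%2Ftarget.example.com%2Fpage'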
Example #10
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if not url:
                return sources

            ref = urlparse.urljoin(self.base_link, url)
            url = urlparse.urljoin(self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0])

            headers = {'Referer': ref, 'User-Agent': client.randomagent()}

            result = client.request(url, headers=headers, post='')
            result = base64.decodestring(result)
            result = json.loads(result).get('playinfo', [])

            if isinstance(result, basestring):
                result = result.replace('embed.html', 'index.m3u8')

                base_url = re.sub('index\.m3u8\?token=[\w\-]+', '', result)

                r = client.request(result, headers=headers)
                r = [(i[0], i[1]) for i in re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', r, re.DOTALL) if i]
                r = [(source_utils.label_to_quality(i[0]), i[1] + source_utils.append_headers(headers)) for i in r]
                r = [{'quality': i[0], 'url': base_url+i[1]} for i in r]
                for i in r: sources.append({'source': 'CDN', 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': True, 'debridonly': False})
            elif result:
                result = [i.get('link_mp4') for i in result]
                result = [i for i in result if i]
                for i in result:
                    try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'de', 'url': i, 'direct': True, 'debridonly': False})
                    except: pass

            return sources
        except:
            return sources
Example #11
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         h = {'User-Agent': client.randomagent()}
         html = self.scraper.get(url, headers=h).content
         qual = re.compile('<span class="calidad2">(.+?)</span>',
                           flags=re.DOTALL | re.IGNORECASE).findall(html)[0]
         links = re.compile('<iframe src="(.+?)"',
                            flags=re.DOTALL | re.UNICODE | re.MULTILINE
                            | re.IGNORECASE).findall(html)
         for link in links:
             valid, host = source_utils.is_host_valid(link, hostDict)
             quality, info = source_utils.get_release_quality(qual, link)
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': 'en',
                 'info': info,
                 'url': link,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except:
         return sources
Example #12
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            year = url['year']
            h = {'User-Agent': client.randomagent()}
            title = cleantitle.geturl(url['title']).replace('-', '+')
            url = urlparse.urljoin(self.base_link, self.search_link % title)
            r = self.scraper.get(url, headers=h)
            r = BeautifulSoup(r.text,
                              'html.parser').find('div', {'class': 'item'})
            r = r.find('a')['href']
            r = self.scraper.get(r, headers=h)
            r = BeautifulSoup(r.content, 'html.parser')
            quality = r.find('span', {'class': 'calidad2'}).text
            url = r.find('div', {'class': 'movieplay'}).find('iframe')['src']
            if quality not in ['1080p', '720p']:
                quality = 'SD'

            valid, host = source_utils.is_host_valid(url, hostDict)
            sources.append({
                'source': host,
                'quality': quality,
                'language': 'en',
                'url': url,
                'direct': False,
                'debridonly': False
            })
            return sources
        except:
            return sources
Example #14
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            year = data['year']
            h = {'User-Agent': client.randomagent()}
            title = cleantitle.geturl(data['title']).replace('-', '_')
            url = urlparse.urljoin(self.base_link, self.search_link %(title, year))
            r = client.request(url, headers=h)
            vidlink = re.findall('d\/(.+)"',r)
            r = dom_parser2.parse_dom(r, 'div', {'class': 'title'})
            if '1080p' in r[0].content:
                quality = '1080p'
            elif '720p' in r[0].content:
                quality = '720p'
            else:
                quality = 'SD'
            u = 'https://vidlink.org/streamdrive/info/%s' % vidlink[0]
            r = client.request(u, headers=h)
            r = json.loads(r)
            for i in r:
                try: sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False})
                except: pass
            return sources
        except:
            return sources
Example #15
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            try:
                import xbmc
                ip = xbmc.getIPAddress()
            except:
                ip = 'London'

            referer = 'http://www.einthusan.com/movies/watch.php?id=%s' % url

            agent = client.randomagent()

            headers = {'User-Agent': agent, 'Referer': referer}

            url = 'http://cdn.einthusan.com/geturl/%s/hd/%s/' % (url, ip)

            url = client.request(url, headers=headers)

            url += '|%s' % urllib.urlencode({'User-agent': agent})

            sources.append({
                'source': 'einthusan',
                'quality': 'HD',
                'provider': 'Einthusan',
                'url': url,
                'direct': True,
                'debridonly': False
            })
            return sources
        except:
            return sources
Example #16
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            session = self._createSession(randomagent())

            lowerTitle = title.lower()
            stringConstant, searchHTML = self._getSearch(lowerTitle, session)

            possibleTitles = set(
                (lowerTitle,) + tuple((alias['title'].lower() for alias in aliases) if aliases else ())
            )
            soup = BeautifulSoup(searchHTML, 'html.parser')
            for div in soup.findAll('div', recursive=False):
                if div.span and year in div.span.text and div.a.text.lower() in possibleTitles:
                    # The return value doesn't need to be url-encoded or even a string. Exodus forks accept
                    # anything that can be converted with repr() and be stored in the local database.
                    return {
                        'type': 'movie',
                        'pageURL': self.BASE_URL + div.a['href'],
                        'sConstant': stringConstant,
                        'UA': session.headers['User-Agent'],
                        'cfCookies': self._cloudflareCookiesToDict(session)
                    }
            return None # No results found.
        except:
            self._logException()
            return None
Example #18
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if self.user == '' or self.password == '': raise Exception()
            login = urlparse.urljoin(self.base_link, '/login')
            post = {'username': self.user, 'password': self.password, 'returnpath': '/'}
            post = urllib.urlencode(post)

            headers = {'User-Agent': client.randomagent()}
            rlogin = client.request(login, headers=headers, post=post, output='extended')
            guid = re.findall('(.*?);\s', rlogin[2]['Set-Cookie'])[0]
            headers['Cookie'] = guid  # headers has no 'Cookie' key yet; '+=' here would raise KeyError
            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url, headers=headers)

            url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = url.replace('https://', 'http://')

            links = []

            try:
                dec = re.findall('mplanet\*(.+)', url)[0]
                dec = dec.rsplit('&')[0]
                dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
                dec = directstream.google(dec)

                links += [(i['url'], i['quality'], 'gvideo') for i in dec]
            except:
                pass

            result = client.request(url, headers=headers)

            try:
                url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")', result)
                for i in url:
                    try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i})
                    except: pass
            except:
                pass

            try:
                url = client.parseDOM(result, 'source', ret='src')
                url += re.findall('src\s*:\s*\'(.*?)\'', result)
                url = [i for i in url if '://' in i]
                links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
            except:
                pass

            for i in links:
                sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Example #20
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], data['year'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            if url is None: return sources

            url = urlparse.urljoin(self.base_link, url)
            url = re.sub('/watching.html$', '', url.strip('/'))
            url = url + '/watching.html'

            p = client.request(url)

            if episode > 0:
                r = client.parseDOM(p, 'div', attrs={'class': 'ep_link.+?'})[0]
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
                r = [(i[0], re.findall('Episode\s+(\d+)', i[1])) for i in r]
                r = [(i[0], i[1][0]) for i in r]
                url = [i[0] for i in r if int(i[1]) == episode][0]
                p = client.request(url, headers=headers, timeout='10')

            referer = url

            id = re.findall('load_player\(.+?(\d+)', p)[0]
            r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3?id=%s' % id)
            r = client.request(r, referer=referer, XHR=True)

            url = json.loads(r)['value']

            if url.startswith('//'):
                url = 'https:' + url

            r = client.request(url, referer=referer, XHR=True)

            headers = {
                'User-Agent': client.randomagent(),
                'Referer': referer
            }

            headers = '|' + urllib.urlencode(headers)

            source = str(json.loads(r)['playlist'][0]['file']) + headers

            sources.append({'source': 'CDN', 'quality': 'HD', 'language': 'en', 'url': source, 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Example #21
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], data['year'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            if url is None: return sources

            #url = urlparse.urljoin(self.base_link, url)
            url = re.sub('/watching.html$', '', url.strip('/'))
            url = url + '/watching.html'

            p = self.scraper.get(url).content

            if episode > 0:
                r = client.parseDOM(p, 'div', attrs={'class': 'ep_link.+?'})[0]
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
                r = [(i[0], re.findall('Episode\s+(\d+)', i[1])) for i in r]
                r = [(i[0], i[1][0]) for i in r]
                url = [i[0] for i in r if int(i[1]) == episode][0]
                p = self.scraper.get(url, headers=headers).content

            referer = url
            headers = {
                'User-Agent': client.randomagent(),
                'Referer': url
            }

            id = re.findall('load_player\(.+?(\d+)', p)[0]
            r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3?id=%s' % id)
            r = self.scraper.get(r, headers=headers).content

            url = json.loads(r)['value']

            if url.startswith('//'):
                url = 'https:' + url

            r = self.scraper.get(url, headers=headers).content

            headers = '|' + urllib.urlencode(headers)

            source = str(json.loads(r)['playlist'][0]['file']) + headers

            sources.append({'source': 'CDN', 'quality': 'HD', 'language': 'en', 'url': source, 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Example #22
    def __get_cookies(self, url):
        h = {'User-Agent': client.randomagent()}

        c = client.request(url, headers=h, output='cookie')
        c = client.request(urlparse.urljoin(self.base_link, '/av'), cookie=c, output='cookie', headers=h, referer=url)
        c = client.request(url, cookie=c, headers=h, referer=url, output='cookie')

        return c, h
Example #24
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']
            year = data['year']

            h = {'User-Agent': client.randomagent()}

            v = '%s_%s' % (cleantitle.geturl(title).replace('-', '_'), year)

            url = '/watch_%s.html' % v
            url = urlparse.urljoin(self.base_link, url)

            c = client.request(url, headers=h, output='cookie')
            c = client.request(urlparse.urljoin(self.base_link, '/av'),
                               cookie=c,
                               output='cookie',
                               headers=h,
                               referer=url)
            #c = client.request(url, cookie=c, headers=h, referer=url, output='cookie')

            post = urllib.urlencode({'v': v})
            u = urlparse.urljoin(self.base_link, '/video_info/frame')

            #r = client.request(u, post=post, cookie=c, headers=h, XHR=True, referer=url)
            r = client.request(u, post=post, headers=h, XHR=True, referer=url)
            r = json.loads(r).values()
            r = [urllib.unquote(i.split('url=')[-1]) for i in r]

            for i in r:
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality': directstream.googletag(i)[0]['quality'],
                        'language': 'en',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #25
    def sources(self, url, hostDict, locDict):
        sources = []
        req = requests.Session()
        headers = {'User-Agent': client.randomagent(), 'Origin': 'http://imdark.com', 'Referer': 'http://imdark.com',
                   'X-Requested-With': 'XMLHttpRequest'}

        try:
            if url is None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            query = urllib.quote_plus(title).lower()
            result = req.get(self.base_link, headers=headers).text
            darksearch = re.findall(r'darkestsearch" value="(.*?)"', result)[0]

            result = req.get(self.base_link + self.search_link % (query, darksearch), headers=headers).text

            r = client.parseDOM(result, 'div', attrs={'id':'showList'})
            r = re.findall(r'<a\s+style="color:white;"\s+href="([^"]+)">([^<]+)', r[0])     
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and data['year'] in i[1]][0]
            url = r[0]
            print("INFO - " + url)
            result = req.get(url, headers=headers).text
            nonce = re.findall(r"nonce = '(.*?)'", result)[0]
            tipi = re.findall(r'tipi = (.*?);', result)[0]
            postData = {'action':'getitsufiplaying', 'tipi':tipi, 'jhinga':nonce}
            result = req.post(self.base_link + self.ajax_link, data=postData, headers=headers).text
            r = re.findall(r'"src":"(.*?)","type":"(.*?)","data-res":"(\d*?)"', result)
            linkHeaders = 'Referer=http://imdark.com/&User-Agent=' + urllib.quote(client.randomagent()) + '&Cookie=' + urllib.quote('mykey123=mykeyvalue')
            for i in r:
                print(str(i))
                try:
                    q = source_utils.label_to_quality(i[2])
                    sources.append({'source': 'CDN', 'quality': q, 'info': i[1].replace('\\', ''), 'language': 'en',
                                    'url': i[0].replace('\\','') + '|' + linkHeaders,
                                    'direct': True, 'debridonly': False})
                except:
                    traceback.print_exc()
                    pass
            for i in sources:
                print("INFO SOURCES " + str(i))
            return sources
        except:
            traceback.print_exc()
            return sources
Example #27
 def resolve(self, url):
     if 'streamty.com' in url:
         h = {'User-Agent': client.randomagent()}
         html = self.scraper.get(url, headers=h).content
         packed = find_match(html, "text/javascript'>(eval.*?)\s*</script>")
         unpacked = jsunpack.unpack(packed)
         link = find_match(unpacked, 'file:"([^"]+)"')[0]
         return link
     return url
Example #28
 def request(self, url):
     try:
         req = urllib2.Request(url)
         req.add_header('User-Agent', client.randomagent())
         res = urllib2.urlopen(req)
         r = res.read() if not res.info().getheader('Content-Encoding') == 'gzip' else gzip.GzipFile(fileobj=StringIO.StringIO(res.read())).read()
         res.close()
         return r
     except:
         return
Example #29
def get_tenies_online_links(url):
    urls = []

    headers = {'User-Agent': client.randomagent(), 'Referer': url}
    r = client.request(url)
    try:
        frames = client.parseDOM(r, 'div', {'id': 'playeroptions'})[0]
        frames = dom.parse_dom(frames,
                               'li',
                               attrs={'class': 'dooplay_player_option'},
                               req=['data-post', 'data-nume', 'data-type'])
        for frame in frames:
            post = 'action=doo_player_ajax&post=%s&nume=%s&type=%s' % \
                   (frame.attrs['data-post'], frame.attrs['data-nume'], frame.attrs['data-type'])
            if '=trailer' in post: continue
            p_link = 'https://tenies-online.gr/wp-admin/admin-ajax.php'

            flink = client.request(p_link, post=post, headers=headers)
            flink = client.parseDOM(flink, 'iframe', ret='src')[0]

            host = __top_domain(flink)
            urls.append((flink, host))
        xbmc.log('FRAMES-LINKs: %s' % urls)
    except BaseException:
        pass

    try:
        extra = client.parseDOM(r, 'div', attrs={'class': 'links_table'})[0]
        extra = dom.parse_dom(extra, 'td')
        extra = [
            dom.parse_dom(i.content, 'img', req='src') for i in extra if i
        ]
        extra = [(i[0].attrs['src'],
                  dom.parse_dom(i[0].content, 'a', req='href')) for i in extra
                 if i]
        extra = [(re.findall('domain=(.+?)$', i[0])[0], i[1][0].attrs['href'])
                 for i in extra if i]
        for item in extra:
            url = item[1]
            if 'paidikestainies' in url:
                continue
            if 'tenies-online' in url:
                url = client.request(url, output='geturl', redirect=True)

            host = item[0]

            urls.append((url, host))
        xbmc.log('EXTRA-LINKs: %s' % urls)
    except BaseException:
        pass

    return urls
Example #30
 def request(self, url):
     try:
         req = urllib2.Request(url)
         req.add_header('User-Agent', client.randomagent())
         res = urllib2.urlopen(req)
         r = res.read() if not res.info().getheader(
             'Content-Encoding') == 'gzip' else gzip.GzipFile(
                 fileobj=StringIO.StringIO(res.read())).read()
         res.close()
         return r
     except:
         return
Example #31
 def _createSession(self, customHeaders={}):
     # Create a 'requests.Session' and try to spoof a header from a web browser.
     session = requests.Session()
     session.headers.update(
         {
             'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
             'User-Agent': customHeaders.get('UA', randomagent()),
             'Accept-Language': 'en-US,en;q=0.5',
             'Referer': customHeaders.get('referer', self.BASE_URL + '/'),
             'DNT': '1'
         }
     )
     return session
Example #33
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            url = urlparse.urljoin(self.base_link, url)
            h = {'User-Agent': client.randomagent()}
            
            result = client.request(url, output='extended', headers=h)
            cookie = result[4]
            ajax_prov = client.parseDOM(result[0], 'meta', attrs={'property': 'provision'}, ret='content')[0]
            
            ajax_url = urlparse.urljoin(self.base_link, self.ajax_link) % ajax_prov
            h['X-CSRFToken'] = re.findall('csrftoken=(.*?);', cookie)[0]
            result = client.request(ajax_url, cookie=cookie, XHR=True, headers=h)
            
            r = client.parseDOM(result, 'div', attrs={'class':'host-container pull-left'})
            r = [(client.parseDOM(i, 'div', attrs={'class': 'url'}, ret='data-url'),
                  client.parseDOM(i, 'span', attrs={'class':'label label-default'}),
                  client.parseDOM(i, 'img', attrs={'class': 'ttip'}, ret='title'),
                  client.parseDOM(i, 'span', attrs={'class': 'glyphicon glyphicon-hd-video ttip'}, ret='title'),
                  ) for i in r]

            r = [(self.html_parser.unescape(i[0][0]), i[1][0], i[2][0], len(i[3]) > 0) for i in r]
            r = [(client.parseDOM(i[0], 'iframe', ret='src'), i[1], i[2], i[3]) for i in r]
            r = [(i[0][0], i[1], i[2], i[3]) for i in r if len(i[0]) > 0]

            for i in r:
                try:

                    host = urlparse.urlparse(i[0]).netloc
                    host = host.replace('www.', '').replace('embed.', '')
                    host = host.lower()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    lang, info = self.get_lang_by_type(i[1])

                    q = 'SD'
                    if 'Wysoka' in i[2]: q = 'HD'
                    if i[3]: q = '1080p'

                    sources.append({'source': host, 'quality': q, 'language': lang, 'url': i[0], 'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #34
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         headers = {'User-Agent': client.randomagent()}
         tvtitle = cleantitle.geturl(tvshowtitle)
         url = self.base_link + self.search_link % tvtitle
         r = client.request(url, headers=headers, timeout='5')
         u = client.parseDOM(r, "div", attrs={"class": "ml-item"})
         for i in u:
             t = re.compile('<a href="(.+?)"').findall(i)
             for link in t:
                 if cleantitle.get(tvtitle) in cleantitle.get(link):
                     return source_utils.strip_domain(link)
     except:
         return
Example #35
def get_raw(url, headers=None, data=None):
    if headers is None:
        headers = {
            'User-Agent': client.randomagent(),
        }

    try:
        new_url = get_proxy_url() % urllib.quote_plus(url)
        headers['Referer'] = 'http://%s/' % urlparse.urlparse(new_url).netloc
        request = urllib2.Request(new_url, headers=headers)
        response = urllib2.urlopen(request, timeout=10)
        return response
    except:
        pass
Example #36
def cloudflare_mode(url):
    headers = {'User-Agent': client.randomagent()}
    result = client.request(url, headers=headers)
    # from cloudscraper2 import CloudScraper
    # import requests
    # scraper = CloudScraper.create_scraper()
    # ua = client.agent()
    # scraper.headers.update({'User-Agent': ua})
    # cookies = scraper.get(url).cookies.get_dict()
    # headers = {'User-Agent': ua}
    # req = requests.get(url, cookies=cookies, headers=headers)
    # result = req.text
    #xbmc.log('RESULTTTTT: %s' % result)
    return result
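The commented-out block sketches the Cloudflare-aware alternative. With the publicly available cloudscraper package, the same idea looks roughly like this (a sketch; the addon's own import path, cloudscraper2, may differ):

import cloudscraper

def cloudflare_mode(url):
    # create_scraper() returns a requests-compatible session that solves
    # Cloudflare's JS challenge before returning the page body.
    scraper = cloudscraper.create_scraper()
    return scraper.get(url).text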
Example #38
 def _createSession(self, userAgent=None, cookies=None, referer=None):
     session = requests.Session()
     session.headers.update(
         {
             'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
             'User-Agent': userAgent if userAgent else randomagent(),
             'Accept-Language': 'en-US,en;q=0.5',
             'Referer': referer if referer else self.BASE_URL + '/',
             'Upgrade-Insecure-Requests': '1',
             'DNT': '1'
         }
     )
     if cookies:
         session.cookies.update(cookies)
     return session
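This helper pairs with the movie()/episode() examples earlier, which stash session.cookies.get_dict() and the User-Agent so a later sources() call can resume the same session. The round trip, reduced to a self-contained sketch over plain requests:

import requests

def create_session(user_agent=None, cookies=None):
    session = requests.Session()
    session.headers['User-Agent'] = user_agent or 'Mozilla/5.0'
    if cookies:
        session.cookies.update(cookies)
    return session

# Phase 1: a search-time call saves just enough state to resume later.
first = create_session()
state = {'UA': first.headers['User-Agent'], 'cookies': first.cookies.get_dict()}

# Phase 2: a later call rebuilds an equivalent session from that state.
resumed = create_session(state['UA'], state['cookies'])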
Example #40
 def _createSession(self, userAgent=None, cookies=None, referer=None):
     session = requests.Session()
     session.headers.update(
         {
             'Accept': self.DEFAULT_ACCEPT,
             'User-Agent': userAgent if userAgent else randomagent(),
             'Accept-Language': 'en-US,en;q=0.5',
             'Referer': referer if referer else self.BASE_URL + '/',
             'DNT': '1'
         }
     )
     if cookies:
         session.cookies.update(cookies)
         session.cookies[''] = '__test'
     return session
Example #41
 def _createSession(self, userAgent=None, cookies=None, referer=None):
     # Try to spoof a header from a web browser.
     session = requests.Session()
     session.headers.update(
         {
             'Accept': self.DEFAULT_ACCEPT,
             'User-Agent': userAgent if userAgent else randomagent(),
             'Accept-Language': 'en-US,en;q=0.5',
             'Referer': referer if referer else self.BASE_URL + '/',
             'DNT': '1'
         }
     )
     if cookies:
         session.cookies.update(cookies)
         session.cookies[''] = '__test' # See _getSearch() for more info on this.
     return session
Example #43
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['ondarewatch.com', 'dailytvfix.com']
        self.base_link = 'http://www.dailytvfix.com'
        self.search_link = self.base_link + '/ajax/search.php'
        self.ua = client.randomagent()

        self.search_headers = {
            'Host': self.base_link.replace('http://', '', 1),
            'User-Agent': self.ua,
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',
            'Referer': self.base_link + '/',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'X-Requested-With': 'XMLHttpRequest',
            'DNT': '1'
        }
Example #45
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            search_id = title.lower().replace(':', ' ').replace('-', ' ')

            start_url = urlparse.urljoin(self.base_link, self.search_link % search_id.replace(' ', '%20'))

            headers = {'User-Agent': client.randomagent()}
            html = client.request(start_url, headers=headers)

            match = re.compile('<span class="name"><a title="(.+?)" href="(.+?)".+?title="(.+?)"', re.DOTALL).findall(html)
            for name, item_url, link_year in match:
                if year in link_year:
                    if cleantitle.get(title) in cleantitle.get(name):
                        return item_url
            return
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMovie - Exception: \n' + str(failure))
            return
Example #47
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            headers = {'User-Agent': client.randomagent()}
            html = client.request(url, headers=headers)

            Links = re.compile('id="link_.+?target="_blank" id="(.+?)"', re.DOTALL).findall(html)
            for vid_url in Links:
                if 'openload' in vid_url:
                    try:
                        source_html = client.request(vid_url, headers=headers)
                        source_string = re.compile('description" content="(.+?)"', re.DOTALL).findall(source_html)[0]
                        quality, info = source_utils.get_release_quality(source_string, vid_url)
                    except:
                        quality = 'DVD'
                        info = []
                    sources.append({'source': 'Openload', 'quality': quality, 'language': 'en', 'url': vid_url, 'info': info, 'direct': False, 'debridonly': False})
                elif 'streamango' in vid_url:
                    try:
                        source_html = client.request(vid_url, headers=headers)
                        source_string = re.compile('description" content="(.+?)"', re.DOTALL).findall(source_html)[0]
                        quality, info = source_utils.get_release_quality(source_string, vid_url)
                    except:
                        quality = 'DVD'
                        info = []
                    sources.append({'source': 'Streamango', 'quality': quality, 'language': 'en', 'url': vid_url, 'info': info, 'direct': False, 'debridonly': False})
                else:
                    if resolveurl.HostedMediaFile(vid_url):
                        quality, info = source_utils.get_release_quality(vid_url, vid_url)
                        host = vid_url.split('//')[1].replace('www.', '')
                        host = host.split('/')[0].split('.')[0].title()
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': vid_url, 'info': info, 'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMovie - Exception: \n' + str(failure))
            return sources
Example #48
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title']
            year = data['year']

            h = {'User-Agent': client.randomagent()}

            v = '%s_%s' % (cleantitle.geturl(title).replace('-', '_'), year)

            url = '/watch_%s.html' % v
            url = urlparse.urljoin(self.base_link, url)

            c = client.request(url, headers=h, output='cookie')
            c = client.request(urlparse.urljoin(self.base_link, '/av'), cookie=c, output='cookie', headers=h, referer=url)
            #c = client.request(url, cookie=c, headers=h, referer=url, output='cookie')

            post = urllib.urlencode({'v': v})
            u = urlparse.urljoin(self.base_link, '/video_info/frame')

            #r = client.request(u, post=post, cookie=c, headers=h, XHR=True, referer=url)
            r = client.request(u, post=post, headers=h, XHR=True, referer=url)
            r = json.loads(r).values()
            r = [urllib.unquote(i.split('url=')[-1]) for i in r]

            for i in r:
                try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                except: pass

            return sources
        except:
            return sources
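Note: Example #48 receives its metadata as a query string and flattens the list values that urlparse.parse_qs returns into plain strings. The idiom in isolation (Python 2; urllib.parse provides parse_qs on Python 3):

import urlparse

qs = 'title=The+Matrix&year=1999'
data = urlparse.parse_qs(qs)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
print('%s (%s)' % (data['title'], data['year']))  # The Matrix (1999)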
Example #49
0
    def episode(self, data, imdb, tvdb, title, premiered, season, episode):
        try:
            session = self._createSession(randomagent())

            # Search with the TV show name and season number string.
            lowerTitle = data
            stringConstant, searchHTML = self._getSearch(lowerTitle + ' ' + season, session)

            soup = BeautifulSoup(searchHTML, 'html.parser')
            for div in soup.findAll('div', recursive=False):
                resultName = div.a.text.lower()
                if lowerTitle in resultName and season in resultName:
                    return {
                        'type': 'episode',
                        'episode': episode,
                        'pageURL': self.BASE_URL + div.a['href'],
                        'sConstant': stringConstant,
                        'UA': session.headers['User-Agent'],
                        'cookies': session.cookies.get_dict()
                    }
            return None # No results found.
        except:
            self._logException()
            return None
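Note: the episode lookup above scans only top-level <div> results and requires both the show title and the season number in the link text. A self-contained sketch against invented markup (requires bs4):

from bs4 import BeautifulSoup

html = '<div><a href="/serie/breaking-bad-5">Breaking Bad 5</a></div>'
soup = BeautifulSoup(html, 'html.parser')
for div in soup.findAll('div', recursive=False):  # direct children only
    name = div.a.text.lower()
    if 'breaking bad' in name and '5' in name:
        print(div.a['href'])  # /serie/breaking-bad-5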
Example #50
0
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            session = self._createSession(randomagent())

            lowerTitle = title.lower()
            stringConstant, searchHTML = self._getSearch(lowerTitle, session)

            possibleTitles = set(
                (lowerTitle,) + tuple((alias['title'].lower() for alias in aliases) if aliases else ())
            )
            soup = BeautifulSoup(searchHTML, 'html.parser', parse_only=SoupStrainer('div', recursive=False))
            for div in soup:
                if div.span and (year in div.span.text) and (div.a.text.lower() in possibleTitles):
                    return {
                        'type': 'movie',
                        'pageURL': self.BASE_URL + div.a['href'],
                        'sConstant': stringConstant,
                        'UA': session.headers['User-Agent'],
                        'cookies': session.cookies.get_dict()
                    }
            return None # No results found.
        except:
            self._logException()
            return None
Example #51
0
def sucuri(url, timeout):
    try:
        h = client.randomagent()

        r = client.request(url, headers={'User-Agent': h}, timeout=timeout, error=True)

        s = re.compile("S\s*=\s*'([^']+)").findall(r)[0]
        s = base64.b64decode(s)
        s = s.replace(' ', '')
        s = re.sub('String\.fromCharCode\(([^)]+)\)', r'chr(\1)', s)
        s = re.sub('\.slice\((\d+),(\d+)\)', r'[\1:\2]', s)
        s = re.sub('\.charAt\(([^)]+)\)', r'[\1]', s)
        s = re.sub('\.substr\((\d+),(\d+)\)', r'[\1:\1+\2]', s)
        s = re.sub(';location.reload\(\);', '', s)
        s = re.sub(r'\n', '', s)
        s = re.sub(r'document\.cookie', 'cookie', s)

        cookie = '' ; exec(s)
        c = re.compile('([^=]+)=(.*)').findall(cookie)[0]
        c = '%s=%s' % (c[0], c[1])

        return {'User-Agent': h, 'Cookie': c}
    except:
        pass
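Note: sucuri() beats the JavaScript challenge by regex-rewriting the script into Python and exec'ing it to recover the cookie the page would have set. The core trick on a hypothetical, much-simplified script (the real challenge arrives base64-encoded and uses more constructs):

import re

js = 'cookie="sucuri_uuid_abc="+"def".charAt(0)+"XYZ".slice(1,3);'
s = re.sub(r'\.slice\((\d+),(\d+)\)', r'[\1:\2]', js)  # .slice(a,b) -> [a:b]
s = re.sub(r'\.charAt\(([^)]+)\)', r'[\1]', s)         # .charAt(i) -> [i]

cookie = ''
exec(s)  # the rewritten statement is now valid Python
print(cookie)  # sucuri_uuid_abc=dYZ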
Example #52
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None:
                return sources

            if self.user == "" or self.password == "":
                raise Exception()

            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response):
                    return response

            headers = {"X-Requested-With": "XMLHttpRequest"}
            login = urlparse.urljoin(self.base_link, "/login")
            post = {"username": self.user, "password": self.password, "action": "login"}
            post = urllib.urlencode(post)

            cookie = client.source(login, post=post, headers=headers, output="cookie")

            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url, cookie=cookie)

            url = re.compile("embeds\[\d+\]\s*=\s*'([^']+)").findall(result)[0]
            url = client.parseDOM(url, "iframe", ret="src")[0]
            url = url.replace("https://", "http://")

            links = []

            try:
                url = re.compile("mplanet\*(.+)").findall(url)[0]
                url = url.rsplit("&")[0]
                dec = self._gkdecrypt(base64.b64decode("MllVcmlZQmhTM2swYU9BY0lmTzQ="), url)
                dec = directstream.google(dec)

                links += [(i["url"], i["quality"], "gvideo") for i in dec]
            except:
                pass

            try:
                result = client.source(url)

                result = re.compile("sources\s*:\s*\[(.*?)\]", re.DOTALL).findall(result)[0]
                result = re.compile(
                    """['"]*file['"]*\s*:\s*['"]*([^'"]+).*?['"]*label['"]*\s*:\s*['"]*([^'"]+)""", re.DOTALL
                ).findall(result)
            except:
                pass

            try:
                u = result[0][0]
                if not "download.php" in u and not ".live." in u:
                    raise Exception()
                o = urllib2.build_opener(NoRedirection)
                o.addheaders = [("User-Agent", client.randomagent()), ("Cookie", cookie)]
                r = o.open(u)
                try:
                    u = r.headers["Location"]
                except:
                    pass
                r.close()
                links += [(u, "1080p", "cdn")]
            except:
                pass
            try:
                u = [(i[0], re.sub("[^0-9]", "", i[1])) for i in result]
                u = [(i[0], i[1]) for i in u if i[1].isdigit()]
                links += [(i[0], "1080p", "gvideo") for i in u if int(i[1]) >= 1080]
                links += [(i[0], "HD", "gvideo") for i in u if 720 <= int(i[1]) < 1080]
                links += [(i[0], "SD", "gvideo") for i in u if 480 <= int(i[1]) < 720]
            except:
                pass

            for i in links:
                sources.append(
                    {
                        "source": i[2],
                        "quality": i[1],
                        "provider": "Moviesplanet",
                        "url": i[0],
                        "direct": True,
                        "debridonly": False,
                    }
                )

            return sources
        except:
            return sources
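Note: the final block of Example #52 maps numeric quality labels onto the add-on's three buckets. The thresholds as a standalone helper (the function name is mine, not the provider's):

import re

def quality_bucket(label):
    n = re.sub('[^0-9]', '', label)  # '1080p' -> '1080'
    if not n.isdigit():
        return None
    n = int(n)
    if n >= 1080:
        return '1080p'
    if n >= 720:
        return 'HD'
    if n >= 480:
        return 'SD'
    return None  # anything below 480 is dropped, as in the example

print(quality_bucket('720p'))  # HD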
Example #53
0
def cloudflareAgent():
    return client.randomagent()
Example #54
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if (self.user == '' or self.password == ''): raise Exception()

            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response): return response


            headers = {'X-Requested-With': 'XMLHttpRequest'}
            login = urlparse.urljoin(self.base_link, '/login')
            post = {'username': self.user, 'password': self.password, 'action': 'login'}
            post = urllib.urlencode(post)

            cookie = client.source(login, post=post, headers=headers, output='cookie')


            url = urlparse.urljoin(self.base_link, url)

            result = client.source(url, cookie=cookie)

            url = re.compile("embeds\[\d+\]\s*=\s*'([^']+)").findall(result)[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = url.replace('https://', 'http://')

            links = []

            try:
                url = re.compile('mplanet\*(.+)').findall(url)[0]
                url = url.rsplit('&')[0]
                dec = self._gkdecrypt(base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), url)
                dec = directstream.google(dec)

                links += [(i['url'], i['quality'], 'gvideo') for i in dec]
            except:
                pass

            try:
                result = client.source(url)

                result = re.compile('sources\s*:\s*\[(.*?)\]', re.DOTALL).findall(result)[0]
                result = re.compile('''['"]*file['"]*\s*:\s*['"]*([^'"]+).*?['"]*label['"]*\s*:\s*['"]*([^'"]+)''', re.DOTALL).findall(result)
            except:
                pass

            try:
                u = result[0][0]
                if not 'download.php' in u and not '.live.' in u: raise Exception()
                o = urllib2.build_opener(NoRedirection)
                o.addheaders = [('User-Agent', client.randomagent()), ('Cookie', cookie)]
                r = o.open(u)
                try: u = r.headers['Location']
                except: pass
                r.close()
                links += [(u, '1080p', 'cdn')]
            except:
                pass
            try:
                u = [(i[0], re.sub('[^0-9]', '', i[1])) for i in result]
                u = [(i[0], i[1]) for i in u if i[1].isdigit()]
                links += [(i[0], '1080p', 'gvideo') for i in u if int(i[1]) >= 1080]
                links += [(i[0], 'HD', 'gvideo') for i in u if 720 <= int(i[1]) < 1080]
                links += [(i[0], 'SD', 'gvideo') for i in u if 480 <= int(i[1]) < 720]
            except:
                pass


            for i in links: sources.append({'source': i[2], 'quality': i[1], 'provider': 'Moviesplanet', 'url': i[0], 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Example #55
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            try: url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except: episode = None

            headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}

            for i in range(3):
                result = client.request(url)
                if not result == None: break

            if not episode == None:
                mid = client.parseDOM(result, 'input', ret='value', attrs = {'name': 'phimid'})[0]
                url = urlparse.urljoin(self.base_link, '/ajax.php')
                post = {'ipos_server': 1, 'phimid': mid, 'keyurl': episode}
                post = urllib.urlencode(post)

                for i in range(3):
                    result = client.request(url, post=post, headers=headers, timeout='10')
                    if not result == None: break

            r = client.parseDOM(result, 'div', attrs = {'class': '[^"]*server_line[^"]*'})

            links = []

            for u in r:
                try:
                    host = client.parseDOM(u, 'p', attrs = {'class': 'server_servername'})[0]
                    host = host.strip().lower().split(' ')[-1]

                    url = urlparse.urljoin(self.base_link, '/ip.temp/swf/plugins/ipplugins.php')

                    p1 = client.parseDOM(u, 'a', ret='data-film')[0]
                    p2 = client.parseDOM(u, 'a', ret='data-server')[0]
                    p3 = client.parseDOM(u, 'a', ret='data-name')[0]
                    post = {'ipplugins': 1, 'ip_film': p1, 'ip_server': p2, 'ip_name': p3}
                    post = urllib.urlencode(post)

                    if not host in ['google', 'putlocker', 'megashare']: raise Exception()

                    for i in range(3):
                        result = client.request(url, post=post, headers=headers, timeout='10')
                        if not result == None: break

                    result = json.loads(result)['s']

                    url = urlparse.urljoin(self.base_link, '/ip.temp/swf/ipplayer/ipplayer.php')

                    post = {'u': result, 'w': '100%', 'h': '420'}
                    post = urllib.urlencode(post)

                    for i in range(3):
                        result = client.request(url, post=post, headers=headers)
                        if not result == None: break

                    url = json.loads(result)['data']

                    if type(url) is list:
                        url = [i['files'] for i in url]
                        for i in url:
                            try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Tunemovie', 'url': i, 'direct': True, 'debridonly': False})
                            except: pass

                    else:
                        url = client.request(url)
                        url = client.parseDOM(url, 'source', ret='src', attrs = {'type': 'video.+?'})[0]
                        url += '|%s' % urllib.urlencode({'User-agent': client.randomagent()})
                        sources.append({'source': 'cdn', 'quality': 'HD', 'provider': 'Tunemovie', 'url': url, 'direct': False, 'debridonly': False})

                except:
                    pass

            return sources
        except:
            return sources
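Note: this provider polls client.request() up to three times before carrying on. The pattern generalises to a small helper, sketched here (not part of the original add-on):

def request_with_retries(fetch, attempts=3):
    # call fetch() until it returns something other than None
    for _ in range(attempts):
        result = fetch()
        if result is not None:
            return result
    return None

# hypothetical usage: result = request_with_retries(lambda: client.request(url))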
Example #56
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            try:
                url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except:
                episode = None

            ref = url

            for i in range(3):
                result = client.request(url)
                if not result == None: break

            if not episode == None:
                result = client.parseDOM(result, 'div', attrs = {'id': 'ip_episode'})[0]
                ep_url = client.parseDOM(result, 'a', attrs = {'data-name': str(episode)}, ret='href')[0]
                for i in range(3):
                    result = client.request(ep_url)
                    if not result == None: break

            r = client.parseDOM(result, 'div', attrs = {'class': '[^"]*server_line[^"]*'})

            for u in r:
                try:
                    url = urlparse.urljoin(self.base_link, '/ip.file/swf/plugins/ipplugins.php')
                    p1 = client.parseDOM(u, 'a', ret='data-film')[0]
                    p2 = client.parseDOM(u, 'a', ret='data-server')[0]
                    p3 = client.parseDOM(u, 'a', ret='data-name')[0]
                    post = {'ipplugins': 1, 'ip_film': p1, 'ip_server': p2, 'ip_name': p3}
                    post = urllib.urlencode(post)
                    for i in range(3):
                        result = client.request(url, post=post, XHR=True, referer=ref, timeout='10')
                        if not result == None: break

                    result = json.loads(result)
                    u = result['s']
                    s = result['v']

                    url = urlparse.urljoin(self.base_link, '/ip.file/swf/ipplayer/ipplayer.php')

                    for n in range(3):
                        try:
                            post = {'u': u, 'w': '100%', 'h': '420', 's': s, 'n': n}
                            post = urllib.urlencode(post)
                            result = client.request(url, post=post, XHR=True, referer=ref)
                            src = json.loads(result)['data']

                            if type(src) is list:
                                src = [i['files'] for i in src]
                                for i in src:
                                    try:
                                        sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                                    except:
                                        pass
                            else:
                                src = client.request(src)
                                src = client.parseDOM(src, 'source', ret='src', attrs = {'type': 'video.+?'})[0]
                                src += '|%s' % urllib.urlencode({'User-agent': client.randomagent()})
                                sources.append({'source': 'cdn', 'quality': 'HD', 'language': 'en', 'url': src, 'direct': False, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
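Note: Example #56 appends '|' plus urlencoded headers to the stream URL, the Kodi convention for passing request headers to the player. Standalone, with an invented URL (Python 2; urllib.parse provides urlencode on Python 3):

import urllib

src = 'http://cdn.example.com/video.mp4'
src += '|%s' % urllib.urlencode({'User-agent': 'Mozilla/5.0'})
print(src)  # http://cdn.example.com/video.mp4|User-agent=Mozilla%2F5.0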
Example #57
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            # if (self.user != '' and self.password != ''): #raise Exception()

                # login = urlparse.urljoin(self.base_link, '/login.html')

                # post = urllib.urlencode({'username': self.user, 'password': self.password, 'submit': 'Login'})

                # cookie = client.request(login, post=post, output='cookie', close=False)

                # r = client.request(login, post=post, cookie=cookie, output='extended')

                # headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
            # else:
                # headers = {}


            headers = {'User-Agent': client.randomagent()}
            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                year = data['year']
                def searchname(r):
                    r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                    r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
                    r = [] if r == [] else [i[0] for i in r][0]
                    return r
                
                if 'tvshowtitle' in data:
                    link = urlparse.urljoin(self.base_link, 'tvshow-%s.html' %title[0].upper())
                    r = client.request(link, headers=headers)
                    pages = dom_parser.parse_dom(r, 'span', attrs={'class': 'break-pagination-2'})
                    pages = dom_parser.parse_dom(pages, 'a', req='href')
                    pages = [(i.attrs['href']) for i in pages]
                    if pages == []:
                        r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                        r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                        r = searchname(r)
                    else:
                        for page in pages:
                            link = urlparse.urljoin(self.base_link, page)
                            r = client.request(link, headers=headers)
                            r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                            r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                            r = searchname(r)
                            if r != []: break
                else:
                    link = urlparse.urljoin(self.base_link, 'movies-%s.html' %title[0].upper())
                    r = client.request(link, headers=headers)
                    pages = dom_parser.parse_dom(r, 'span', attrs={'class': 'break-pagination-2'})
                    pages = dom_parser.parse_dom(pages, 'a', req='href')
                    pages = [(i.attrs['href']) for i in pages]
                    if pages == []:
                        r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                        r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]
                        r = searchname(r)
                    else:
                        for page in pages:
                            log_utils.log('Paging through results: %s' % str('in loop'), log_utils.LOGNOTICE)
                            link = urlparse.urljoin(self.base_link, page)
                            r = client.request(link, headers=headers)
                            r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                            r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]
                            r = searchname(r)
                            if r != []: break

                # leaving old search in for if streamlord re-enables searching on the site
                # query = urlparse.urljoin(self.base_link, self.search_link)

                # post = urllib.urlencode({'searchapi2': title})

                # r = client.request(query, post=post, headers=headers)

                # if 'tvshowtitle' in data:
                    # r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                    # r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                # else:
                    # r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                    # r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]

                # r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                # r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
                # r = [i[0] for i in r][0]

                u = urlparse.urljoin(self.base_link, r)
                for i in range(3):
                    r = client.request(u, headers=headers)
                    if not 'failed' in r: break

                if 'season' in data and 'episode' in data:
                    r = re.findall('(episode-.+?-.+?\d+.+?\d+-\d+.html)', r)
                    r = [i for i in r if '-s%02de%02d-' % (int(data['season']), int(data['episode'])) in i.lower()][0]

                    r = urlparse.urljoin(self.base_link, r)

                    r = client.request(r, headers=headers)

            else:
                r = urlparse.urljoin(self.base_link, url)

                r = client.request(r, headers=headers)

            quality = 'HD' if '-movie-' in r else 'SD'

            try:
                f = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', r)[0]
                f = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', f)[0]

                u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % f, r)[0]
                u = re.findall('\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)', u)[0]

                a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], r)[0]
                b = client.parseDOM(r, 'span', {'id': u[2]})[0]

                url = u[0] + a + b
                url = url.replace('"', '').replace(',', '').replace('\/', '/')
                url += '|' + urllib.urlencode(headers)
            except:
                try:
                    url = r = jsunpack.unpack(r)
                    url = url.replace('"', '')
                except:
                    url = re.findall(r'sources[\'"]\s*:\s*\[.*?file[\'"]\s*:\s*(\w+)\(\).*function\s+\1\(\)\s*\{\s*return\([\'"]([^\'"]+)',r,re.DOTALL)[0][1]

            sources.append({'source': 'cdn', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False, 'autoplay': True})

            return sources
        except:
            return sources
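Note: searchname() in Example #57 reduces the scraped page links to the one whose slug equals the wanted title after normalisation. Its logic, standalone (normalized() is a stand-in for cleantitle.get(); the links are invented):

import re

def normalized(s):
    return re.sub('[^a-z0-9]', '', s.lower())

links = ['watch-movie-the-heat-123.html', 'watch-movie-heat-77.html']
r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in links]
r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
r = [i for i in r if normalized('heat') == normalized(i[1])]
print(r[0][0] if r else None)  # watch-movie-heat-77.html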