Example #1
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect playable sources from a Rainierland title page.

        `url` is a path relative to self.base_link; the host dicts and
        locDict belong to the provider interface and are unused here.
        Returns a list of source dicts (empty on any failure).
        """
        sources = []
        try:
            page = urlparse.urljoin(self.base_link, url)
            html = client2.http_get(page)

            # The player iframe lives inside the fluid-width wrapper div.
            wrapper = client.parseDOM(
                html,
                'div',
                attrs={'class': 'screen fluid-width-video-wrapper'})[0]
            frames = re.compile('src="(.*?)"').findall(wrapper)

            if len(frames) > 0:
                player_url = urlparse.urljoin(self.base_link, frames[0])
                player = client2.http_get(player_url)

                # <source> tags on the player page are Google-video streams.
                for i in re.compile('<source src="(.*?)"').findall(player):
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': client.googletag(i)[0]['quality'],
                            'provider': 'Rainierland',
                            'url': i
                        })
                    except Exception:
                        # Unrecognized stream URL: skip it, keep the rest.
                        pass

                # Remaining anchors are treated as openload mirrors.
                for i in client.parseDOM(player, 'a', ret='href'):
                    try:
                        resolved = resolvers.request(i)
                        sources.append({
                            'source': 'openload',
                            'quality': 'HD',
                            'provider': 'Rainierland',
                            'url': resolved
                        })
                    except Exception:
                        pass

            return sources

        except Exception:
            # Scraper contract: never raise, just return what we have.
            return sources
Example #2
0
 def resolve(self, url):
     """Follow the player redirect chain and return the final media URL.

     Returns None on any failure.
     """
     try:
         # sezonlukdizi links need one extra non-redirecting hop first.
         if 'sezonlukdizi.com' in url:
             url = client2.http_get(url, allow_redirect=False)
         control.log('############ SEZONLUKIDZ res-0 %s' % url)
         url = client2.http_get(url, allow_redirect=False)
         control.log('############ SEZONLUKIDZ res-1 %s' % url)
         # CDN URLs flagged requiressl must be fetched over https.
         if 'requiressl=yes' in url:
             url = url.replace('http://', 'https://')
         return url
     except:
         return
Example #3
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Resolve playable sources for an Onlinedizi episode page.

        Picks the subtitle ('Altyaz') variant of the episode, extracts the
        player iframe, and maps it to an ok.ru / vk / openload source.
        Returns a list of source dicts (empty on any failure).
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            path = urlparse.urlparse(url).path

            result = client2.http_get(url)
            # Strip non-ASCII bytes so the DOM/regex helpers behave.
            result = re.sub(r'[^\x00-\x7F]+','', result)
            result = client.parseDOM(result, 'li')
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in result]
            # First tab for this episode whose label contains 'Altyaz'
            # (Turkish: subtitled version).
            result = [i[0] for i in result if len(i[0]) > 0 and path in i[0][0] and len(i[1]) > 0 and 'Altyaz' in i[1][0]][0][0]

            url = urlparse.urljoin(self.base_link, result)

            result = client2.http_get(url)
            result = re.sub(r'[^\x00-\x7F]+','', result)
            result = client.parseDOM(result, 'div', attrs = {'class': 'video-player'})[0]
            # The last iframe inside the player div is the actual embed.
            result = client.parseDOM(result, 'iframe', ret='src')[-1]
            control.log('RRRR %s' % result)

            try:
                # Fast path: the embed URL carries the target base64-encoded
                # in its 'id' query parameter.
                url = base64.b64decode(urlparse.parse_qs(urlparse.urlparse(result).query)['id'][0])
                if not url.startswith('http'): raise Exception()
            except:
                # Slow path: fetch the embed page and hunt for the real
                # player URL in its iframes or quoted strings.
                url = client2.http_get(result)
                url = urllib.unquote_plus(url.decode('string-escape'))

                frame = client.parseDOM(url, 'iframe', ret='src')
                control.log('RRRR frame %s' % frame)

                if len(frame) > 0:
                    url = [client2.http_get(frame[-1], allow_redirect = False)]
                else: url = re.compile('"(.+?)"').findall(url)
                url = [i for i in url if 'ok.ru' in i or 'vk.com' in i or 'openload.co' in i][0]

            # ok.ru embeds expose the video id via the 'mid' parameter.
            try: url = 'http://ok.ru/video/%s' % urlparse.parse_qs(urlparse.urlparse(url).query)['mid'][0]
            except: pass

            if 'openload.co' in url: host = 'openload.co' ; direct = False ; url = [{'url': resolvers.request(url), 'quality': 'HD'}]
            elif 'ok.ru' in url: host = 'vk' ; direct = True ;url = [{'url': resolvers.request(url), 'quality': 'HD'}]
            elif 'vk.com' in url: host = 'vk' ; direct = True ; url = [{'url': resolvers.request(url), 'quality': 'HD'}]
            else: raise Exception()

            for i in url: sources.append({'source': host, 'quality': i['quality'], 'provider': 'Onlinedizi', 'url': i['url'], })


            return sources
        except:
            return sources
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Build the Muchmovies source list for a movie or episode.

        A trailing '?episode=N' on `url` selects one row of the episode
        table; otherwise the title is treated as a movie.
        Returns a list of source dicts (empty on any failure).
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            # A trailing '?episode=N' marks an episode request.
            content = re.compile('(.+?)\?episode=\d*$').findall(url)
            content = 'movie' if len(content) == 0 else 'episode'

            try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
            except: pass

            url = urlparse.urljoin(self.base_link, url) + '/watching.html'
            result = client2.http_get(url)

            # The media-player div carries the movie id and session token
            # that the ajax endpoints below require.
            movie = client.parseDOM(result, 'div', ret='movie-id', attrs = {'id': 'media-player'})[0]
            mtoken = client.parseDOM(result, 'div', ret='player-token', attrs = {'id': 'media-player'})[0]
            try:
                quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower()
            except: quality = 'hd'
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd': quality = 'HD'
            else: quality = 'SD'

            url = '/ajax/get_episodes/%s/%s' % (movie, mtoken)
            url = urlparse.urljoin(self.base_link, url)
            result = client2.http_get(url)
            result = client.parseDOM(result, 'div', attrs = {'class': 'les-content'})
            # Each anchor yields (server-id, hex hash, episode-id, episode-no).
            result = zip(client.parseDOM(result, 'a', ret='onclick'), client.parseDOM(result, 'a', ret='episode-id'), client.parseDOM(result, 'a'))
            result = [(re.sub('[^0-9]', '', i[0].split(',')[0]), re.sub('[^0-9a-fA-F]', '', i[0].split(',')[-1]), i[1], ''.join(re.findall('(\d+)', i[2])[:1])) for i in result]
            if content == 'episode': result = [i for i in result if i[3] == '%01d' % int(episode)]

            # Server ids 2-11 are labelled as direct Google-video streams.
            links = [('/ajax/load_episode/%s/%s' % (i[2], i[1]), 'gvideo') for i in result if 2 <= int(i[0]) <= 11]
            for i in links:
                sources.append({'source': i[1], 'quality': quality, 'provider': 'Muchmovies', 'url': i[0]})

            # Server 14 -> openload, 13 -> videomega embeds.
            links = []
            links += [('/ajax/load_embed/%s/%s' % (i[2], i[1]), 'openload') for i in result if i[0] == '14']
            links += [('/ajax/load_embed/%s/%s' % (i[2], i[1]), 'videomega') for i in result if i[0] == '13']
            for i in links:
                sources.append({'source': i[1], 'quality': quality, 'provider': 'Muchmovies', 'url': i[0]})

            return sources
        except:
            return sources
Example #5
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Build the Muchmovies source list for a movie or episode.

        A trailing '?episode=N' on `url` selects one row of the episode
        table; otherwise the title is treated as a movie.
        Returns a list of source dicts (empty on any failure).
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            # A trailing '?episode=N' marks an episode request.
            content = re.compile('(.+?)\?episode=\d*$').findall(url)
            content = 'movie' if len(content) == 0 else 'episode'

            try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
            except: pass

            url = urlparse.urljoin(self.base_link, url) + '/watching.html'
            result = client2.http_get(url)

            # The media-player div carries the movie id and session token
            # that the ajax endpoints below require.
            movie = client.parseDOM(result, 'div', ret='movie-id', attrs = {'id': 'media-player'})[0]
            mtoken = client.parseDOM(result, 'div', ret='player-token', attrs = {'id': 'media-player'})[0]
            try:
                quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower()
            except: quality = 'hd'
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd': quality = 'HD'
            else: quality = 'SD'

            url = '/ajax/get_episodes/%s/%s' % (movie, mtoken)
            url = urlparse.urljoin(self.base_link, url)
            result = client2.http_get(url)
            result = client.parseDOM(result, 'div', attrs = {'class': 'les-content'})
            # Each anchor yields (server-id, hex hash, episode-id, episode-no).
            result = zip(client.parseDOM(result, 'a', ret='onclick'), client.parseDOM(result, 'a', ret='episode-id'), client.parseDOM(result, 'a'))
            result = [(re.sub('[^0-9]', '', i[0].split(',')[0]), re.sub('[^0-9a-fA-F]', '', i[0].split(',')[-1]), i[1], ''.join(re.findall('(\d+)', i[2])[:1])) for i in result]
            if content == 'episode': result = [i for i in result if i[3] == '%01d' % int(episode)]

            # Server ids 2-11 are labelled as direct Google-video streams.
            links = [('/ajax/load_episode/%s/%s' % (i[2], i[1]), 'gvideo') for i in result if 2 <= int(i[0]) <= 11]
            for i in links:
                sources.append({'source': i[1], 'quality': quality, 'provider': 'Muchmovies', 'url': i[0]})

            # Server 14 -> openload, 13 -> videomega embeds.
            links = []
            links += [('/ajax/load_embed/%s/%s' % (i[2], i[1]), 'openload') for i in result if i[0] == '14']
            links += [('/ajax/load_embed/%s/%s' % (i[2], i[1]), 'videomega') for i in result if i[0] == '13']
            for i in links:
                sources.append({'source': i[1], 'quality': quality, 'provider': 'Muchmovies', 'url': i[0]})

            return sources
        except:
            return sources
Example #6
0
 def resolve(self, url):
     """Resolve a player URL by following its redirect chain; None on error."""
     try:
         # sezonlukdizi pages need an extra non-redirecting fetch first.
         if 'sezonlukdizi.com' in url: url = client2.http_get(url, allow_redirect=False)
         control.log('############ SEZONLUKIDZ res-0 %s' % url)
         url = client2.http_get(url, allow_redirect=False)
         control.log('############ SEZONLUKIDZ res-1 %s' % url)
         # Upgrade the scheme when the CDN demands SSL.
         if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
         return url
     except:
         return
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect gvideo and openload sources from a Dizilab episode page.

        Returns a list of source dicts (empty on any failure).
        """
        control.log('######### DIZILAB ## %s ' % url)
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            #result = client.source(url)
            result = client2.http_get(url)

            try:
                # NOTE(review): this regex has a single capture group, so
                # findall() yields plain strings; the i[0]/i[1] indexing
                # below then slices individual characters and int(i[1])
                # raises, which the enclosing except silently hides.  It
                # looks like the pattern once captured (src, resolution)
                # pairs — confirm against the live page and restore the
                # second group before trusting this branch.
                url = re.compile('"episode_player".*?src="([^"]+)"').findall(result)

                links = [(i[0], '1080p') for i in url if int(i[1]) >= 1080]
                links += [(i[0], 'HD') for i in url if 720 <= int(i[1]) < 1080]
                links += [(i[0], 'SD') for i in url if 480 <= int(i[1]) < 720]
                # Fall back to 360p only when no real SD stream exists.
                if not 'SD' in [i[1] for i in links]: links += [(i[0], 'SD') for i in url if 360 <= int(i[1]) < 480]

                for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Dizilab', 'url': i[0]})
            except:
                pass

            try:
                # Openload embeds are plain iframes on the page.
                url = client.parseDOM(result, 'iframe', ret='src')
                url = [i for i in url if 'openload.' in i][0]
                sources.append({'source': 'openload.co', 'quality': client.file_quality_openload(url)['quality'], 'provider': 'Dizilab', 'url': url})
            except:
                pass

            return sources

        except:
            return sources
Example #8
0
def daytse_movies(dbname):
    """Scrape the dayt.se movie listing and (eventually) store it in SQLite.

    Only page 1 is fetched (range(1, 2)); the DB insert is still disabled,
    so for now each movie's title and link are just printed.
    """
    con = lite.connect(dbname)
    cur = con.cursor()  # kept for the disabled CREATE/INSERT statements below
    #cur.execute("DROP TABLE IF EXISTS movies")
    #cur.executemany("INSERT INTO Cars VALUES(?, ?, ?)", cars)
    #cur.execute("CREATE TABLE movies (title TEXT, link TEXT, quality TEXT, UNIQUE (link))")

    for j in range(1, 2):
        # Parenthesised single-argument prints run on both Python 2 and 3.
        print('------- %s ' % j)
        src = 'http://dayt.se/movies/index.php?&page=%s' % j
        # Listing pages are windows-1256 encoded; normalise to utf-8.
        result = client2.http_get(src).decode('windows-1256').encode('utf8')
        result = client.parseDOM(result, 'table', attrs={'class': 'topic_table'})
        for i in result:
            print('-------------------------')
            print(client.parseDOM(i, 'img', attrs={'class': 'image'}, ret='alt')[0])
            print(client.parseDOM(i, 'a', attrs={'target': '_self'}, ret='href')[1])

        #result = [(client.parseDOM(i, 'img', attrs={'class': 'image'}, ret='alt')[0],
        #       client.parseDOM(i, 'a', attrs={'target': '_self'},  ret='href')[0],
        #       re.findall('Quality: (\d+).*</pre>', i)[0]) for i in result]
        #cur.executemany("INSERT INTO movies VALUES(?, ?, ?)", result)
    con.commit()
    con.close()
Example #9
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Extract hoster iframes from a CdaOnline title page.

        Returns a list of source dicts (empty on any failure).
        """
        try:
            sources = []

            if url == None: return sources

            page_url = urlparse.urljoin(self.base_link, url)
            html = client2.http_get(page_url)

            # Every player sits in its own 'movieplay' div as an iframe.
            blocks = client.parseDOM(html, 'div', attrs={'class': 'movieplay'})
            frames = [client.parseDOM(b, 'iframe', ret='src')[0] for b in blocks]

            for link in frames:
                try:
                    # Derive the hoster name from the iframe's hostname.
                    host = urlparse.urlparse(link).netloc
                    host = host.replace('www.', '').replace('embed.', '')
                    host = host.rsplit('.', 1)[0].lower()
                    host = client.replaceHTMLCodes(host).encode('utf-8')
                    sources.append({'source': host, 'quality': 'SD',
                                    'provider': 'CdaOnline', 'url': link,
                                    'vtype': 'BD'})
                except:
                    pass

            return sources
        except:
            return sources
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Derive quality from the page <title>, then probe every nofollow
        link concurrently via self.check (which is expected to append to
        self.sources).  Waits at most ~20 seconds for the workers.
        """
        try:
            self.sources =[]
            mylinks = []
            result = client2.http_get(url)
            mytitle = re.compile('<title>(.*?)</title>', re.DOTALL).findall(result)[0]
            # Rip-type keywords in the title trump resolution markers.
            if any(word in mytitle.lower() for word in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts']):
                quality = 'CAM'
            elif '1080p' in mytitle:
                quality = '1080p'
            elif '720p' in mytitle:
                quality = 'HD'
            else:
                quality = 'SD'
            links = client.parseDOM(result, 'a', attrs={'rel': 'nofollow'})
            links = [i for i in links if i.startswith('http')]
            for a in links:
                mylinks.append([a,quality])

            # Fan out one worker thread per candidate link.
            threads = []
            for i in mylinks: threads.append(workers.Thread(self.check, i))
            [i.start() for i in threads]
            # Poll once per second, up to 20 seconds total.
            for i in range(0, 10 * 2):
                is_alive = [x.is_alive() for x in threads]
                if all(x == False for x in is_alive): break
                time.sleep(1)
            return self.sources
        except:
            return self.sources
Example #11
0
 def request(self, url, check):
     """Fetch `url` and return the body re-encoded as utf-8, but only when
     the marker string `check` occurs in it; otherwise None.
     """
     try:
         result = client2.http_get(url)
         if check not in str(result):
             return
         return result.decode('iso-8859-1').encode('utf-8')
     except:
         return
Example #12
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Gather mail.ru / yadi.sk mirrors and the gvideo iframe chain
        from a Dayt forum page.  Returns a list of source dicts.
        """
        try:
            sources = []
            if url == None: return sources

            page = urlparse.urljoin(self.base_link, '/forum/' + url)
            html = client2.http_get(page)

            # Direct download mirrors live in the '5throw' div.
            row = client.parseDOM(html, 'div', attrs={'id': '5throw'})[0]
            mirror_links = client.parseDOM(row, 'a', attrs={'rel': 'nofollow'}, ret='href')

            # Quality is only signalled through the URL itself.
            mquality = '1080p' if '1080' in url else 'HD'

            for link in mirror_links:
                if 'mail.ru' in link:
                    resolved = resolvers.request(link)
                    sources.append({'source': 'MAIL.RU', 'quality': mquality, 'provider': 'Dayt', 'url': resolved})
                if 'yadi.sk' in link:
                    resolved = resolvers.request(link)
                    sources.append({'source': 'YADISK', 'quality': mquality, 'provider': 'Dayt', 'url': resolved})

            # Follow the nested 'pasep' iframes down to the player page.
            frame = [i for i in client.parseDOM(html, 'iframe', ret='src') if 'pasep' in i][0]
            inner = client.source(frame)
            inner = client.source(client.parseDOM(inner, 'iframe', ret='src')[0])
            final = client.parseDOM(inner, 'iframe', ret='src')[0]
            for link in resolvers.request(final):
                sources.append({'source': 'gvideo', 'quality': link[1], 'provider': 'Dayt', 'url': link[0]})
            return sources
        except:
            return sources
Example #13
0
def daytse_movies(dbname):
    """Scrape the dayt.se movie listing and (eventually) store it in SQLite.

    Only page 1 is fetched (range(1, 2)); the DB insert is still disabled,
    so for now each movie's title and link are just printed.
    """
    con = lite.connect(dbname)
    cur = con.cursor()  # kept for the disabled CREATE/INSERT statements below
    #cur.execute("DROP TABLE IF EXISTS movies")
    #cur.executemany("INSERT INTO Cars VALUES(?, ?, ?)", cars)
    #cur.execute("CREATE TABLE movies (title TEXT, link TEXT, quality TEXT, UNIQUE (link))")

    for j in range(1, 2):
        # Parenthesised single-argument prints run on both Python 2 and 3.
        print('------- %s ' % j)
        src = 'http://dayt.se/movies/index.php?&page=%s' % j
        # Listing pages are windows-1256 encoded; normalise to utf-8.
        result = client2.http_get(src).decode('windows-1256').encode('utf8')
        result = client.parseDOM(result, 'table', attrs={'class': 'topic_table'})
        for i in result:
            print('-------------------------')
            print(client.parseDOM(i, 'img', attrs={'class': 'image'}, ret='alt')[0])
            print(client.parseDOM(i, 'a', attrs={'target': '_self'}, ret='href')[1])

        #result = [(client.parseDOM(i, 'img', attrs={'class': 'image'}, ret='alt')[0],
        #       client.parseDOM(i, 'a', attrs={'target': '_self'},  ret='href')[0],
        #       re.findall('Quality: (\d+).*</pre>', i)[0]) for i in result]
        #cur.executemany("INSERT INTO movies VALUES(?, ?, ?)", result)
    con.commit()
    con.close()
 def get_movie(self, imdb, title, year):
     """Search by 'title year'; prefer 1080p, then 720p matches.

     Returns the href of the best matching result, or None.
     """
     try:
         query = self.search_link % urllib.quote_plus(title + ' ' + year)
         query = urlparse.urljoin(self.base_link, query)
         result = client2.http_get(query)
         # Tolerate off-by-one release years.
         years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
         result = client.parseDOM(result, 'h2', attrs={'class': 'title'})
         result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a')[0]) for i in result]
         print('R', result)
         result = [i for i in result if cleantitle.movie(title.lower()) in cleantitle.movie(i[1]).lower()]
         print('R', result)
         result = [i for i in result if any(x in i[1] for x in years)]
         print('R', result)
         result2 = [i for i in result if '1080' in i[1]]
         print('R', result)
         result3 = [i for i in result if '720' in i[1].lower()]
         print('R', result)
         # 720p overrides the default pick; 1080p overrides both.
         if len(result3) > 0: result = result3
         if len(result2) > 0: result = result2
         return result[0][0]
     except:
         return
Example #15
0
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        """Locate the season page for a show and return its site-relative
        path with a '?SxxEyy' suffix for the requested episode.

        `url` is expected to look like 'Title (2015)'.  Returns None on
        any failure.
        """
        try:
            tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]

            query = self.search_link % urllib.quote(tvshowtitle)
            query = urlparse.urljoin(self.base_link, query)

            #result = client.source(query)
            result = client2.http_get(query)


            tvshowtitle = cleantitle.tv(tvshowtitle)
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            # Accept the given year plus/minus one.
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            # Each result tile: (href, 'Title - Season N', 4-digit year).
            result = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h2'), re.compile('class *= *[\'|\"]jt-info[\'|\"]>(\d{4})<').findall(i)) for i in result]
            result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            result = [(i[0], re.compile('(.+?) - Season (\d*)$').findall(i[1]), i[2]) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
            result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
            result = [i for i in result if season == i[2]]
            # Estimate the show's first-air year as tile year - (season - 1),
            # then require it to fall within `years` — presumably to weed
            # out same-named shows (TODO confirm against the site).
            result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            result += '?S%02dE%02d' % (int(season), int(episode))

            # Reduce absolute URLs to a site-relative path.
            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
    def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        """Locate the season page for a show and return its site-relative
        path with a '?SxxEyy' suffix for the requested episode.

        `url` is expected to look like 'Title (2015)'.  Returns None on
        any failure.
        """
        try:
            tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]

            query = self.search_link % urllib.quote(tvshowtitle)
            query = urlparse.urljoin(self.base_link, query)

            #result = client.source(query)
            result = client2.http_get(query)


            tvshowtitle = cleantitle.tv(tvshowtitle)
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)
            # Accept the given year plus/minus one.
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]

            # Each result tile: (href, 'Title - Season N', 4-digit year).
            result = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
            result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h2'), re.compile('class *= *[\'|\"]jt-info[\'|\"]>(\d{4})<').findall(i)) for i in result]
            result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            result = [(i[0], re.compile('(.+?) - Season (\d*)$').findall(i[1]), i[2]) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
            result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
            result = [i for i in result if season == i[2]]
            # Estimate the show's first-air year as tile year - (season - 1),
            # then require it to fall within `years` — presumably to weed
            # out same-named shows (TODO confirm against the site).
            result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
            result = [i[0] for i in result if any(x in i[2] for x in years)][0]

            result += '?S%02dE%02d' % (int(season), int(episode))

            # Reduce absolute URLs to a site-relative path.
            try: url = re.compile('//.+?(/.+)').findall(result)[0]
            except: url = result
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #17
0
    def get_movie(self, imdb, title, year):
        """Search the listing for `title` and return its canonical path.

        Returns None on any failure.
        """
        try:
            query = self.search_link % urllib.quote(title)
            query = urlparse.urljoin(self.base_link, query)
            html = client2.http_get(query)
            title = cleantitle.movie(title)
            years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]

            items = client.parseDOM(html, 'div', attrs={'class': 'ml-item'})
            items = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in items]
            items = [(i[0][0], i[1][-1]) for i in items if len(i[0]) > 0 and len(i[1]) > 0]
            # Strip scheme+host, keep only the first two path segments.
            items = [(re.sub('http.+?//.+?/', '', i[0]), i[1]) for i in items]
            items = [('/'.join(i[0].split('/')[:2]), i[1]) for i in items]
            # Order-preserving de-duplication.
            items = [x for y, x in enumerate(items) if x not in items[:y]]
            items = [i for i in items if title == cleantitle.movie(i[1])]
            u = items[0][0]

            url = urlparse.urljoin(self.base_link, u)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            control.log("@@@@@@@@@@@@@@@ URL  %s" % url)

            return url
        except:
            return
Example #18
0
    def get_movie(self, imdb, title, year):
        """Search Rainierland for `title` and return the matching site path.

        Returns None on any failure.
        """
        try:
            query = self.search_link % urllib.quote(title)
            query = urlparse.urljoin(self.base_link, query)
            #control.log("rainierland-0 @@@@@@@@@@@@@@@@@@@@@@@@@@@@ %s" % query)
            result = client2.http_get(query)
            title = cleantitle.movie(title)
            # NOTE(review): `years` is computed but never used below.
            years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
            r = client.parseDOM(result, 'div', attrs = {'class': 'thumb'})
            #control.log("rainierland-1 @@@@@@@@@@@@@@@@@@@@@@@@@@@@ %s" % r)

            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            #control.log("rainierland-2 @@@@@@@@@@@@@@@@@@@@@@@@@@@@ %s" % r)

            r = [(i[0][0], i[1][-1]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            #control.log("rainierland-3 @@@@@@@@@@@@@@@@@@@@@@@@@@@@ %s" % r)

            # Strip scheme+host, keep only the first two path segments.
            r = [(re.sub('http.+?//.+?/','', i[0]), i[1]) for i in r]
            #control.log("rainierland-4 @@@@@@@@@@@@@@@@@@@@@@@@@@@@ %s" % r)

            r = [('/'.join(i[0].split('/')[:2]), i[1]) for i in r]
            # Order-preserving de-duplication, then exact title match.
            r = [x for y,x in enumerate(r) if x not in r[:y]]
            r = [i for i in r if title == cleantitle.movie(i[1])]
            u = [i[0] for i in r][0]

            url = urlparse.urljoin(self.base_link, u)
            url = urlparse.urlparse(url).path
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            #control.log("rainierland url @@@@@@@@@@@@@@@@@@@@@@@@@@@@ %s" % url)

            return url
        except:
            return
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Derive quality from the page <title>, then probe every nofollow
        link concurrently via self.check (which is expected to append to
        self.sources).  Waits at most ~20 seconds for the workers.
        """
        try:
            self.sources = []
            mylinks = []
            result = client2.http_get(url)
            mytitle = re.compile('<title>(.*?)</title>',
                                 re.DOTALL).findall(result)[0]
            # Rip-type keywords in the title trump resolution markers.
            if any(word in mytitle.lower() for word in [
                    'camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts',
                    'cam', 'ts'
            ]):
                quality = 'CAM'
            elif '1080p' in mytitle:
                quality = '1080p'
            elif '720p' in mytitle:
                quality = 'HD'
            else:
                quality = 'SD'
            links = client.parseDOM(result, 'a', attrs={'rel': 'nofollow'})
            links = [i for i in links if i.startswith('http')]
            for a in links:
                mylinks.append([a, quality])

            # Fan out one worker thread per candidate link.
            threads = []
            for i in mylinks:
                threads.append(workers.Thread(self.check, i))
            [i.start() for i in threads]
            # Poll once per second, up to 20 seconds total.
            for i in range(0, 10 * 2):
                is_alive = [x.is_alive() for x in threads]
                if all(x == False for x in is_alive): break
                time.sleep(1)
            return self.sources
        except:
            return self.sources
    def pelispedia_tvcache(self):
        """Crawl up to 10 listing pages (48 items each) and return
        (site-relative path, cleaned title, year) tuples, or None when
        nothing was found.
        """
        collected = []

        for page in range(0, 10):
            try:
                u = self.search2_link % str(page * 48)
                u = urlparse.urljoin(self.base_link, u)

                html = str(client2.http_get(u))
                # Drop non-ASCII bytes so the regexes below behave.
                html = re.sub(r'[^\x00-\x7F]+', '', html)

                rows = html.split('<li class=')
                rows = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'i'), re.findall('\((\d{4})\)', i)) for i in rows]
                rows = [(i[0][0], re.sub('\(|\)', '', i[1][0]), i[2][0]) for i in rows if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]

                # An empty page means we have run out of results.
                if len(rows) == 0: break
                collected += rows
            except:
                pass

        if len(collected) == 0: return
        return [(re.sub('http.+?//.+?/', '/', i[0]), cleantitle.get(i[1]), i[2]) for i in collected]
Example #21
0
    def get_movie(self, imdb, title, year):
        """Look the film up by its (preferably localised) title and return
        the matching site path, or None.
        """
        mytitle = title
        try:
            # Prefer the Spanish title from IMDb when available.
            t = 'http://www.imdb.com/title/%s' % imdb
            t = client.source(t, headers={'Accept-Language': 'es-ES'})
            t = client.parseDOM(t, 'title')[0]
            mytitle = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
        except:
            pass
        try:
            t = cleantitle.get(mytitle)

            query = self.search3_link % urllib.quote_plus(cleantitle.query2(mytitle))
            query = urlparse.urljoin(self.base_link, query)
            result = client2.http_get(query)
            result = re.sub(r'[^\x00-\x7F]+', '', result)

            rows = result.split('<li class=')
            rows = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'i'), re.findall('\((\d{4})\)', i)) for i in rows]
            rows = [(i[0][0], re.sub('\(|\)', '', i[1][0]), i[2][0]) for i in rows if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            # First hit whose year matches exactly.
            match = [i[0] for i in rows if year == i[2]][0]

            # Reduce absolute URLs to a site-relative path.
            try: url = re.findall('//.+?(/.+)', match)[0]
            except: url = match
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            pass
Example #22
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Gather mail.ru / yadi.sk mirrors and the gvideo iframe chain
        from a Dayt forum page.  Returns a list of source dicts.
        """
        try:
            sources = []
            if url == None: return sources

            page = urlparse.urljoin(self.base_link, '/forum/' + url)
            html = client2.http_get(page)

            # Direct download mirrors live in the '5throw' div.
            row = client.parseDOM(html, 'div', attrs={'id': '5throw'})[0]
            mirror_links = client.parseDOM(row, 'a', attrs={'rel': 'nofollow'}, ret='href')

            # Quality is only signalled through the URL itself.
            mquality = '1080p' if '1080' in url else 'HD'

            for link in mirror_links:
                if 'mail.ru' in link:
                    resolved = resolvers.request(link)
                    sources.append({'source': 'MAIL.RU', 'quality': mquality, 'provider': 'Dayt', 'url': resolved})
                if 'yadi.sk' in link:
                    resolved = resolvers.request(link)
                    sources.append({'source': 'YADISK', 'quality': mquality, 'provider': 'Dayt', 'url': resolved})

            # Follow the nested 'pasep' iframes down to the player page.
            frame = [i for i in client.parseDOM(html, 'iframe', ret='src') if 'pasep' in i][0]
            inner = client.source(frame)
            inner = client.source(client.parseDOM(inner, 'iframe', ret='src')[0])
            final = client.parseDOM(inner, 'iframe', ret='src')[0]
            for link in resolvers.request(final):
                sources.append({'source': 'gvideo', 'quality': link[1], 'provider': 'Dayt', 'url': link[0]})
            return sources
        except:
            return sources
Exemple #23
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Gather gvideo stream links for a Sezonlukdizi episode page.

        Collects up to two candidate player pages (the 'embed' iframe
        and the first 'playerMenu' item fetched through the video_link
        endpoint), keeps only pages that declare caption tracks, and
        maps jwplayer file/label pairs to 1080p / HD / SD buckets.
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            html = client2.http_get(url)
            # Drop non-ASCII noise before parsing.
            html = re.sub(r'[^\x00-\x7F]+', ' ', html)

            pages = []
            try:
                box = client.parseDOM(html, 'div', attrs = {'id': 'embed'})[0]
                pages.append(client.parseDOM(box, 'iframe', ret='src')[0])
            except:
                pass
            try:
                menu = client.parseDOM(html, 'div', attrs = {'id': 'playerMenu'})[0]
                item = client.parseDOM(menu, 'div', ret='data-id', attrs = {'class': 'item'})[0]
                resp = cloudflare.source(urlparse.urljoin(self.base_link, self.video_link), post=urllib.urlencode( {'id': item} ))
                pages.append(client.parseDOM(resp, 'iframe', ret='src')[0])
            except:
                pass

            for page in pages:
                try:
                    body = client2.http_get(page)

                    # Pages without captions are treated as invalid players.
                    if not re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', body): raise Exception()

                    pairs = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?[^"]*"').findall(body)

                    ranked  = [(f, '1080p') for f, h in pairs if int(h) >= 1080]
                    ranked += [(f, 'HD') for f, h in pairs if 720 <= int(h) < 1080]
                    ranked += [(f, 'SD') for f, h in pairs if 480 <= int(h) < 720]

                    for f, q in ranked:
                        sources.append({'source': 'gvideo', 'quality': q, 'provider': 'Sezonlukdizi', 'url': f})
                except:
                    pass

            return sources
        except:
            return sources
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect stream sources for a Dizilab episode page.

        Attempts two extractions from the fetched page: gvideo links from
        the 'episode_player' markup, and a single openload iframe.
        Returns a list of source dicts (empty on failure).
        """
        control.log('######### DIZILAB ## %s ' % url)
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            #result = client.source(url)
            result = client2.http_get(url)

            try:
                # NOTE(review): the regex has a single capture group, so
                # findall returns plain strings; below, i[0]/i[1] index
                # *characters* of each url and int(i[1]) raises, which the
                # enclosing except silently swallows -- this whole gvideo
                # branch appears to be dead code. TODO confirm intended
                # pattern (probably file+label pairs as in Sezonlukdizi).
                url = re.compile('"episode_player".*?src="([^"]+)"').findall(
                    result)

                links = [(i[0], '1080p') for i in url if int(i[1]) >= 1080]
                links += [(i[0], 'HD') for i in url if 720 <= int(i[1]) < 1080]
                links += [(i[0], 'SD') for i in url if 480 <= int(i[1]) < 720]
                # Fall back to 360p links only when no SD link was found.
                if not 'SD' in [i[1] for i in links]:
                    links += [(i[0], 'SD') for i in url
                              if 360 <= int(i[1]) < 480]

                for i in links:
                    sources.append({
                        'source': 'gvideo',
                        'quality': i[1],
                        'provider': 'Dizilab',
                        'url': i[0]
                    })
            except:
                pass

            try:
                # First openload iframe on the page, quality probed remotely.
                url = client.parseDOM(result, 'iframe', ret='src')
                url = [i for i in url if 'openload.' in i][0]
                sources.append({
                    'source':
                    'openload.co',
                    'quality':
                    client.file_quality_openload(url)['quality'],
                    'provider':
                    'Dizilab',
                    'url':
                    url
                })
            except:
                pass

            return sources

        except:
            return sources
Exemple #25
0
    def onlinedizi_tvcache(self):
        """Scrape the all-series listing into (path, normalized-title) pairs.

        Returns None on any failure.
        """
        try:
            html = client2.http_get(self.base_link)
            listing = client.parseDOM(html, 'ul', attrs = {'class': 'all-series-list.+?'})[0]
            entries = []
            for item in client.parseDOM(listing, 'li'):
                hrefs = client.parseDOM(item, 'a', ret='href')
                names = client.parseDOM(item, 'a')
                if not hrefs or not names: continue
                # Keep only the show path below /diziler, from the last anchor.
                paths = re.compile('http.+?//.+?/diziler(/.+?/)').findall(hrefs[-1])
                if not paths: continue
                label = re.sub('&#\d*;', '', names[0])
                entries.append((paths[0], cleantitle.get(label)))
            return entries
        except:
            return
Exemple #26
0
def check(url):
    """Return True when *url* points to a live openload embed.

    Urls that already match the embed/f pattern are accepted outright;
    otherwise the file id is extracted, the canonical embed page is
    fetched, and its error marker is inspected.  Any failure (no id,
    fetch error, error page) yields False.
    """
    try:
        # Bug fix: the original called re.search(..., (url)[0]), i.e. on
        # the first *character* of the url, so this fast-path never matched.
        ifstream = re.search('//.+?/(?:embed|f)/([0-9a-zA-Z-_]+)', url)
        if ifstream: return True
        # `id` shadowed the builtin in the original; renamed locally.
        media_id = re.compile('//.+?/(?:embed|f)/([0-9a-zA-Z-_]+)').findall(url)[0]
        url = 'https://openload.co/embed/%s/' % media_id

        result = client2.http_get(url)
        if result == None: return False
        if '>We are sorry!<' in result: return False
        return True
    except:
        return False
Exemple #27
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Extract gvideo and openload sources from a Rainierland page.

        The page's video wrapper iframe is fetched; its <source> tags are
        treated as gvideo streams and its anchors as openload mirrors.
        Returns a list of source dicts (empty on failure).
        """
        sources = []

        try:
            page = urlparse.urljoin(self.base_link, url)

            html = client2.http_get(page)
            wrapper = client.parseDOM(html, 'div', attrs = {'class': 'screen fluid-width-video-wrapper'})[0]
            frames = re.compile('src="(.*?)"').findall(wrapper)

            if len(frames) > 0:
                player = client2.http_get(urlparse.urljoin(self.base_link, frames[0]))

                # <source> tags hold direct google-video streams.
                for stream in re.compile('<source src="(.*?)"').findall(player):
                    try:
                        sources.append({'source': 'gvideo', 'quality': client.googletag(stream)[0]['quality'], 'provider': 'Rainierland', 'url': stream})
                    except:
                        pass

                # Remaining anchors are resolved as openload mirrors.
                for link in client.parseDOM(player, 'a', ret='href'):
                    try:
                        url = resolvers.request(link)
                        sources.append({'source': 'openload', 'quality': 'HD', 'provider': 'Rainierland', 'url': url})
                    except:
                        pass

            return sources

        except:
            return sources
Exemple #28
0
    def sezonlukdizi_tvcache(self):
        """Build (path, normalized-title) pairs from the search index.

        The index is a JS object list; 'u' holds the url and 'd' the
        display title.  Returns None on any failure.
        """
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)

            raw = client2.http_get(url)
            entries = []
            for chunk in re.compile('{(.+?)}').findall(raw):
                u = re.findall('u\s*:\s*(?:\'|\")(.+?)(?:\'|\")', chunk)
                d = re.findall('d\s*:\s*(?:\'|\")(.+?)(?:\'|\")', chunk)
                if not u or not d: continue
                path = re.compile('/diziler(/.+?)(?://|\.|$)').findall(u[0])
                if not path: continue
                label = re.sub('&#\d*;', '', d[0])
                entries.append((path[0] + '/', cleantitle.get(label)))
            return entries
        except:
            return
Exemple #29
0
    def fmovies_cache(self):
        """Return (relative-url, title) pairs from the fmovies index page.

        Returns None on any failure.
        """
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)
            control.log('>>>>>>>>>>>>---------- CACHE %s' % url)

            html = client2.http_get(url)
            html = html.split('>Movies and Series<')[-1]
            control.log('>>>>>>>>>>>>---------- CACHE-2 %s' % html)
            listing = client.parseDOM(html, 'ul')[0]
            control.log('>>>>>>>>>>>>---------- CACHE-3 %s' % listing)

            pairs = re.compile('href="(.+?)">(.+?)<').findall(listing)

            # Make hrefs site-relative and strip numeric HTML entities.
            pairs = [(re.sub('http.+?//.+?/','/', href), re.sub('&#\d*;','', text)) for href, text in pairs]
            control.log('>>>>>>>>>>>>---------- CACHE-4 ')

            return pairs
        except:
            return
    def onlinedizi_tvcache(self):
        """Cache the all-series listing as (path, normalized-title) tuples.

        Returns None on any failure.
        """
        try:
            page = client2.http_get(self.base_link)
            series = client.parseDOM(page, 'ul', attrs={'class': 'all-series-list.+?'})[0]
            items = client.parseDOM(series, 'li')
            pairs = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a')) for i in items]
            # Last anchor is the show link; first anchor text is the title.
            pairs = [(h[-1], t[0]) for h, t in pairs if h and t]
            pairs = [(re.compile('http.+?//.+?/diziler(/.+?/)').findall(h),
                      re.sub('&#\d*;', '', t)) for h, t in pairs]
            return [(p[0], cleantitle.get(t)) for p, t in pairs if p]
        except:
            return
Exemple #31
0
    def sezonlukdizi_tvcache(self):
        """Cache the show index as (path, normalized-title) tuples.

        Each index record is a JS object; 'u' is the url, 'd' the title.
        Returns None on any failure.
        """
        try:
            endpoint = urlparse.urljoin(self.base_link, self.search_link)

            body = client2.http_get(endpoint)
            records = re.compile('{(.+?)}').findall(body)

            records = [(re.findall('u\s*:\s*(?:\'|\")(.+?)(?:\'|\")', r),
                        re.findall('d\s*:\s*(?:\'|\")(.+?)(?:\'|\")', r))
                       for r in records]
            records = [(u[0], d[0]) for u, d in records if u and d]
            records = [(re.compile('/diziler(/.+?)(?://|\.|$)').findall(u),
                        re.sub('&#\d*;', '', d)) for u, d in records]
            return [(p[0] + '/', cleantitle.get(d)) for p, d in records if p]
        except:
            return
 def get_episode(self, url, imdb, tvdb, title, date, season, episode):
     """Resolve an episode page url from a show name plus SxxEyy tag.

     Prefers 1080p results, then 720p, otherwise the first match.
     Returns None on any failure.
     """
     try:
         if url == None: return
         show = url.lower()
         tag = '%s S%02dE%02d' % (url, int(season), int(episode))
         tag = client.replaceHTMLCodes(tag)
         tag = tag.encode('utf-8')
         search = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(tag))
         body = client2.http_get(search)
         headings = client.parseDOM(body, 'h2', attrs={'class': 'title'})
         found = [(client.parseDOM(h, 'a', ret='href')[0], client.parseDOM(h, 'a')[0]) for h in headings]
         found = [f for f in found if show in f[1].lower()]
         hd1080 = [f for f in found if '1080' in f[1].lower()]
         hd720 = [f for f in found if '720' in f[1].lower()]
         if len(hd720) > 0: found = hd720
         if len(hd1080) > 0: found = hd1080
         return found[0][0]
     except:
         return
Exemple #33
0
    def pelispedia_tvcache(self):
        """Page through the catalogue (48 items per page, up to 10 pages)
        and return (relative-url, cleaned-title, year) tuples.

        Stops at the first empty page; returns None when nothing was found.
        """
        result = []

        for page in range(0, 10):
            try:
                u = urlparse.urljoin(self.base_link, self.search2_link % str(page * 48))

                html = str(client2.http_get(u))
                html = re.sub(r'[^\x00-\x7F]+','', html)
                rows = html.split('<li class=')
                rows = [(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'i'), re.findall('\((\d{4})\)', r)) for r in rows]
                rows = [(a[0], re.sub('\(|\)','', t[0]), y[0]) for a, t, y in rows if a and t and y]

                if len(rows) == 0: break
                result += rows
            except:
                pass

        if len(result) == 0: return
        # Make urls site-relative and normalize titles for matching.
        return [(re.sub('http.+?//.+?/','/', a), cleantitle.tv(t), y) for a, t, y in result]
Exemple #34
0
    def fmovies_cache(self):
        """Scrape the fmovies index into (relative-url, title) pairs.

        Returns None on any failure.
        """
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)
            control.log('>>>>>>>>>>>>---------- CACHE %s' % url)

            page = client2.http_get(url)
            page = page.split('>Movies and Series<')[-1]
            control.log('>>>>>>>>>>>>---------- CACHE-2 %s' % page)
            menu = client.parseDOM(page, 'ul')[0]
            control.log('>>>>>>>>>>>>---------- CACHE-3 %s' % menu)

            out = []
            for href, text in re.compile('href="(.+?)">(.+?)<').findall(menu):
                # Site-relative url, entities stripped from the title.
                out.append((re.sub('http.+?//.+?/', '/', href), re.sub('&#\d*;', '', text)))
            control.log('>>>>>>>>>>>>---------- CACHE-4 ')

            return out
        except:
            return
 def get_movie(self, imdb, title, year):
     """Find a movie page url matching title and year (accepting ±1 year).

     Results containing '1080' are preferred, then '720'.
     Returns None on any failure.
     """
     try:
         query = self.search_link % urllib.quote_plus(title + ' ' + year)
         query = urlparse.urljoin(self.base_link, query)
         html = client2.http_get(query)
         years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
         rows = client.parseDOM(html, 'h2', attrs={'class': 'title'})
         rows = [(client.parseDOM(r, 'a', ret='href')[0], client.parseDOM(r, 'a')[0]) for r in rows]
         print('R', rows)
         rows = [r for r in rows if cleantitle.movie(title.lower()) in cleantitle.movie(r[1]).lower()]
         print('R', rows)
         rows = [r for r in rows if any(x in r[1] for x in years)]
         print('R', rows)
         hd1080 = [r for r in rows if '1080' in r[1]]
         print('R', rows)
         hd720 = [r for r in rows if '720' in r[1].lower()]
         print('R', rows)
         if len(hd720) > 0: rows = hd720
         if len(hd1080) > 0: rows = hd1080
         return rows[0][0]
     except:
         return
Exemple #36
0
 def get_movie(self, imdb, title, year):
     """Locate a cda-online movie page url matching title and year (±1).

     Returns a utf-8 encoded relative url, or None on failure.
     """
     try:
         query = urlparse.urljoin(self.base_link, self.search_link % (urllib.unquote(title)))
         control.log('cda-online URL %s' % query)
         html = client2.http_get(query)
         items = client.parseDOM(html, 'div', attrs={'class': 'item'})
         items = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'h2')[0], client.parseDOM(i, 'span', attrs={'class': 'year'})[0]) for i in items]
         items = [i for i in items if cleantitle.movie(title) in cleantitle.movie(i[1])]
         years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
         match = [i[0] for i in items if any(x in i[2] for x in years)][0]
         # Strip scheme+host when present; keep the raw value otherwise.
         try: url = re.compile('//.+?(/.+)').findall(match)[0]
         except: url = match
         url = client.replaceHTMLCodes(url)
         url = url.encode('utf-8')
         control.log('ALLTUBE URL %s' % url)
         return url
     except:
         return
 def get_episode(self, url, imdb, tvdb, title, date, season, episode):
     """Resolve the episode page url; prefers 1080p, then 720p hits.

     Returns None on any failure.
     """
     try:
         if url == None: return
         name = url.lower()
         tag = '%s S%02dE%02d' % (url, int(season), int(episode))
         tag = client.replaceHTMLCodes(tag)
         tag = tag.encode('utf-8')
         search = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(tag))
         body = client2.http_get(search)
         candidates = []
         for h in client.parseDOM(body, 'h2', attrs={'class': 'title'}):
             candidates.append((client.parseDOM(h, 'a', ret='href')[0], client.parseDOM(h, 'a')[0]))
         candidates = [c for c in candidates if name in c[1].lower()]
         best = [c for c in candidates if '1080' in c[1].lower()]
         good = [c for c in candidates if '720' in c[1].lower()]
         if len(good) > 0: candidates = good
         if len(best) > 0: candidates = best
         return candidates[0][0]
     except:
         return
Exemple #38
0
 def get_movie(self, imdb, title, year, originaltitle):
     """Locate a cda-online movie page url by title, accepting the given
     year, year+1 or year-1.

     Returns a utf-8 encoded relative url, or None on failure.
     """
     print("cda online originaltitle:%s" % originaltitle)
     try:
         q = self.search_link % (urllib.unquote(title))
         q = urlparse.urljoin(self.base_link, q)
         control.log('cda-online URL %s' % q)
         page = client2.http_get(q)
         cards = client.parseDOM(page, 'div', attrs={'class': 'item'})
         cards = [(client.parseDOM(c, 'a', ret='href')[0],
                   client.parseDOM(c, 'h2')[0],
                   client.parseDOM(c, 'span', attrs={'class': 'year'})[0]) for c in cards]
         cards = [c for c in cards if cleantitle.movie(title) in cleantitle.movie(c[1])]
         ok_years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
         hit = [c[0] for c in cards if any(y in c[2] for y in ok_years)][0]
         # Prefer the path component; fall back to the raw match.
         try: link = re.compile('//.+?(/.+)').findall(hit)[0]
         except: link = hit
         link = client.replaceHTMLCodes(link)
         link = link.encode('utf-8')
         control.log('ALLTUBE URL %s' % link)
         return link
     except:
         return
Exemple #39
0
    def get_movie(self, imdb, title, year):
        """Resolve a movie page url via two strategies.

        Strategy 1: a search endpoint (base64-decoded search_link) queried
        with 'title year'; titles are parsed from JSON results and matched
        on cleaned title + year, falling back to the Spanish IMDb title
        when the first pass finds nothing.
        Strategy 2: the site's own search3_link listing, matched the same
        way.  Returns a utf-8 encoded relative url, or None if both fail.
        """
        print("R", title)

        try:
            t = cleantitle.get(title)

            query = '%s %s' % (title, year)
            query = base64.b64decode(self.search_link) % urllib.quote_plus(query)
            print("R", query)

            result = client2.http_get(query, headers={'Referer': self.base_link})
            result = json.loads(result)['results']
            print("R",result)

            # Extract '(url, title, year)' triples; titles look like
            # "Ver <name> HD (<year>)" with optional prefix/suffix.
            result = [(i['url'], i['titleNoFormatting']) for i in result]
            result = [(i[0], re.findall('(?:^Ver |)(.+?)(?: HD |)\((\d{4})', i[1])) for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1]) for i in result if len(i[1]) > 0]

            r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]

            if len(r) == 0:
                # Fallback: match against the Spanish-localized IMDb title.
                t = 'http://www.imdb.com/title/%s' % imdb
                t = client.source(t, headers={'Accept-Language': 'es-ES'})
                t = client.parseDOM(t, 'title')[0]
                t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
                t = cleantitle.get(t)

                r = [i for i in result if t == cleantitle.get(i[1]) and year == i[2]]

            # Reduce the absolute url to a site-relative path when possible.
            try:
                url = re.findall('//.+?(/.+)', r[0][0])[0]
            except:
                url = r[0][0]
            try:
                url = re.findall('(/.+?/.+?/)', url)[0]
            except:
                pass
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            return url
        except:
            pass

        try:
            t = cleantitle.get(title)

            query = self.search3_link % urllib.quote_plus(cleantitle.query2(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client2.http_get(query)
            result = re.sub(r'[^\x00-\x7F]+', '', result)
            print("R",result)


            # Listing entries: anchor href, <i> title, '(year)' marker.
            r = result.split('<li class=')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'i'), re.findall('\((\d{4})\)', i)) for i in
                 r]
            r = [(i[0][0], re.sub('\(|\)', '', i[1][0]), i[2][0]) for i in r if
                 len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

            try:
                url = re.findall('//.+?(/.+)', r)[0]
            except:
                url = r
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            return url
        except:
            pass
Exemple #40
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect Dayt sources for a movie identifier like 'Title (2015)'.

        Looks the title up in a locally cached sqlite database (refreshed
        from data_link when older than ~2 days), fetches the matching
        movie page, and collects mail.ru / yadi.sk hoster links plus the
        nested 'pasep' iframe chain of gvideo streams.

        NOTE(review): urls that look like episodes ('... SxxExx') skip the
        whole branch and the function implicitly returns None, not a list.
        """
        try:
            sources = []
            control.log('#Dayt url %s' % url)

            if url == None: return sources
            content = re.compile('(.+?)\sS\d*E\d*$').findall(url)
            control.log('#Dayt content %s' % content)

            if len(content) == 0:
                control.log('#Dayt ttttttt')
                title, year = re.compile('(.+?) \((\d{4})\)').findall(url)[0]
                mytitle = cleantitle.movie(title)
                control.log('#Dayt title, year: %s,%s' % (title, year))

                # Refresh the cached database when older than two days
                # (or missing -- getmtime raises and `download` stays True).
                data = os.path.join(control.dataPath, 'daytse1.db')
                download = True
                try:
                    download = abs(
                        datetime.datetime.fromtimestamp(os.path.getmtime(
                            data)) -
                        (datetime.datetime.now())) > datetime.timedelta(days=2)
                except:
                    pass
                if download == True:
                    control.log('#Dayt DDDOOOWNLOAD ')
                    result = client.request(base64.b64decode(self.data_link))
                    with open(data, "wb") as code:
                        code.write(result)

                # NOTE(review): SQL is built by string concatenation -- a
                # title containing a quote breaks the query (and this is an
                # injection pattern); should use a parameterized query.
                dbcon = database.connect(data)
                dbcur = dbcon.cursor()
                dbcur.execute("SELECT * FROM movies WHERE title like '%" +
                              title.lower() + "%'")
                result = dbcur.fetchone()
                control.log('#Dayt Result Final %s' % result[0])

                #result = [i for i in result if mytitle in cleantitle.movie(i[0])]
                #result = [i[0] for i in result if any(x in i[1] for x in years)][0]
                #result = re.compile('(.+?)(?:&amp)').findall(result)[0]
                #return result
                control.log('#Dayt Final %s' % result[1])

                # Row layout assumed: result[1] = url slug, result[2] holds
                # a quality marker -- TODO confirm against the db schema.
                myurl = urlparse.urljoin(
                    self.base_link, '/movies/' + urllib.quote_plus(result[1]))
                control.log('#Dayt Final myurl %s' % myurl)

                myhead = {'Referer': 'http://dayt.se/movies/'}

                #result = client2.http_get('http://dayt.se/', headers={})
                #result = client2.http_get(myurl, headers=myhead, allow_redirect=False)
                #control.log('#Dayt Final result  redir 1 %s' % result)
                result10 = client2.http_get(myurl, headers=myhead)
                #control.log('#Dayt Final result  redir 2 %s' % result)

                # Hoster links live inside the '5throw' container.
                result10 = client.parseDOM(result10,
                                           'div',
                                           attrs={'id': '5throw'})[0]
                result10 = client.parseDOM(result10,
                                           'a',
                                           attrs={'rel': 'nofollow'},
                                           ret='href')
                mquality = 'HD'
                if '1080' in result[2]: mquality = '1080p'
                for i in result10:
                    if 'mail.ru' in i:
                        myresolve = resolvers.request(i)
                        sources.append({
                            'source': 'MAIL.RU',
                            'quality': mquality,
                            'provider': 'Dayt',
                            'url': myresolve
                        })
                    if 'yadi.sk' in i:
                        myresolve = resolvers.request(i)
                        sources.append({
                            'source': 'YADISK',
                            'quality': mquality,
                            'provider': 'Dayt',
                            'url': myresolve
                        })

                # NOTE(review): at this point `result` is still the sqlite
                # row tuple, not the fetched page -- parseDOM on it likely
                # raises, so the gvideo chain below may never run; the page
                # html (result10's source) was probably intended here.
                result = client.parseDOM(result, 'iframe', ret='src')
                result = [i for i in result if 'pasep' in i][0]
                result = client.request(result)
                result = client.parseDOM(result, 'iframe', ret='src')[0]
                result = client.request(result)
                result = client.parseDOM(result, 'iframe', ret='src')[0]
                links = resolvers.request(result)
                for i in links:
                    sources.append({
                        'source': 'gvideo',
                        'quality': i[1],
                        'provider': 'Dayt',
                        'url': i[0]
                    })
                return sources
        except:
            return sources
Exemple #41
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect stream sources for an Xmovies title page.

        Reads the page's load_player(id, quality) calls, queries the ajax
        load_player endpoint for each unique pair (keeping quality '0' or
        >= 720), and classifies the returned iframe/embed into gvideo,
        openload or videomega links.  Returns a list of source dicts
        (empty on failure).
        """
        try:
            sources = []

            if url == None: return sources

            u = urlparse.urljoin(self.base_link, url)

            r = client2.http_get(u)
            #control.log('R %s' % r)

            # Unique (player-id, quality) pairs from the inline JS calls.
            r = re.findall("load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", r)
            r = list(set(r))
            r = [i for i in r if i[1] == '0' or int(i[1]) >= 720]
            control.log('R %s' % r)

            links = []

            for p in r:
                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': u}

                    player = urlparse.urljoin(self.base_link, '/ajax/movie/load_player')

                    post = urllib.urlencode({'id': p[0], 'quality': p[1]})

                    result = client2.http_get(player, data=post, headers=headers)
                    #control.log('result %s' % result)

                    frame = client.parseDOM(result, 'iframe', ret='src')
                    embed = client.parseDOM(result, 'embed', ret='flashvars')

                    if frame:
                        if 'player.php' in frame[0]:
                            # The hidden input holds the real stream page url.
                            # Bug fix: the original rebound `frame` to this
                            # string and then used frame[0] -- its first
                            # *character* -- when building the Referer.
                            hidden = client.parseDOM(result, 'input', ret='value', attrs={'type': 'hidden'})[0]

                            headers = {'Referer': urlparse.urljoin(self.base_link, hidden)}

                            url = client.request(hidden, headers=headers, output='geturl')

                            links += [
                                {'source': 'gvideo', 'url': url, 'quality': client.googletag(url)[0]['quality'],
                                 'direct': True}]

                        elif 'openload.' in frame[0]:
                            links += [{'source': 'openload.co', 'url': frame[0], 'quality': 'HD', 'direct': False}]

                        elif 'videomega.' in frame[0]:
                            links += [{'source': 'videomega.tv', 'url': frame[0], 'quality': 'HD', 'direct': False}]

                    elif embed:
                        # flashvars carries a google fmt_stream_map url list.
                        url = urlparse.parse_qs(embed[0])['fmt_stream_map'][0]

                        url = [i.split('|')[-1] for i in url.split(',')]

                        for i in url:
                            try: links.append({'source': 'gvideo', 'url': i, 'quality': client.googletag(i)[0]['quality'],'direct': True})
                            except: pass

                except:
                    pass

            for i in links:
                #sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Xmovies', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})
                sources.append({'source':  i['source'], 'quality': i['quality'], 'provider': 'Xmovies', 'url': i['url']})


            return sources
        except:
            return sources
Exemple #42
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Resolve gvideo streams for a Sezonlukdizi episode.

        Tries the inline 'embed' iframe and the first 'playerMenu' item
        (fetched through the video_link ajax endpoint); pages without a
        captions track are ignored.  File/label pairs are bucketed into
        1080p / HD / SD.
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            page = client2.http_get(url)
            page = re.sub(r'[^\x00-\x7F]+', ' ', page)

            players = []
            try:
                box = client.parseDOM(page, 'div', attrs={'id': 'embed'})[0]
                players.append(client.parseDOM(box, 'iframe', ret='src')[0])
            except:
                pass
            try:
                menu = client.parseDOM(page, 'div', attrs={'id': 'playerMenu'})[0]
                pid = client.parseDOM(menu, 'div', ret='data-id', attrs={'class': 'item'})[0]
                ajax = cloudflare.source(urlparse.urljoin(self.base_link, self.video_link), post=urllib.urlencode({'id': pid}))
                players.append(client.parseDOM(ajax, 'iframe', ret='src')[0])
            except:
                pass

            for player in players:
                try:
                    body = client2.http_get(player)

                    # Only players that declare caption tracks are trusted.
                    if not re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', body): raise Exception()

                    found = re.compile('"?file"?\s*:\s*"([^"]+)"\s*,\s*"?label"?\s*:\s*"(\d+)p?[^"]*"').findall(body)

                    ranked  = [(f, '1080p') for f, h in found if int(h) >= 1080]
                    ranked += [(f, 'HD') for f, h in found if 720 <= int(h) < 1080]
                    ranked += [(f, 'SD') for f, h in found if 480 <= int(h) < 720]

                    for f, q in ranked:
                        sources.append({'source': 'gvideo', 'quality': q, 'provider': 'Sezonlukdizi', 'url': f})
                except:
                    pass

            return sources
        except:
            return sources
Exemple #43
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect playable stream links for an Xmovies title page.

        url -- site-relative title path (None yields an empty list).
        hosthdDict/hostDict/locDict -- unused here; kept for the common
        scraper interface.
        Returns a list of {'source', 'quality', 'provider', 'url'} dicts;
        any failure returns whatever was collected so far (possibly []).
        """
        try:
            sources = []

            if url == None: return sources

            u = urlparse.urljoin(self.base_link, url)

            r = client2.http_get(u)

            # Player slots are registered in JS as load_player('<id>', '<quality>').
            r = re.findall("load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", r)
            r = list(set(r))
            # Keep only unknown-quality ('0') or HD (>=720) player entries.
            r = [i for i in r if i[1] == '0' or int(i[1]) >= 720]
            control.log('R %s' % r)

            links = []

            for p in r:
                try:
                    headers = {
                        'X-Requested-With': 'XMLHttpRequest',
                        'Referer': u
                    }

                    # Ask the AJAX endpoint for the embed markup of this slot.
                    player = urlparse.urljoin(self.base_link,
                                              '/ajax/movie/load_player')

                    post = urllib.urlencode({'id': p[0], 'quality': p[1]})

                    result = client2.http_get(player,
                                              data=post,
                                              headers=headers)

                    frame = client.parseDOM(result, 'iframe', ret='src')
                    embed = client.parseDOM(result, 'embed', ret='flashvars')

                    if frame:
                        if 'player.php' in frame[0]:
                            # The local player hides the real stream URL in a
                            # hidden <input>; note frame becomes a *string*.
                            frame = client.parseDOM(result,
                                                    'input',
                                                    ret='value',
                                                    attrs={'type':
                                                           'hidden'})[0]

                            # BUG FIX: frame is a string at this point, so the
                            # previous frame[0] sent only its first character
                            # as the Referer path.
                            headers = {
                                'Referer':
                                urlparse.urljoin(self.base_link, frame)
                            }

                            # Follow redirects to the final (gvideo) URL.
                            url = client.request(frame,
                                                 headers=headers,
                                                 output='geturl')

                            links += [{
                                'source':
                                'gvideo',
                                'url':
                                url,
                                'quality':
                                client.googletag(url)[0]['quality'],
                                'direct':
                                True
                            }]

                        elif 'openload.' in frame[0]:
                            links += [{
                                'source': 'openload.co',
                                'url': frame[0],
                                'quality': 'HD',
                                'direct': False
                            }]

                        elif 'videomega.' in frame[0]:
                            links += [{
                                'source': 'videomega.tv',
                                'url': frame[0],
                                'quality': 'HD',
                                'direct': False
                            }]

                    elif embed:
                        # Flash embed: stream URLs live in the fmt_stream_map
                        # flashvar as "quality|url" pairs separated by commas.
                        url = urlparse.parse_qs(embed[0])['fmt_stream_map'][0]

                        url = [i.split('|')[-1] for i in url.split(',')]

                        for i in url:
                            try:
                                links.append({
                                    'source':
                                    'gvideo',
                                    'url':
                                    i,
                                    'quality':
                                    client.googletag(i)[0]['quality'],
                                    'direct':
                                    True
                                })
                            except:
                                pass

                except:
                    # Best-effort: a broken player slot must not abort the rest.
                    pass

            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'provider': 'Xmovies',
                    'url': i['url']
                })

            return sources
        except:
            return sources
Exemple #44
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Resolve playable stream links from Fmovies.

        NOTE(review): the bare ``return`` below deliberately disables this
        provider; the code after it is kept for reference and never runs
        until that line is removed.
        Returns a list of {'source', 'quality', 'provider', 'url'} dicts.
        """
        return
        try:
            sources = []

            if url == None: return sources

            # A non-http url is an urlencoded metadata query that must first
            # be resolved to a title page via the site search.
            if not str(url).startswith('http'):
                try:
                    data = urlparse.parse_qs(url)
                    data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                    title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                    year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']

                    try: episode = data['episode']
                    except: pass

                    query = {'keyword': title, 's':''}
                    search_url = urlparse.urljoin(self.base_link, '/search')
                    search_url = search_url + '?' + urllib.urlencode(query)
                    result = client2.http_get(search_url)

                    # (href, display-name) pairs from the result grid, with
                    # absolute hosts stripped and HTML entities removed.
                    r = client.parseDOM(result, 'div', attrs = {'class': '[^"]*movie-list[^"]*'})[0]
                    r = client.parseDOM(r, 'div', attrs = {'class': 'item'})
                    r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs = {'class': 'name'})) for i in r]
                    r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                    r = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in r]

                    if 'season' in data:
                        # Show names end with the season number, e.g. "Title 2".
                        url = [(i[0], re.findall('(.+?) (\d*)$', i[1])) for i in r]
                        url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0]
                        url = [i for i in url if cleantitle.get(title) == cleantitle.get(i[1])]
                        url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])]
                    else:
                        url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]

                    url = url[0][0]
                    url = urlparse.urljoin(self.base_link, url)

                except:
                    # BUG FIX: was ``url == self.base_link`` — a no-op
                    # comparison; the intent is to fall back to the site root.
                    url = self.base_link

            try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
            except: pass

            referer = url

            result = client.source(url, safe=True)

            # Sanity-check the page year against the requested year.
            atr = [i for i in client.parseDOM(result, 'title') if len(re.findall('(\d{4})', i)) > 0][-1]
            # NOTE(review): ``data``/``year`` are only bound when the incoming
            # url was a query string; for a plain http url this raises
            # NameError and the outer except returns the empty sources list.
            if 'season' in data:
                result = result if year in atr or data['year'] in atr else None
            else:
                result = result if year in atr else None

            try: quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower()
            except: quality = 'hd'
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd' or 'hd ' in quality: quality = 'HD'
            else: quality = 'SD'

            result = client.parseDOM(result, 'ul', attrs = {'data-range-id':"0"})

            # (server-id, episode-number) pairs from the first range list.
            servers = zip(client.parseDOM(result, 'a', ret='data-id'), client.parseDOM(result, 'a'))
            servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers]
            servers = [(i[0], ''.join(i[1][:1])) for i in servers]

            try: servers = [i for i in servers if '%01d' % int(i[1]) == '%01d' % int(episode)]
            except: pass

            for s in servers[:3]:
                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest'}

                    # Prime the token endpoint, then fetch the signed player
                    # info, then ask the grabber for the actual file list.
                    hash_url = urlparse.urljoin(self.base_link, self.hash_link)
                    query = {'id': s[0], 'update': '0'}
                    query.update(self.__get_token(query))
                    hash_url = hash_url + '?' + urllib.urlencode(query)
                    headers['Referer'] = url
                    result = client2.http_get(hash_url, headers=headers, cache_limit=.5)

                    query = {'id': s[0], 'update': '0'}
                    query.update(self.__get_token(query))
                    url = url + '?' + urllib.urlencode(query)
                    result = client.source(url, headers=headers, referer=referer, safe=True)
                    result = json.loads(result)

                    query = result['params']
                    query['mobile'] = '0'
                    query.update(self.__get_token(query))
                    grabber = result['grabber'] + '?' + urllib.urlencode(query)

                    result = client.source(grabber, headers=headers, referer=url, safe=True)
                    result = json.loads(result)

                    result = result['data']
                    result = [i['file'] for i in result if 'file' in i]

                    for i in result:
                        try: sources.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'provider': 'Fmovies', 'url': i})
                        except: pass
                except:
                    pass

            # A CAM-quality page overrides whatever googletag reported.
            if quality == 'CAM':
                for i in sources: i['quality'] = 'CAM'

            return sources
        except:
            return sources
Exemple #45
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect playable stream links from Putlocker.

        url is either an urlencoded metadata query (title/imdb/season/...)
        or a direct http url.  Returns a list of
        {'source', 'quality', 'provider', 'url'} dicts; any failure returns
        whatever was collected so far (possibly []).
        """
        try:
            sources = []
            #control.log('#PUTLOCKER1 %s' % url)

            if url == None: return sources

            if not str(url).startswith('http'):

                # Metadata query: rebuild the title/episode page URL by slug.
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']

                imdb = data['imdb']
                # Slugify the title the way the site builds its paths.
                match = title.replace('-', '').replace(':', '').replace(
                    '\'', '').replace(' ', '-').replace('--', '-').lower()

                if 'tvshowtitle' in data:
                    url = '%s/show/%s/season/%01d/episode/%01d' % (
                        self.base_link, match, int(
                            data['season']), int(data['episode']))
                else:
                    url = '%s/movie/%s' % (self.base_link, match)
                control.log('#PUTLOCKER2 %s' % url)

                #result = client.source(url, output='title')
                result = client2.http_get(url)

                # Placeholder page means the guessed slug was wrong.
                if '%TITLE%' in result: raise Exception()

                # The auth token lives in the __utmx cookie persisted by a
                # previous client2 request.
                cookie_file = os.path.join(
                    control.cookieDir,
                    '%s_cookies.lwp' % client2.shrink_host(url))
                #cookie_file = os.path.join('/home/mrknow/.kodi/userdata/addon_data/plugin.video.fanfilm/Cookies','%s_cookies.lwp' % client2.shrink_host((url)))
                cj = cookielib.LWPCookieJar(cookie_file)
                try:
                    cj.load(ignore_discard=True)
                except:
                    pass
                auth = cj._cookies['www.putlocker.systems']['/'][
                    '__utmx'].value
                headers = {}

                # Verify we landed on the right title.
                if not imdb in result: raise Exception()

            else:
                # NOTE(review): this branch never assigns ``auth``, so the
                # line below raises NameError and the outer except returns
                # the empty list — confirm whether direct urls are expected.
                result, headers, content, cookie = client.source(
                    url, output='extended')

            auth = 'Bearer %s' % urllib.unquote_plus(auth)

            headers['Authorization'] = auth
            headers['X-Requested-With'] = 'XMLHttpRequest'
            headers['Referer'] = url

            u = 'http://www.putlocker.systems/ajax/embeds.php'

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            # elid is a base64'd timestamp; token/idEl are scraped from the page JS.
            elid = urllib.quote(
                base64.encodestring(str(int(time.time()))).strip())

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {
                'action': action,
                'idEl': idEl,
                'token': token,
                'elid': elid
            }
            r = client2.http_get(u, data=post, headers=headers)
            print r
            # The JSON response contains iframe markup; scan both casings.
            r = str(json.loads(r))
            r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(
                r, 'IFRAME', ret='.+?')

            links = []

            # googletag() succeeds only for google-video urls; others fall through.
            for i in r:
                try:
                    links += [{
                        'source': 'gvideo',
                        'quality': client.googletag(i)[0]['quality'],
                        'url': i
                    }]
                except:
                    pass

            links += [{
                'source': 'openload.co',
                'quality': 'SD',
                'url': i,
                'direct': False
            } for i in r if 'openload.co' in i]

            links += [{
                'source': 'videomega.tv',
                'quality': 'SD',
                'url': i,
                'direct': False
            } for i in r if 'videomega.tv' in i]

            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'provider': 'Putlocker',
                    'url': i['url']
                })
            #control.log('#PUTLOCKER6 SOURCES %s' % sources)

            return sources
        except:
            return sources
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect playable stream links from Onlinedizi.

        url is a site-relative episode path.  Follows the subtitle-tab link,
        extracts the embedded player iframe and resolves it to ok.ru / vk /
        openload streams.  Returns a list of
        {'source', 'quality', 'provider', 'url'} dicts.
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            path = urlparse.urlparse(url).path

            result = client2.http_get(url)
            # Strip non-ASCII bytes so the regex-based DOM parser stays sane.
            result = re.sub(r'[^\x00-\x7F]+', '', result)
            result = client.parseDOM(result, 'li')
            result = [(client.parseDOM(i, 'a',
                                       ret='href'), client.parseDOM(i, 'a'))
                      for i in result]
            # Pick the subtitled ("Altyaz...") variant of the same episode path.
            result = [
                i[0] for i in result if len(i[0]) > 0 and path in i[0][0]
                and len(i[1]) > 0 and 'Altyaz' in i[1][0]
            ][0][0]

            url = urlparse.urljoin(self.base_link, result)

            result = client2.http_get(url)
            result = re.sub(r'[^\x00-\x7F]+', '', result)
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'video-player'})[0]
            # Last iframe in the player div carries the embed url.
            result = client.parseDOM(result, 'iframe', ret='src')[-1]

            try:
                # Preferred path: the real stream url is base64-encoded in
                # the iframe's ?id= query parameter.
                url = base64.b64decode(
                    urlparse.parse_qs(
                        urlparse.urlparse(result).query)['id'][0])
                if not url.startswith('http'): raise Exception()
            except:
                # Fallback: fetch the embed page and dig the host url out of
                # a nested iframe or any quoted string in the markup.
                url = client2.http_get(result)
                url = urllib.unquote_plus(url.decode('string-escape'))

                frame = client.parseDOM(url, 'iframe', ret='src')

                if len(frame) > 0:
                    url = [client.source(frame[-1], output='geturl')]
                else:
                    url = re.compile('"(.+?)"').findall(url)
                url = [
                    i for i in url
                    if 'ok.ru' in i or 'vk.com' in i or 'openload.co' in i
                ][0]

            # Normalize ok.ru embed links (?mid=...) to canonical video urls.
            try:
                url = 'http://ok.ru/video/%s' % urlparse.parse_qs(
                    urlparse.urlparse(url).query)['mid'][0]
            except:
                pass

            # From here ``url`` becomes a list of {'url', 'quality'} dicts.
            if 'openload.co' in url:
                host = 'openload.co'
                direct = False
                url = [{
                    'url': url,
                    'quality': 'HD'
                }]
            elif 'ok.ru' in url:
                host = 'vk'
                direct = True
                url = resolvers.request(url)
            elif 'vk.com' in url:
                host = 'vk'
                direct = True
                url = resolvers.request(url)
            else:
                raise Exception()

            for i in url:
                sources.append({
                    'source': host,
                    'quality': i['quality'],
                    'provider': 'Onlinedizi',
                    'url': i['url'],
                })

            return sources
        except:
            return sources
Exemple #47
0
 def request(self, url, check):
     """Fetch *url* and return its body re-encoded from iso-8859-1 to UTF-8.

     Returns None when the fetch fails, when *check* does not occur in the
     response, or when re-encoding raises.
     """
     try:
         page = client2.http_get(url)
         if check not in str(page):
             return
         return page.decode('iso-8859-1').encode('utf-8')
     except:
         return
Exemple #48
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect playable stream links from Pelispedia.

        url is a site-relative title path.  Walks the button bar of the
        player iframe and tries three extraction strategies per mirror.
        Returns a list of {'source', 'quality', 'provider', 'url'} dicts.
        """
        control.log("><><><><> PELISPEDIA SOURCE %s" % url)
        try:
            sources = []

            if url == None: return sources

            r = urlparse.urljoin(self.base_link, url)

            result = client2.http_get(r)

            # The player lives in the iframe whose src mentions 'iframe'.
            f = client.parseDOM(result, 'iframe', ret='src')
            f = [i for i in f if 'iframe' in i][0]

            result = client2.http_get(f, headers={'Referer': r})

            # Mirror buttons; keep only pelispedia-hosted ones.
            r = client.parseDOM(result, 'div', attrs = {'id': 'botones'})[0]
            r = client.parseDOM(r, 'a', ret='href')
            r = [(i, urlparse.urlparse(i).netloc) for i in r]
            r = [i[0] for i in r if 'pelispedia' in i[1]]

            links = []

            for u in r:
                result = client2.http_get(u, headers={'Referer': f})

                # Strategy 1: inline jwplayer sources array with direct files.
                try:
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('"file"\s*:\s*"(.+?)"', url)
                    url = [i.split()[0].replace('\\/', '/') for i in url]

                    for i in url:
                        try: links.append({'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i})
                        except: pass
                except:
                    pass

                # Strategy 2: gkpluginsphp backend resolves a link token.
                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': u}

                    post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                    post = urllib.urlencode({'link': post})

                    url = urlparse.urljoin(self.base_link, '/Pe_flv_flsh/plugins/gkpluginsphp.php')
                    url = client2.http_get(url, data=post, headers=headers)
                    url = json.loads(url)['link']

                    links.append({'source': 'gvideo', 'quality': 'HD', 'url': url})
                except:
                    pass

                # Strategy 3: protected.php backend resolves the 'pic' token
                # from the page's parametros variable.
                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest'}

                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]
                    post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                    post = urllib.urlencode({'sou': 'pic', 'fv': '21', 'url': post})

                    url = urlparse.urljoin(self.base_link, '/Pe_Player_Html5/pk/pk/plugins/protected.php')
                    url = client2.http_get(url, data=post, headers=headers)
                    url = json.loads(url)[0]['url']

                    links.append({'source': 'cdn', 'quality': 'HD', 'url': url})
                except:
                    pass

            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Pelispedia', 'url': i['url']})

            return sources
        except:
            return sources
Exemple #49
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Resolve playable stream links from Fmovies (reformatted copy).

        NOTE(review): the bare ``return`` below deliberately disables this
        provider; the code after it is kept for reference and never runs
        until that line is removed.
        Returns a list of {'source', 'quality', 'provider', 'url'} dicts.
        """
        return
        try:
            sources = []

            if url == None: return sources

            # A non-http url is an urlencoded metadata query that must first
            # be resolved to a title page via the site search.
            if not str(url).startswith('http'):
                try:
                    data = urlparse.parse_qs(url)
                    data = dict([(i, data[i][0]) if data[i] else (i, '')
                                 for i in data])

                    title = data[
                        'tvshowtitle'] if 'tvshowtitle' in data else data[
                            'title']

                    year = re.findall(
                        '(\d{4})', data['premiered']
                    )[0] if 'tvshowtitle' in data else data['year']

                    try:
                        episode = data['episode']
                    except:
                        pass

                    query = {'keyword': title, 's': ''}
                    search_url = urlparse.urljoin(self.base_link, '/search')
                    search_url = search_url + '?' + urllib.urlencode(query)
                    print("R", search_url)
                    result = client2.http_get(search_url)
                    print("r", result)

                    # (href, display-name) pairs from the result grid, with
                    # absolute hosts stripped and HTML entities removed.
                    r = client.parseDOM(
                        result, 'div', attrs={'class':
                                              '[^"]*movie-list[^"]*'})[0]
                    r = client.parseDOM(r, 'div', attrs={'class': 'item'})
                    r = [(client.parseDOM(i, 'a', ret='href'),
                          client.parseDOM(i, 'a', attrs={'class': 'name'}))
                         for i in r]
                    r = [(i[0][0], i[1][0]) for i in r
                         if len(i[0]) > 0 and len(i[1]) > 0]
                    r = [(re.sub('http.+?//.+?/', '/',
                                 i[0]), re.sub('&#\d*;', '', i[1])) for i in r]

                    if 'season' in data:
                        # Show names end with the season number, e.g. "Title 2".
                        url = [(i[0], re.findall('(.+?) (\d*)$', i[1]))
                               for i in r]
                        url = [(i[0], i[1][0][0], i[1][0][1]) for i in url
                               if len(i[1]) > 0]
                        url = [
                            i for i in url
                            if cleantitle.get(title) == cleantitle.get(i[1])
                        ]
                        url = [
                            i for i in url if '%01d' %
                            int(data['season']) == '%01d' % int(i[2])
                        ]
                    else:
                        url = [
                            i for i in r
                            if cleantitle.get(title) == cleantitle.get(i[1])
                        ]

                    url = url[0][0]
                    url = urlparse.urljoin(self.base_link, url)
                    print("r2", url)

                except:
                    # BUG FIX: was ``url == self.base_link`` — a no-op
                    # comparison; the intent is to fall back to the site root.
                    url = self.base_link

            try:
                url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(
                    url)[0]
            except:
                pass

            referer = url

            result = client.source(url, safe=True)
            print("r22", result)

            alina = client.parseDOM(result, 'title')[0]
            print(re.findall('(\d{4})', alina))

            # Sanity-check the page year against the requested year.
            atr = [
                i for i in client.parseDOM(result, 'title')
                if len(re.findall('(\d{4})', i)) > 0
            ][-1]
            # NOTE(review): ``data``/``year`` are only bound when the incoming
            # url was a query string; for a plain http url this raises
            # NameError and the outer except returns the empty sources list.
            if 'season' in data:
                result = result if year in atr or data['year'] in atr else None
            else:
                result = result if year in atr else None

            print("r3", result)

            try:
                quality = client.parseDOM(result,
                                          'span',
                                          attrs={'class':
                                                 'quality'})[0].lower()
            except:
                quality = 'hd'
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd' or 'hd ' in quality: quality = 'HD'
            else: quality = 'SD'

            result = client.parseDOM(result,
                                     'ul',
                                     attrs={'data-range-id': "0"})
            print("r3", result, quality)

            # (server-id, episode-number) pairs from the first range list.
            servers = zip(client.parseDOM(result, 'a', ret='data-id'),
                          client.parseDOM(result, 'a'))
            servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers]
            servers = [(i[0], ''.join(i[1][:1])) for i in servers]
            print("r3", servers)

            try:
                servers = [
                    i for i in servers
                    if '%01d' % int(i[1]) == '%01d' % int(episode)
                ]
            except:
                pass

            for s in servers[:3]:
                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest'}

                    # Prime the token endpoint, then fetch the signed player
                    # info, then ask the grabber for the actual file list.
                    hash_url = urlparse.urljoin(self.base_link, self.hash_link)
                    query = {'id': s[0], 'update': '0'}
                    query.update(self.__get_token(query))
                    hash_url = hash_url + '?' + urllib.urlencode(query)
                    headers['Referer'] = url
                    result = client2.http_get(hash_url,
                                              headers=headers,
                                              cache_limit=.5)
                    print("r100", result)

                    query = {'id': s[0], 'update': '0'}
                    query.update(self.__get_token(query))
                    url = url + '?' + urllib.urlencode(query)
                    result = client.source(url,
                                           headers=headers,
                                           referer=referer,
                                           safe=True)
                    print("r100", result)
                    result = json.loads(result)

                    query = result['params']
                    query['mobile'] = '0'
                    query.update(self.__get_token(query))
                    grabber = result['grabber'] + '?' + urllib.urlencode(query)

                    result = client.source(grabber,
                                           headers=headers,
                                           referer=url,
                                           safe=True)
                    result = json.loads(result)

                    result = result['data']
                    result = [i['file'] for i in result if 'file' in i]

                    for i in result:
                        try:
                            sources.append({
                                'source':
                                'gvideo',
                                'quality':
                                client.googletag(i)[0]['quality'],
                                'provider':
                                'Fmovies',
                                'url':
                                i
                            })
                        except:
                            pass
                except:
                    pass

            # A CAM-quality page overrides whatever googletag reported.
            if quality == 'CAM':
                for i in sources:
                    i['quality'] = 'CAM'

            return sources
        except:
            return sources
    def get_movie(self, imdb, title, year):
        """Find the site-relative url of a movie page.

        Tries a Google custom-search endpoint first (with an IMDB-derived
        Spanish title as fallback), then the site's own search.  Returns a
        utf-8 encoded relative path, or None when nothing matches.
        """
        print("R", title)

        try:
            t = cleantitle.get(title)

            # search_link is stored base64-encoded; it is a Google CSE url.
            query = '%s %s' % (title, year)
            query = base64.b64decode(
                self.search_link) % urllib.quote_plus(query)
            print("R", query)

            result = client2.http_get(query,
                                      headers={'Referer': self.base_link})
            result = json.loads(result)['results']
            print("R", result)

            # Result titles look like "Ver <title> HD (<year>)".
            result = [(i['url'], i['titleNoFormatting']) for i in result]
            result = [(i[0],
                       re.findall('(?:^Ver |)(.+?)(?: HD |)\((\d{4})', i[1]))
                      for i in result]
            result = [(i[0], i[1][0][0], i[1][0][1]) for i in result
                      if len(i[1]) > 0]

            r = [
                i for i in result if t == cleantitle.get(i[1]) and year == i[2]
            ]

            if len(r) == 0:
                # Retry the match with the localized (es-ES) IMDB title.
                t = 'http://www.imdb.com/title/%s' % imdb
                t = client.source(t, headers={'Accept-Language': 'es-ES'})
                t = client.parseDOM(t, 'title')[0]
                t = re.sub('(?:\(|\s)\d{4}.+', '', t).strip()
                t = cleantitle.get(t)

                r = [
                    i for i in result
                    if t == cleantitle.get(i[1]) and year == i[2]
                ]

            # Reduce the absolute url to a site-relative path.
            try:
                url = re.findall('//.+?(/.+)', r[0][0])[0]
            except:
                url = r[0][0]
            try:
                url = re.findall('(/.+?/.+?/)', url)[0]
            except:
                pass
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            return url
        except:
            pass

        # Fallback: the site's own search page.
        try:
            t = cleantitle.get(title)

            query = self.search3_link % urllib.quote_plus(
                cleantitle.query(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client2.http_get(query)
            # Strip non-ASCII bytes so the regex-based DOM parser stays sane.
            result = re.sub(r'[^\x00-\x7F]+', '', result)
            print("R", result)

            # Entries look like <li ...><a href=...><i>(title)</i></a> (year).
            r = result.split('<li class=')
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'i'),
                  re.findall('\((\d{4})\)', i)) for i in r]
            r = [(i[0][0], re.sub('\(|\)', '', i[1][0]), i[2][0]) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
            r = [
                i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]
            ][0]

            try:
                url = re.findall('//.+?(/.+)', r)[0]
            except:
                url = r
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            return url
        except:
            pass
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Scrape playable video links for a Pelispedia title.

        `url` is the site-relative path for the title; `hosthdDict`,
        `hostDict` and `locDict` are part of the provider interface but
        are unused here.  Returns a list of dicts with keys
        source/quality/provider/url/direct/debridonly; on any failure the
        partial list collected so far is returned (bare except is the
        file-wide convention).
        """
        control.log("><><><><> PELISPEDIA SOURCE %s" % url)
        try:
            sources = []

            if url == None: return sources

            r = urlparse.urljoin(self.base_link, url)

            result = client2.http_get(r)

            # The title page embeds the player in an iframe whose src
            # contains "iframe"; take the first such frame.
            f = client.parseDOM(result, 'iframe', ret='src')
            f = [i for i in f if 'iframe' in i][0]

            result = client2.http_get(f, headers={'Referer': r})

            # The "botones" block lists mirror buttons; keep only links
            # hosted on a pelispedia domain.
            r = client.parseDOM(result, 'div', attrs={'id': 'botones'})[0]
            r = client.parseDOM(r, 'a', ret='href')
            r = [(i, urlparse.urlparse(i).netloc) for i in r]
            r = [i[0] for i in r if 'pelispedia' in i[1]]

            links = []

            for u in r:
                result = client2.http_get(u, headers={'Referer': f})

                # Strategy 1: JW-player style sources:[{"file": ...}] blob
                # embedded in the page (direct gvideo streams).
                try:
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('"file"\s*:\s*"(.+?)"', url)
                    url = [i.split()[0].replace('\\/', '/') for i in url]

                    for i in url:
                        try:
                            links.append({
                                'source':
                                'gvideo',
                                'quality':
                                client.googletag(i)[0]['quality'],
                                'url':
                                i
                            })
                        except:
                            pass
                except:
                    pass

                # Strategy 2: gkpluginsphp backend - POST the scraped
                # "link" token and read the stream URL from the JSON reply.
                try:
                    headers = {
                        'X-Requested-With': 'XMLHttpRequest',
                        'Referer': u
                    }

                    post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)',
                                      result)[0]
                    post = urllib.urlencode({'link': post})

                    url = urlparse.urljoin(
                        self.base_link,
                        '/Pe_flv_flsh/plugins/gkpluginsphp.php')
                    url = client.source(url, data=post, headers=headers)
                    url = json.loads(url)['link']

                    links.append({
                        'source': 'gvideo',
                        'quality': 'HD',
                        'url': url
                    })
                except:
                    pass

                # Strategy 3: protected.php backend - exchange the "pic"
                # query value found in `var parametros` for a CDN URL.
                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest'}

                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)',
                                      result)[0]
                    post = urlparse.parse_qs(
                        urlparse.urlparse(post).query)['pic'][0]
                    post = urllib.urlencode({
                        'sou': 'pic',
                        'fv': '21',
                        'url': post
                    })

                    url = urlparse.urljoin(
                        self.base_link,
                        '/Pe_Player_Html5/pk/pk/plugins/protected.php')
                    url = client2.http_get(url, data=post, headers=headers)
                    url = json.loads(url)[0]['url']

                    links.append({
                        'source': 'cdn',
                        'quality': 'HD',
                        'url': url
                    })
                except:
                    pass

            # Tag every collected link with the provider name.
            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'provider': 'Pelispedia',
                    'url': i['url'],
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Exemple #52
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Scrape playable links from putlocker.systems (Python 2 code).

        `url` is either a plugin query string (title/imdb/season/episode
        pairs) or a full http(s) URL to a show/movie page.  Returns a list
        of source dicts; any failure is swallowed by the outer bare except
        and the partial list is returned.
        """
        try:
            sources = []
            #control.log('#PUTLOCKER1 %s' % url)

            if url == None: return sources

            if not str(url).startswith('http'):

                # Plugin query string: rebuild the site URL from its parts.
                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                imdb = data['imdb']
                # Slugify the title the way the site expects.
                match = title.replace('-', '').replace(':', '').replace('\'', '').replace(' ', '-').replace('--', '-').lower()

                if 'tvshowtitle' in data:
                    url = '%s/show/%s/season/%01d/episode/%01d' % (self.base_link, match, int(data['season']), int(data['episode']))
                else:
                    url = '%s/movie/%s' % (self.base_link, match)
                control.log('#PUTLOCKER2 %s' % url)

                #result = client.source(url, output='title')
                result = client2.http_get(url)

                # '%TITLE%' in the body means an unrendered template, i.e.
                # the slug did not match a real page.
                if '%TITLE%' in result: raise Exception()

                # The __utmx cookie from a previous visit doubles as the
                # site's bearer token; read it from the persisted jar.
                cookie_file = os.path.join(control.cookieDir, '%s_cookies.lwp' % client2.shrink_host(url))
                #cookie_file = os.path.join('/home/mrknow/.kodi/userdata/addon_data/plugin.video.specto.polska/Cookies','%s_cookies.lwp' % client2.shrink_host((url)))
                cj = cookielib.LWPCookieJar(cookie_file)
                try: cj.load(ignore_discard=True)
                except: pass
                auth = cj._cookies['www.putlocker.systems']['/']['__utmx'].value
                headers = {}

                # Sanity check: the page must reference the requested imdb id.
                if not imdb in result: raise Exception()

            else:
                result, headers, content, cookie = client.source(url, output='extended')


            # NOTE(review): `auth` is only assigned in the non-http branch
            # above; when `url` starts with 'http' this line raises
            # NameError, which the outer bare except silently swallows so
            # the function returns an empty list.  Presumably the token
            # should also be extracted from `cookie` in that branch - TODO
            # confirm against the site before fixing.
            auth = 'Bearer %s' % urllib.unquote_plus(auth)

            headers['Authorization'] = auth
            headers['X-Requested-With'] = 'XMLHttpRequest'
            headers['Referer'] = url

            u = 'http://www.putlocker.systems/ajax/embeds.php'

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            # The ajax endpoint wants a base64-encoded timestamp plus the
            # page-scraped token and element id.
            elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
            r = client2.http_get(u, data=post, headers=headers)
            print r
            # The JSON reply embeds iframe markup; pull out every frame src.
            r = str(json.loads(r))
            r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(r, 'IFRAME', ret='.+?')

            links = []

            for i in r:
                try: links += [{'source': 'gvideo', 'quality': client.googletag(i)[0]['quality'], 'url': i}]
                except: pass

            links += [{'source': 'openload.co', 'quality': 'SD', 'url': i, 'direct': False} for i in r if 'openload.co' in i]

            links += [{'source': 'videomega.tv', 'quality': 'SD', 'url': i, 'direct': False} for i in r if 'videomega.tv' in i]


            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Putlocker', 'url': i['url']})
            #control.log('#PUTLOCKER6 SOURCES %s' % sources)

            return sources
        except:
            return sources
Exemple #53
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Collect playable links for a Dayt.se movie (Python 2 code).

        `url` is "<title> (<year>)" for movies or "<title> SxxExx" for TV
        episodes; episodes are not supported and yield an empty list.  A
        local SQLite snapshot of the site catalogue (re-downloaded when
        older than two days) maps the title to its site path.  Returns a
        list of source dicts; on any error the partial list collected so
        far is returned (bare except is the file-wide convention).
        """
        try:
            sources = []
            control.log('#Dayt url %s' % url)

            if url == None: return sources

            # "<title> SxxExx" marks a TV episode -> unsupported here.
            content = re.compile('(.+?)\sS\d*E\d*$').findall(url)
            control.log('#Dayt content %s' % content)

            if len(content) == 0:
                title, year = re.compile('(.+?) \((\d{4})\)').findall(url)[0]
                control.log('#Dayt title, year: %s,%s' % (title, year))

                # Refresh the catalogue snapshot if missing or older than
                # two days (getmtime raises if the file does not exist).
                data = os.path.join(control.dataPath, 'daytse1.db')
                download = True
                try:
                    download = abs(datetime.datetime.fromtimestamp(os.path.getmtime(data)) - datetime.datetime.now()) > datetime.timedelta(days=2)
                except:
                    pass
                if download == True:
                    control.log('#Dayt DDDOOOWNLOAD ')
                    result = client.request(base64.b64decode(self.data_link))
                    with open(data, "wb") as code:
                        code.write(result)

                # Parameterised LIKE query: the title comes from the caller
                # and must not be interpolated into the SQL string.
                dbcon = database.connect(data)
                try:
                    dbcur = dbcon.cursor()
                    dbcur.execute("SELECT * FROM movies WHERE title LIKE ?",
                                  ('%' + title.lower() + '%',))
                    result = dbcur.fetchone()
                finally:
                    dbcon.close()
                control.log('#Dayt Result Final %s' % result[0])
                control.log('#Dayt Final %s' % result[1])

                # result row: (title, site path slug, quality string, ...)
                myurl = urlparse.urljoin(self.base_link, '/movies/' + urllib.quote_plus(result[1]))
                control.log('#Dayt Final myurl %s' % myurl)

                myhead = {'Referer': 'http://dayt.se/movies/'}

                page = client2.http_get(myurl, headers=myhead)

                # Direct download hosts (mail.ru / yadi.sk) are listed in
                # the "5throw" block of the movie page.
                hosts = client.parseDOM(page, 'div', attrs={'id': '5throw'})[0]
                hosts = client.parseDOM(hosts, 'a', attrs={'rel': 'nofollow'}, ret='href')
                mquality = 'HD'
                if '1080' in result[2]: mquality = '1080p'
                for i in hosts:
                    if 'mail.ru' in i:
                        myresolve = resolvers.request(i)
                        sources.append({'source': 'MAIL.RU', 'quality': mquality, 'provider': 'Dayt', 'url': myresolve})
                    if 'yadi.sk' in i:
                        myresolve = resolvers.request(i)
                        sources.append({'source': 'YADISK', 'quality': mquality, 'provider': 'Dayt', 'url': myresolve})

                # Google-video streams sit behind two nested "pasep"
                # iframes.  BUGFIX: the original parsed the DB row tuple
                # (`result`) here instead of the fetched page, so this
                # branch could never yield links.
                frame = client.parseDOM(page, 'iframe', ret='src')
                frame = [i for i in frame if 'pasep' in i][0]
                frame = client.request(frame)
                frame = client.parseDOM(frame, 'iframe', ret='src')[0]
                frame = client.request(frame)
                frame = client.parseDOM(frame, 'iframe', ret='src')[0]
                links = resolvers.request(frame)
                for i in links: sources.append({'source': 'gvideo', 'quality': i[1], 'provider': 'Dayt', 'url': i[0]})

            # Always return a list: the original fell through and returned
            # None on the (unsupported) episode path.
            return sources
        except:
            return sources