class TVOnline:
    """Scraper for xemtivimienphi.com live-TV listings."""
    domain = "http://www.xemtivimienphi.com"
    token = None
    member_id = None

    def __init__(self):
        # Persistent session so cookies survive across calls.
        self.request = Request(header={
            'User-Agent': 'Mozilla/5.0',
        }, session=True)

    def getCategory(self):
        # The site has no category tree; return the channel list directly
        # (empty category list, channel dict tagged with page 1).
        response = self.request.get(self.domain)
        movies = Channel().get(response, 1)
        movies['page'] = 1
        return [], movies

    def getMovie(self, id):
        # Normalize the id into an absolute URL under our domain.
        url = id.replace(self.domain, '')
        url = url.replace('./', '')
        url = "{}/{}".format(self.domain, url)
        response = self.request.get(url)
        return Movie().get(response, self.domain, url)

    def getLink(self, url):
        response = self.request.get(url)
        return Movie().get_link(response)

    def search(self, text):
        url = "%s/findContent/" % self.domain
        params = {'term': text}
        response = self.request.post(url, params)
        return Channel().get(response, 1)
def get_link(url, media):
    """Pick a 720p (preferred) or 360p variant from an HLS master playlist.

    Returns the calculate_stream() result for the chosen variant, or None
    (implicit) when fewer than two resolutions are listed or none match —
    callers must handle the None case.
    """
    base_url = urlparse(url)
    base_url = base_url.scheme + '://' + base_url.netloc
    request = Request(header={
        'Origin': base_url,
        'Referer': media['originUrl']
    })
    response = request.get(str(url))
    # Heights advertised in the master playlist, e.g. RESOLUTION=1280x720.
    resolutions = re.findall('RESOLUTION=\d+x(\d+)', response)
    matches = re.findall(r'(.*\.m3u8)', response)
    if len(resolutions) > 1:
        if '720' in resolutions:
            # Index of the 720p entry; variant paths line up with matches[].
            idx = next(
                (resolutions.index(i) for i in resolutions if '720' == i), -1)
            stream_url = url.replace('playlist.m3u8', matches[idx])
            return calculate_stream(request.get(stream_url), base_url,
                                    media['originUrl'])
        if '360' in resolutions:
            idx = next(
                (resolutions.index(i) for i in resolutions if '360' == i), -1)
            stream_url = url.replace('playlist.m3u8', matches[idx])
            return calculate_stream(request.get(stream_url), base_url,
                                    media['originUrl'])
    # Single-variant (or unparseable) playlists fall through to None.
    return
class Sample:
    """Scraper template for test.com."""
    domain = "https://test.com"

    def __init__(self):
        # `h` is a module-level default header dict; session keeps cookies.
        self.request = Request(h, session=True)

    def getCategory(self):
        response = self.request.get('{}/'.format(self.domain), headers=h)
        return Category().get(response), Channel().getTop(response)

    def getChannel(self, channel, page=1):
        # NOTE(review): this strips the domain and fetches the remaining
        # relative path — confirm Request resolves it against the session.
        channel = channel.replace(self.domain, "")
        response = self.request.get(channel, headers=h)
        return Channel().get(response, page)

    def getMovie(self, movie_url):
        response = self.request.get(movie_url, headers=h)
        return Movie().get(response, movie_url)

    def getLink(self, movie):
        # Re-anchor the (possibly absolute) link onto our domain.
        url = movie['link'].replace(self.domain, '')
        url = "%s/%s" % (self.domain, url)
        response = self.request.get(url, headers=h)
        return Movie().get_link(response, self.domain, url, self.request)

    def search(self, text):
        url = "%s/tim-nang-cao/?keyword=%s" % (self.domain, quote_plus(text))
        response = self.request.get(url, headers=h)
        return Channel().get(response, 1)
def get(self, response, referrer_url, skipEps=False):
    """Build a movie dict for an fmovies watch page.

    Solves the invisible reCAPTCHA anchor to obtain a token, then queries
    the server-list ajax endpoint. NOTE(review): the server-list response
    is only logged and the returned movie dict is always empty — confirm
    whether parsing of `respon` is still to be implemented.
    """
    req = Request()
    movie = {
        'group': {},
        'episode': [],
        'links': [],
    }
    # soup = BeautifulSoup(response, "html.parser")
    # Extract the site's reCAPTCHA site key embedded in the page script.
    captcha_key = re.search(r"var recaptcha_key='(.*?)';", response).group(1)
    respon = req.get(
        'https://www.google.com/recaptcha/api2/anchor',
        params={
            "ar": 1,
            "k": captcha_key,
            # Base64url of the widget origin ("https://www.google.com:443").
            "co": "aHR0cHM6Ly93d3cuZ29vZ2xlLmNvbTo0NDM.",
            # "hl": "en",
            "v": "eWmgPeIYKJsH2R2FrgakEIkq",
            "size": "invisible",
            # "cb": "fo0mghgojurx",
        })
    # Token issued by the anchor page; required by the servers endpoint.
    token = re.search('id="recaptcha-token" value="(.*?)"', respon).group(1)
    m_id = re.search(r'id="watch" data-id="(.*?)"', response).group(1)
    ep_id = re.search(r'data-epid="(.*?)"', response).group(1)
    respon = req.get('https://fmovies.to/ajax/film/servers',
                     params={
                         'id': m_id,
                         'episode': ep_id,
                         'token': token
                     })
    helper.log(respon)
    return movie
class Phimmedia:
    """Scraper for phimmedia.tv."""
    domain = "https://www.phimmedia.tv"

    def __init__(self):
        # `h` is a module-level default header dict; session keeps cookies.
        self.request = Request(h, session=True)
        # Prime the session past the site's "mak" firewall gate.
        self.request.post('https://www.phimmedia.tv/mak.php',
                          params={
                              'mak_firewall_redirect': self.domain,
                              'mak_firewall_postcontent': ''
                          })

    def getCategory(self):
        response = self.request.get(self.domain)
        return Category().get(response), None

    def getChannel(self, channel, page=1):
        if page > 1:
            url = '%s%s&page=%d' % (self.domain, channel, page)
        else:
            url = '%s/%s.html' % (self.domain, channel)
        response = self.request.get(url)
        return Channel().get(response)

    def getMovie(self, id):
        # The watch page lives at <movie-path>xem-online.html.
        url = "%sxem-online.html" % id
        response = self.request.get(url)
        return Movie().get(response)

    def getLink(self, movie):
        response = self.request.get(movie['link'])
        return Movie().get_link(response, movie['link'])

    def search(self, text, page=1):
        url = "%s/index.php?keyword=%s&do=phim&act=search&page=%s" % (self.domain, urllib.quote_plus(text), page)
        response = self.request.get(url)
        return Channel().get(response)
def parse_link(self, url):
    """Resolve 'getLinkSimple' indirection links by following redirects."""
    if re.search('getLinkSimple', url):
        resolver = Request()
        resolver.get(url)
        return resolver.get_request().url
    return url
class Phimmedia:
    """Scraper for phimmedia, caching Cloudflare cookies on disk for 12h."""
    replace_domain = "https://www.phimmedia.tv"
    domain = "https://www.phimmedia.info"
    cookies = {}

    def __init__(self):
        self.request = Request(h, session=True)
        # Reuse the on-disk cookie cache while it is younger than 12 hours.
        if helper.has_file_path('phimmedia.bin') and \
                helper.get_last_modified_time_file('phimmedia.bin') + 43200 > int(time.time()):
            # BUGFIX: pickled data is binary; open with 'rb' to match the
            # 'wb' used when writing (pickle.load fails on a text-mode
            # handle under Python 3).
            with open(helper.get_file_path('phimmedia.bin'), 'rb') as f:
                self.cookies = pickle.load(f)
        else:
            self.updateSession(self.domain)

    def updateSession(self, url, delay=10):
        """Solve the Cloudflare challenge and persist the resulting cookies."""
        try:
            scraper = CloudScraper.create_scraper(delay=delay)
            scraper.headers.update({'User-Agent': user_agent})
            self.cookies = scraper.get(url).cookies.get_dict()
            with open(helper.get_file_path('phimmedia.bin'), 'wb') as f:
                pickle.dump(self.cookies, f)
        except Exception:
            # Best-effort: keep whatever cookies we already have.
            pass

    def getCategory(self):
        response = self.request.get("{}/en/".format(self.domain), cookies=self.cookies)
        return Category().get(response), Channel().get(response)

    def getChannel(self, channel, page=1):
        # Accept links from either the old or the current domain.
        channel = channel.replace(self.replace_domain, "").replace(self.domain, "")
        if page > 1:
            url = '%s%s&page=%d' % (self.domain, channel, page)
        else:
            url = '%s%s' % (self.domain, channel)
        response = self.request.get(url, cookies=self.cookies)
        return Channel().get(response)

    def getMovie(self, id):
        url = "%sxem-online.html" % id
        response = self.request.get(url, cookies=self.cookies)
        return Movie().get(response)

    def getLink(self, movie):
        movie_link = movie['link'].replace(self.replace_domain, self.domain)
        response = self.request.get(movie_link, cookies=self.cookies)
        return Movie().get_link(response, movie['link'])

    def search(self, text, page=1):
        url = "%s/index.php?keyword=%s&do=phim&act=search&page=%s" % (
            self.domain, urllib.quote_plus(text), page)
        response = self.request.get(url, cookies=self.cookies)
        return Channel().get(response)
def get_link(url, movie):
    """Resolve a player URL to a playable stream for vtv16/movie3s hosts.

    Returns (stream_url, base_url); stream_url may carry urlencoded headers
    after a '|' separator (Kodi convention).
    """
    base_url = urlparse(url)
    base_url = base_url.scheme + '://' + base_url.netloc
    request = Request()
    # Embedded players are delegated to the iframeembed resolver.
    if 'embedplay' in url:
        return iframeembed.get_link(url, movie)
    # Already a direct HLS link: just attach the required headers.
    if url.endswith('m3u8'):
        header = {
            'Origin': 'http://www.vtv16.com',
            'User-Agent': "Chrome/59.0.3071.115 Safari/537.36",
            'Referer': movie.get('originUrl')
        }
        return url + "|%s" % urlencode(header), base_url
    # Method 1: ask the host for a base64-encoded direct URL.
    try:
        mid = re.search(r'\?id=(.*)', url).group(1)
        hosturl = '%s/getHost/%s' % (base_url, mid)
        response = Request().post(hosturl,
                                  headers={
                                      'origin': base_url,
                                      'referer': url
                                  })
        movie_url = base64.b64decode(response)
        header = {
            'Origin': 'http://www.vtv16.com',
            'User-Agent': "Chrome/59.0.3071.115 Safari/537.36",
            'Referer': movie.get('originUrl')
        }
        return movie_url + "|%s" % urlencode(header), base_url
    except:
        # Fall through to the redirect-based method on any failure.
        pass
    # Method 2: follow the redirect and derive the HLS playlist URL.
    request.get(url)
    location = request.get_request().history[0].headers['Location']
    base_url = urlparse(location)
    base_url = base_url.scheme + '://' + base_url.netloc
    # https://vip4.movie3s.net/public/dist/index.html?id=0676953662683db3977a8d30e4084414
    mid = re.search(r'\?id=(.*)', location).group(1)
    return '%s/hls/%s/%s.playlist.m3u8' % (base_url, mid, mid), base_url
    # NOTE(review): "method 3" below is unreachable — method 2 always
    # returns (or raises) first.
    # method 3
    medias = json.loads(request.post('%s/vl/%s' % (base_url, mid)))
    if '720p' in medias:
        return create_stream(medias['720p'], base_url)
    return url, base_url
class FShare:
    """Minimal fshare.vn client: login, request a download link, logout."""

    def __init__(self, url, username="", password=""):
        self.request = Request(session=True)
        self.url = url
        self.username = username
        self.password = password

    def login(self):
        """Log in, preserving the original file URL as the back-redirect."""
        token = self.get_token()
        code = self.url.replace('https://www.fshare.vn', '')
        url = 'https://www.fshare.vn/site/login?backUrl=%s' % code
        r = self.request.post(
            url, {
                '_csrf-app': token,
                'LoginForm[email]': self.username,
                'LoginForm[password]': self.password,
                'LoginForm[rememberMe]': 1
            })
        return r

    def get_token(self):
        # CSRF token is embedded in every page's <meta> tag.
        r = self.request.get(self.url)
        return self.extract_token(r)

    def extract_token(self, response):
        return re.search('name="csrf-token" content="(.*)">', response).group(1)

    def get_link(self):
        """Request the direct download URL for self.url.

        Raises Exception('Fshare', 'error') when the API reports an error
        (e.g. quota exceeded or invalid link code).
        """
        if not self.username or not self.password:
            # Anonymous download: a token from the public page is enough.
            token = self.get_token()
        else:
            r = self.login()
            token = self.extract_token(r)
        code = re.search('/file/([^\?]+)', self.url).group(1)
        r = self.request.post('https://www.fshare.vn/download/get', {
            '_csrf-app': token,
            'linkcode': code,
            'withFcode5': 0,
            'fcode': ''
        })
        item = json.loads(r)
        self.logout()
        if 'errors' in item:
            helper.message("Fshare error: %s" % item['errors']['linkcode'][0])
            raise Exception('Fshare', 'error')
            # NOTE(review): unreachable after the raise above.
            return
        # should block ui to wait until able retrieve a link
        return item[u'url']

    def logout(self):
        self.request.get('https://www.fshare.vn/site/logout')
def get_stream(url, header, base_path=None, action="HEAD"):
    """Fetch an HLS playlist, absolutize segment URLs, bulk-resolve their
    redirects, and publish the rewritten playlist on PasteBin.

    `action` is currently unused. Returns the PasteBin URL of the playlist.
    """
    req = Request()
    r = req.get(url, headers=header)
    if not base_path:
        base_url = urlparse(url)
        base_url = base_url.scheme + '://' + base_url.netloc
    else:
        base_url = base_path
    # Master playlist: descend into the chosen variant first.
    if re.search('EXT-X-STREAM-INF', r):
        ad_url = get_adaptive_link(r)
        if 'http' not in ad_url:
            ad_url = base_url + ad_url
        r = req.get(ad_url, headers=header)
    playlist = ""
    links = []
    is_redirect = True
    lines = r.splitlines()
    for line in lines:
        if len(line) > 0:
            # guess link: any non-'#' line is a segment URI.
            if '#' not in line[0]:
                if 'http' in line:
                    path = line
                elif '//' in line[0:2]:
                    # Protocol-relative URL.
                    path = "{}{}".format("https:", line)
                elif '/' in line[0]:
                    path = "{}/{}".format(base_url, line)
                else:
                    path = "{}/{}".format(base_url, line)
                if 'vdacdn.com' in path:
                    # This CDN serves directly; skip the redirect pass and
                    # downgrade to http.
                    is_redirect = False
                    path = path.replace('https://', 'http://')
                if 'cdnplay.xyz' in path:
                    is_redirect = False
                    # path += "|%s" % urlencode(header)
                links.append({'url': path, 'parser': parse_link, 'responseHeader': True})
            else:
                path = line
            playlist += '%s\n' % path
    # Resolve every segment's redirect target concurrently.
    if is_redirect and len(playlist) > 0:
        arequest = AsyncRequest(request=req)
        results = arequest.get(links, redirect=False, headers=header, verify=False)
        for i in range(len(links)):
            playlist = playlist.replace(links[i].get('url'), results[i])
    url = PasteBin().dpaste(playlist, name='adaptivestream', expire=60)
    return url
def parse_link(self, url):
    """Resolve indirection links and unwrap the proxy's url parameter."""
    if re.search('getLinkSimple', url):
        resolver = Request()
        resolver.get(url)
        url = resolver.get_request().url
    wrapped = re.search('128.199.198.106/video\?url=(.*)', url)
    if wrapped:
        url = urllib.unquote(wrapped.group(1))
    return url
def get_link(url):
    """Follow the player redirect and build the HLS playlist URL from its id."""
    req = Request()
    req.get(url, redirect=True)
    redirect_headers = req.get_request().history[0].headers
    location = redirect_headers['Location']
    media_id = re.search('id=(.*)', location).group(1)
    parts = urlparse(location)
    origin = parts.scheme + '://' + parts.netloc
    return '%s/hls/%s/%s.playlist.m3u8' % (origin, media_id, media_id)
class Bilutvb:
    """Scraper for bilutvb.com using its ajax endpoints."""
    domain = "https://bilutvb.com"

    def __init__(self):
        self.request = Request(
            header={
                "Accept-Language": "en-US,en;q=0.9,vi;q=0.8",
                "accept-encoding": "deflate"
            })

    def getCategory(self):
        response = self.request.get(self.domain)
        return Category().get(response), Channel().get(response)

    def getChannel(self, channel, page=1):
        channel = channel.replace(self.domain, "")
        if page > 1:
            # Pagination uses /page/N paths, so strip trailing .html first.
            channel = channel.replace('.html/', "/")
            channel = channel.replace('.html', "/")
            url = '%s%s/page/%d' % (self.domain, channel, page)
        else:
            url = '%s%s' % (self.domain, channel)
        response = self.request.get(url)
        return Channel().get(response)

    def getMovie(self, url):
        # https://bilutvb.com/ajax/movie_load_info/1936
        mid = re.search(r'load_info/(\d+)', url).group(1)
        # https://bilutvb.com/ajax/get_episodes/1936
        response = self.request.get('%s/ajax/get_episodes/%s' % (self.domain, mid))
        return Movie().get(response)

    def getLink(self, movie):
        # movie['link'] carries the numeric source id.
        # response = self.request.get(movie['link'])
        # https://bilutvb.com/ajax/get_sources/67325
        response = self.request.get('%s/ajax/get_sources/%s' % (self.domain, movie['link']))
        return Movie().get_link(response, self.request, self.domain)

    def search(self, text, page=1):
        # url = "%s/search/%s" % (self.domain, urllib.quote_plus(text))
        url = "%s/wp-admin/admin-ajax.php" % self.domain
        response = self.request.post(url,
                                     params={
                                         'action': 'halimthemes_ajax_search',
                                         'search': text
                                     })
        return Channel().getSearchResult(response)
class Tvhay:
    """Scraper for tvhay.org, which sits behind the Sucuri cloud proxy."""
    domain = "http://tvhay.org/"

    # Every request must carry the same browser-like headers or the proxy
    # rejects it; hoisted here instead of repeating the dict at every call
    # site (the original duplicated it seven times, inviting drift).
    headers = {
        'Referer': 'http://tvhay.org/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
    }

    def __init__(self):
        self.request = Request(session=True)
        # First fetch returns the Sucuri challenge; solve it for a cookie,
        # then fetch again with that cookie to obtain the final cookie set.
        body = self.request.get(self.domain)
        cookie = SucuriCloudProxy.get_cookie(body)
        body = self.request.get(self.domain, cookies=cookie, headers=self.headers)
        self.cookie = SucuriCloudProxy.get_cookie(body)

    def getCategory(self):
        response = self.request.get(self.domain, cookies=self.cookie,
                                    headers=self.headers)
        return Category().get(response)

    def getChannel(self, channel, page=1):
        channel = channel.replace(self.domain, "")
        if page > 1:
            url = '%s%spage/%d' % (self.domain, channel, page)
        else:
            url = '%s%s' % (self.domain, channel)
        response = self.request.get(url, cookies=self.cookie,
                                    headers=self.headers)
        return Channel().get(response, page)

    def getMovie(self, id):
        # Resolve the watch-page URL first, then fetch the episode list.
        url = Movie().get_movie_link(
            self.request.get(id, cookies=self.cookie, headers=self.headers))
        response = self.request.get(url, cookies=self.cookie,
                                    headers=self.headers)
        return Movie().get(response)

    def getLink(self, movie):
        response = self.request.get(movie['link'], cookies=self.cookie,
                                    headers=self.headers)
        return Movie().get_link(response)

    def search(self, text):
        url = "%ssearch/%s" % (self.domain, urllib.quote_plus(text))
        response = self.request.get(url, cookies=self.cookie,
                                    headers=self.headers)
        return Channel().get(response, 1)
def parse_link(url):
    """Resolve indirect links; return None when the target reports an error."""
    if re.search('getLinkSimple', url):
        resolver = Request()
        resolver.get(url)
        url = resolver.get_request().url
    wrapped = re.search('128.199.198.106/video\?url=(.*)', url)
    if wrapped:
        url = urllib.unquote(wrapped.group(1))
    if 'error' in url.encode('utf-8'):
        return None
    return url
def get_link(url):
    """Follow the player redirect to get the media id, then query
    sl2.animehay.tv for available streams.

    NOTE(review): the create_playlist() result is discarded and the
    function always returns None — confirm whether the playlist URL was
    meant to be returned to the caller.
    """
    req = Request()
    req.get(url, redirect=True)
    response = req.get_request()
    location = response.history[0].headers['Location']
    id = re.search('id=(.*)', location).group(1)
    # POST http://sl2.animehay.tv/vl/{id}
    response = json.loads(
        req.post('http://sl2.animehay.tv/vl/%s' % id,
                 headers={'Referer': location}))
    if '720p' in response:
        create_playlist(response['720p'])
    return None
class Phimmedia:
    """Scraper for phimmedia.me (no Cloudflare cookie handling needed)."""
    replace_domain = "https://www.phimmedia.me"
    domain = "https://www.phimmedia.me"
    cookies = {}

    def __init__(self):
        # `h` is a module-level default header dict; session keeps cookies.
        self.request = Request(h, session=True)

    # Cloudflare cookie refresh (updateSession) is disabled for this
    # domain; kept for reference:
    # def updateSession(self, url, delay=10):
    #     try:
    #         scraper = CloudScraper.create_scraper(delay=delay)
    #         scraper.headers.update({'User-Agent': user_agent})
    #         self.cookies = scraper.get(url).cookies.get_dict()
    #         with open(helper.get_file_path('phimmedia.bin'), 'wb') as f:
    #             pickle.dump(self.cookies, f)
    #     except: pass

    def getCategory(self):
        response = self.request.get("{}/".format(self.domain))
        return Category().get(response), Channel().get(response)

    def getChannel(self, channel, page=1):
        # Accept links from either configured domain.
        channel = channel.replace(self.replace_domain, "").replace(self.domain, "")
        if page > 1:
            url = '%s%s&page=%d' % (self.domain, channel, page)
        else:
            url = '%s%s' % (self.domain, channel)
        response = self.request.get(url)
        return Channel().get(response)

    def getMovie(self, id):
        url = "%sxem-online.html" % id
        response = self.request.get(url)
        return Movie().get(response)

    def getLink(self, movie):
        movie_link = movie['link'].replace(self.replace_domain, self.domain)
        response = self.request.get(movie_link)
        return Movie().get_link(response, movie['link'])

    def search(self, text, page=1):
        url = "%s/index.php?keyword=%s&do=phim&act=search&page=%s" % (
            self.domain, quote_plus(text), page)
        response = self.request.get(url)
        return Channel().get(response)
def get_link(url, media):
    """Pick the best variant (1080p > 720p > 360p) from an HLS master playlist.

    Falls back to returning the master URL with urlencoded headers attached
    when no variant can be resolved.
    """
    base_url = urlparse(url)
    base_url = base_url.scheme + '://' + base_url.netloc
    header = {
        'Origin': base_url,
        'User-Agent': 'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25'
    }
    request = Request(header)
    response = request.get(str(url))
    # Heights advertised in the master playlist, e.g. RESOLUTION=1920x1080.
    resolutions = re.findall('RESOLUTION=\d+x(\d+)', response)
    matches = re.findall(r'(.*\.m3u8)', response)
    print("Found total %d stream" % len(resolutions), resolutions)
    if len(resolutions) > 1:
        if '1080' in resolutions:
            # BUGFIX: this branch compared against '720', so it selected the
            # 720p index (or -1) even when a 1080p variant was present.
            idx = next(
                (resolutions.index(i) for i in resolutions if '1080' == i), -1)
            stream_url = url.replace('playlist.m3u8', matches[idx])
            stream_url = calculate_stream(request.get(stream_url), base_url,
                                          media['originUrl'])
            print("1080 url:%s" % stream_url)
            if stream_url:
                return stream_url
        if '720' in resolutions:
            idx = next(
                (resolutions.index(i) for i in resolutions if '720' == i), -1)
            stream_url = url.replace('playlist.m3u8', matches[idx])
            stream_url = calculate_stream(request.get(stream_url), base_url,
                                          media['originUrl'])
            print("720 url:%s" % stream_url)
            if stream_url:
                return stream_url
        if '360' in resolutions:
            idx = next(
                (resolutions.index(i) for i in resolutions if '360' == i), -1)
            stream_url = url.replace('playlist.m3u8', matches[idx])
            stream_url = calculate_stream(request.get(stream_url), base_url,
                                          media['originUrl'])
            print("360 url:%s" % stream_url)
            if stream_url:
                return stream_url
    return str(url) + "|%s" % urlencode(header)
def get_link(url, media):
    """Extract the stream link from a Lotus watch page.

    Returns (url, 'lotus') on success, or (None, None) when no link is
    found or the user cancels the quality dialog.
    """
    print("*********************** Apply Lotus url %s" % url)
    header = {
        'referer': 'https://lotus.vn/',
        'User-Agent': "Chrome/59.0.3071.115 Safari/537.36",
    }
    req = Request()
    response = req.get(url, headers=header)
    print(response.encode('utf8'))
    source = re.search(r'"link":\s?"(.*?)",', response)
    # BUGFIX: the original called source.group(1) before checking that the
    # regex matched, raising AttributeError on pages without a "link" field.
    if not source:
        return None, None
    if '.mp4' in source.group(1):
        # Direct MP4: no playlist handling needed.
        url = source.group(1)
        return url, 'lotus'
    master_url = source.group(1)
    playlist = get_adaptive_link(Request().get(master_url, headers=header))
    listitems = []
    for i in playlist:
        listitems.append("%s (%s)" % (i[0], i[1]))
    index = helper.create_select_dialog(listitems)
    if index == -1:
        return None, None
    url = playlist[index][1]
    # Attach playback headers in Kodi's url|headers format.
    url = "{}|{}".format(url, urlencode(header))
    return url, 'lotus'
def get_link(url, movie):
    """Rewrite redirect-HLS entries and host the playlist on PasteBin."""
    playlist = Request().get(url)
    playlist = playlist.replace('/redirect/hls', '/hls')
    paste_url = PasteBin().dpaste(playlist, name='movie3s', expire=60)
    return paste_url, 'movie3s_hls'
def get_link(url):
    """Pull the data-file attribute from the embed page."""
    page = Request().get(url, redirect=True)
    stream = re.search(' data-file="(.*?)"', page).group(1)
    return stream, 'Animehay CCA'
def get_link(url, movie):
    """Resolve a dood.to embed into a playable URL.

    dood pages expose a token-protected pass path in their player script;
    the final URL is the decoded response plus the token plus a millisecond
    timestamp, with the Referer header appended in Kodi's url|headers form.
    """
    print "*********************** Apply dood url %s" % url
    req = Request()
    response = req.get(url)
    # group(1): the pass path; group(2): the token query suffix.
    match = re.search(
        r'''dsplayer\.hotkeys[^']+'([^']+).+?function\s*makePlay.+?return[^?]+([^"]+)''',
        response, re.DOTALL)
    if match:
        header = {'Referer': url}
        token = match.group(2)
        url = 'https://dood.to' + match.group(1)
        response = req.get(url, headers=header)
        return dood_decode(response) + token + str(int(
            time.time() * 1000)) + "|%s" % urlencode(header), 'dood'
    return url, 'dood'
def get_link(url, media):
    """Resolve a playoffsite embed URL into a PasteBin-hosted HLS playlist.

    Example embeds:
      https://play.playoffsite.xyz/play/v1/5f753b4889b8c5269a591e29
      https://play.playoffsite.xyz/apiv1/playhq/5f6581f43036707a6803e61c
    Returns (playlist_url, 'Tvhay'), or the original url when no /v1/ id
    can be parsed.
    """
    m_id = re.search(r'v1/(.*)', url)
    header = {'Referer': url}
    req = Request()
    if m_id:
        m_id = m_id.group(1)
        # The play page embeds the CDN domain list and file id as JS vars:
        # get domain list https://play.playoffsite.xyz/play/v1/<id>
        # var DOMAIN_LIST =
        response = req.get(
            "https://play.playoffsite.xyz/play/v1/{}".format(m_id),
            headers=header)
        domains = re.search(r'var DOMAIN_LIST = (\[.*\])', response).group(1)
        idfile = re.search(r'var idfile = "(.*)";', response).group(1)
        response = req.post(
            "https://play.playoffsite.xyz/apiv1/playhq/{}".format(m_id),
            headers=header,
            data="")
        playlist = create_playlist(response, idfile, domains)
        url = PasteBin().dpaste(playlist, name='playoffsite', expire=60)
        # url = helper.write_file('playlist.strm', url)
    return url, 'Tvhay'
def get_link(url, media):
    """Resolve an iframe-embedded player into (stream_url|headers, label).

    Two paths: if the player redirects, its id is exchanged via the host's
    getLinkStreamMd5 endpoint; otherwise a jwplayer-style sources array is
    scraped from the page's JS. Returns (None, None) on failure or cancel.
    """
    request = Request()
    base_url = urlparse(media.get('originUrl'))
    base_url = base_url.scheme + '://' + base_url.netloc
    header = {
        'Referer': url,
        'User-Agent': "Chrome/59.0.3071.115 Safari/537.36",
        'Origin': base_url
    }
    print "Apply iframeembed url %s" % url
    resp = request.get(url, headers=header)
    req = request.get_request()
    if req.history:
        # Redirected: exchange the id for the stream list via the host API.
        r_url = req.url
        rurl = urlparse(r_url)
        rurl = rurl.scheme + '://' + rurl.netloc
        rid = re.search(r'id=(.*)', r_url).group(1)
        rurl = "{}/getLinkStreamMd5/{}".format(rurl, rid)
        sources = request.get(rurl, headers=header)
        sources = json.loads(sources)
        print 11111111111111111111111111
        print sources
    else:
        # No redirect: scrape the sources array from the inline script.
        sources = re.search(r'sources\s?[=:]\s?(\[.*?\])', resp, re.DOTALL)
        if sources:
            # Collapse to one line, then convert loose JS to strict JSON.
            sources = "".join([s for s in sources.group(1).splitlines() if s.strip("\r\n")])
            sources = re.sub(r'\s+', '', sources)
            sources = helper.convert_js_2_json(sources)
            print 2222222222222222222222222
            print sources
    if sources:
        if len(sources) > 1:
            # Multiple qualities: let the user pick one.
            listitems = []
            for i in sources:
                listitems.append("%s (%s)" % (i.get('label'), i.get('file')))
            index = xbmcgui.Dialog().select("Select stream", listitems)
            if index == -1:
                return None, None
            else:
                return sources[index].get('file') + "|%s" % urlencode(header), sources[index].get('label')
        else:
            return sources[0].get('file') + "|%s" % urlencode(header), sources[0].get('label')
    return None, None
def get_link(url):
    """Extract the first jwplayer source file from the embed page."""
    page = Request().get(url, redirect=True)
    stream = re.search(r"sources:\[{file:\s'(.*?)'}", page).group(1)
    return stream, 'Animehay moekawaii'
def get_link(url):
    """Build an absolute kyunkyun stream URL from the embed page."""
    page = Request().get(url, redirect=True)
    path = re.search(r'"file": "(.*?)",', page).group(1)
    stream = "https://animehay.kyunkyun.net{}".format(path)
    return stream, 'Animehay kyunkyun'
def get(self, response, skipEps=False): movie = { 'group': {}, 'episode': [], 'links': [], } # get all server list mid = None try: mid = re.search(r'drt:direction,mid:"(.*)",idx:idx,', response).group(1) except: pass if not mid: mid = re.search(r'mid:\s?"(.*?)",', response).group(1) list_eps = re.search(r'div class="movie-eps-nav" data-min="(\d+)" data-max="(\d+)"', response) # http://dongphim.net/content/subitems?drt=down&mid=8ghRyAh1&idx=400 request = Request() idx = int(list_eps.group(1)) eps = "" while True: if idx < 11: break else: url = "http://dongphim.net/content/subitems?drt=down&mid=%s&idx=%s" % (mid, idx) data = json.loads(request.get(url)) eps += data['data'] idx = data['idx'] eps = eps.replace('\t\n\t', '') eps = eps.replace('\n\t\t', '') eps = eps.replace('\\\"', '"') eps = eps.replace('\n\t', '') eps = eps.replace('\t', '') soup = BeautifulSoup(eps, "html.parser") movie['group']['Dongphim'] = [] eps = soup.select('a.movie-eps-item') for i in reversed(range(len(eps))): ep = eps[i] if 'disabled' in ep.get('class'): continue movie['group']['Dongphim'].append({ 'link': ep.get('href').encode('utf-8'), 'title': ep.get('title').encode('utf-8'), }) soup = BeautifulSoup(response, "html.parser") eps = soup.select('a.movie-eps-item') for i in reversed(range(len(eps))): ep = eps[i] if 'disabled' in ep.get('class'): continue movie['group']['Dongphim'].append({ 'link': ep.get('href').encode('utf-8'), 'title': ep.get('title').encode('utf-8'), }) return movie
class Animehay:
    """Scraper for animehay.tv; logs in with stored credentials on startup."""
    domain = "http://animehay.tv"

    def __init__(self):
        self.username = helper.getSetting('animehay.username')
        self.password = helper.getSetting('animehay.password')
        self.request = Request(session=True)
        self.login()

    def login(self, redirect=None):
        # 'send_log' is the login form's submit-button value (Vietnamese
        # for "Log In"); the form handler requires it.
        params = {
            'user_id': self.username,
            'password': self.password,
            'send_log': "Đăng Nhập"
        }
        response = self.request.post('%s//dang-nhap?ref=/' % self.domain, params)
        return response

    def getCategory(self):
        response = self.request.get(self.domain)
        return Category().get(response), Channel().get(response, 1)

    def getChannel(self, channel, page=1):
        channel = channel.replace(self.domain, "")
        if page > 1:
            url = '%s%s?page=%d' % (self.domain, channel, page)
        else:
            url = '%s%s' % (self.domain, channel)
        response = self.request.get(url)
        return Channel().get(response, page)

    def getMovie(self, id):
        # Resolve the watch-page URL first, then fetch it with the
        # logged-in session.
        url = Movie().get_movie_link(Request().get(id))
        response = self.request.get(url)
        return Movie().get(response)

    def getLink(self, movie):
        response = self.request.get(movie['link'])
        return Movie().get_link(response, movie['link'])

    def search(self, text):
        url = "%s/tim-kiem?q=%s" % (self.domain, urllib.quote_plus(text))
        response = self.request.get(url)
        return Channel().get(response, 1)
class Phimmoi:
    """Scraper for phimmoi.net."""
    domain = "http://www.phimmoi.net/"

    def __init__(self):
        # `h` is a module-level default header dict; session keeps cookies.
        self.request = Request(h, session=True)
        # Prime the session via the site's gate endpoint.
        self.request.get('http://www.phimmoi.net/vn.php')

    def getCategory(self):
        response = self.request.get(self.domain)
        return Category().get(response)

    def getChannel(self, channel, page=1):
        channel = channel.replace(self.domain, "")
        if page > 1:
            url = '%s%spage-%d.html' % (self.domain, channel, page)
        else:
            url = '%s%s' % (self.domain, channel)
        response = self.request.get(url, headers=h)
        return Channel().get(response, page)

    def getMovie(self, id):
        # The watch page lives at <movie-path>xem-phim.html.
        url = "%s%sxem-phim.html" % (self.domain, id)
        response = self.request.get(url, headers=h)
        return Movie().get(response, url)

    def getLink(self, movie):
        url = movie['link'].replace(self.domain, '')
        url = "%s%s" % (self.domain, url)
        response = self.request.get(url, headers=h)
        return Movie().get_link(response, url)

    def search(self, text):
        url = "%stim-kiem/%s/" % (self.domain, urllib.quote_plus(text))
        response = self.request.get(url, headers=h)
        return Channel().get(response, 1)
class Phimmoi:
    """Scraper for phimmoi.net (variant with a different getLink flow)."""
    domain = "http://www.phimmoi.net/"

    def __init__(self):
        # `h` is a module-level default header dict; session keeps cookies.
        self.request = Request(h, session=True)
        # Prime the session via the site's gate endpoint.
        self.request.get('http://www.phimmoi.net/vn.php')

    def getCategory(self):
        response = self.request.get(self.domain)
        return Category().get(response)

    def getChannel(self, channel, page=1):
        channel = channel.replace(self.domain, "")
        if page > 1:
            url = '%s%spage-%d.html' % (self.domain, channel, page)
        else:
            url = '%s%s' % (self.domain, channel)
        response = self.request.get(url, headers=h)
        return Channel().get(response, page)

    def getMovie(self, id):
        url = "%s%sxem-phim.html" % (self.domain, id)
        response = self.request.get(url, headers=h)
        return Movie().get(response, url)

    def getLink(self, movie):
        url = "%s%s" % (self.domain, movie['link'])
        response = self.request.get(url, headers=h)
        # NOTE(review): calls Movie().get with a third flag rather than
        # get_link as sibling scrapers do — confirm this is intentional.
        return Movie().get(response, url, True)

    def search(self, text):
        url = "%stim-kiem/%s/" % (self.domain, urllib.quote_plus(text))
        response = self.request.get(url, headers=h)
        return Channel().get(response, 1)
def get_link(url, media): # https://play.playoffsite.xyz/play/v1/5f753b4889b8c5269a591e29 # https://play.playoffsite.xyz/apiv1/playhq/5f6581f43036707a6803e61c m_id = re.search(r'v1/(.*)', url) header = { 'referer': url # 'Referer': 'http://tvhayz.net' } req = Request() if m_id: m_id = m_id.group(1) # get domain list https://play.playoffsite.xyz/play/v1/5f6581f43036707a6803e61c # var DOMAIN_LIST = link = "https://play.playoffsite.xyz/play/v1/{}".format(m_id) response = req.get(link, headers=header) domains = re.search(r'var DOMAIN_LIST = (\[.*\])', response).group(1) idfile = re.search(r'var idfile = "(.*)";', response).group(1) iduser = re.search(r'var idUser = "******";', response).group(1) header = { 'Referer': link, # 'Content-Type': 'application/x-www-form-urlencoded' } # get https://api.playoffsite.xyz/apiv1/views/5fc9bad50a6ad5ac5c00a8e8 # head https://m3u8.playoffsite.xyz/api/v1/png/5fc9bad50a6ad5ac5c00a8e8 # location https://m3u8.playoffsite.xyz/m3u8/v1/4/png/5fc9bad50a6ad5ac5c00a8e8.m3u8 response = req.post( "https://api-sing.playoffsite.xyz/apiv2/{}/{}".format( iduser, idfile), headers=header, params={ 'referrer': 'http://tvhai.org', 'typeend': 'html' }) response = json.loads(response) print response url = response.get('data') # req.head("https://m3u8.playoffsite.xyz/api/v1/png/{}".format(idfile), headers=header) # url = req.get_request().history[0].headers['Location'] # response = req.post("https://api.playoffsite.xyz/apiv1/playhq/{}".format(m_id), headers=header, params="referrer=http%3A%2F%2Ftvhayz.net") # header = {'Referer': link, 'verifypeer': 'false', 'User-Agent': "Chrome/59.0.3071.115 Safari/537.36"} # playlist = create_playlist(response, idfile, domains, header) # url = PasteBin().dpaste(playlist, name='playoffsite', expire=60) # playlist = create_master_playlist(url) # url = PasteBin().dpaste(playlist, name='playoffsite', expire=60) # media['originUrl'] = link # return streamlink.get_link(url, media) return url + "|%s" % urlencode(header), 
'hl3' return url, 'Tvhay'
def get_stream(self, url):
    """Resolve each absolute URL in an HLS playlist to its redirect target
    and publish the rewritten playlist on PasteBin.

    Returns the PasteBin URL of the rewritten playlist.
    """
    req = Request()
    r = req.get(url)
    # Accumulate the rewritten playlist text (renamed from `str`, which
    # shadowed the builtin).
    playlist = ""
    links = []
    for line in r.splitlines():
        if len(line) > 0:
            if re.match('http', line):
                links.append(line)
            playlist += '%s\n' % line
    # HEAD every segment concurrently; each response's Location header
    # holds the real segment URL.
    arequest = AsyncRequest(request=req)
    results = arequest.head(links)
    for i in range(len(links)):
        # BUGFIX: the header key had a trailing space ('Location '), which
        # can never match and raised KeyError on every link.
        playlist = playlist.replace(links[i], results[i].headers['Location'])
    url = PasteBin().dpaste(playlist, name='animiehay', expire=60)
    return url
def create_stream(self, url, base_url):
    """Fetch a playlist with up to 6 attempts, absolutize /drive/hls/ paths,
    and publish the result on PasteBin.

    Returns the PasteBin URL on success, or the original `url` unchanged
    when every attempt failed.
    """
    retry = 5
    res = Request()
    response = None
    while retry >= 0:
        try:
            print('Retry %d: %s' % (retry, url))
            response = res.get(url)
            # The host returns the literal string 'error' on failure.
            if response != 'error':
                break
        except:
            pass
        finally:
            retry -= 1
    if response:
        # Rewrite relative /drive/hls/ segment paths to absolute URLs.
        matches = re.findall('(/drive/hls/.*)', response)
        for m in matches:
            stream_url = base_url + m
            response = response.replace(m, stream_url)
        url = PasteBin().dpaste(response, name=url, expire=60)
    return url
def get_hls_playlist_stream(self, url):
    """Rewrite a phimmoi HLS playlist: resolve CDN redirects in bulk and
    attach a referer to so-trym segment URLs, then host on PasteBin.

    Reads self.originURL for the referer. Returns the PasteBin URL.
    """
    req = Request()
    response = req.get(url)
    # Non-so-trym segments are redirect stubs; resolve them concurrently.
    links = re.findall('(https?://(?!so-trym).*)\r', response)
    if links:
        arequest = AsyncRequest(request=req)
        results = arequest.head(links,
                                headers={
                                    'origin': 'http://www.phimmoi.net',
                                    'referer': self.originURL
                                },
                                redirect=False)
        for i in range(len(links)):
            response = response.replace(links[i], results[i].headers['location'])
    # so-trym segments are served directly but require a referer header
    # (appended in Kodi's url|headers format).
    links = re.findall('(http://so-trym.*)\r', response)
    if links:
        for i in range(len(links)):
            url = '%s|referer=%s' % (links[i], self.originURL)
            response = response.replace(links[i], url)
    url = PasteBin().dpaste(response, name=url, expire=60)
    return url
class Fcine:
    """Scraper for fcine.net; movie pages require a logged-in session."""
    domain = "https://fcine.net"
    token = None       # CSRF key scraped from any page
    member_id = None   # numeric member id scraped alongside the token

    def __init__(self):
        self.request = Request(header={
            'User-Agent': 'Mozilla/5.0',
            'origin': 'https://fcine.net',
            'referer': 'https://fcine.net/login/',
        }, session=True)

    def get_token(self, response=None):
        """Scrape csrfKey/memberID from `response` (or the help page)."""
        if not response:
            response = self.request.get('%s/page/help/' % self.domain)
        self.token = re.search('csrfKey: "(.*)",', response).group(1)
        self.member_id = re.search('memberID: (\d+)', response).group(1)
        return self.token, self.member_id

    def login(self, username, password, header):
        params = {
            'login__standard_submitted': 1,
            'csrfKey': self.token,
            'auth': username,
            'password': password,
            'remember_me': 1,
            'remember_me_checkbox': 1
        }
        return self.request.post('%s/login/' % self.domain, params, headers=header)

    def getCategory(self):
        response = self.request.get(self.domain)
        return Category().get(response)

    def getChannel(self, channel, page=1):
        url = '%s?alphabet=all&page=%d' % (channel, page)
        response = self.request.get(url)
        return Channel().get(response, page)

    def getMovie(self, id):
        # Log in fresh with the movie page as referer; the login response
        # itself carries the movie page content.
        self.get_token()
        response = self.login(
            helper.getSetting('fcine.username'),
            helper.getSetting('fcine.password'), {'referer': id})
        return Movie().get(response)

    def getLink(self, url):
        response = self.request.get(url)
        return Movie().get_link(response)

    def search(self, text):
        url = "%s/findContent/" % (self.domain)
        params = {
            'term': text
        }
        response = self.request.post(url, params)
        return Channel().get(response, 1)