def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the direct media URL for a Videowood embed page.

    Parameters follow the common server-connector signature; only
    ``page_url`` is used here.

    Returns a list of ``[title, media_url]`` entries for the player.
    """
    logger.info("url=" + page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    # Grab the packed JS (p,a,c,k,e,d) blob. Raw string avoids the
    # invalid-escape DeprecationWarning for "\(" in non-raw literals.
    text_encode = scrapertools.find_single_match(
        data, r"(eval\(function\(p,a,c,k,e,d.*?)</script>")
    # Import locally, as the sibling connectors in this file do; the
    # original body used `aadecode` without any visible import.
    from aadecode import decode as aadecode
    text_decode = aadecode(text_encode)
    # First single-quoted token of the decoded script is the media URL.
    patron = r"'([^']+)'"
    media_url = scrapertools.find_single_match(text_decode, patron)
    video_urls.append([media_url[-4:] + " [Videowood]", media_url])
    return video_urls
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve direct media URLs for an Openload page.

    Handles both embed pages (streamed video, redirected URL) and file
    pages (direct download link). Returns entries shaped as
    ``[title, url]`` or ``[title, url, 0, subtitle]``.
    """
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    video_urls = []
    video = True
    data = scrapertools.downloadpageWithoutCookies(page_url)
    if "videocontainer" not in data:
        # No embedded player: fall back to the download ("/f/") page.
        video = False
        url = page_url.replace("/embed/", "/f/")
        data = scrapertools.downloadpageWithoutCookies(url)
        text_encode = scrapertools.get_match(
            data, "Click to start Download.*?<script[^>]+>(.*?)</script")
    else:
        text_encode = scrapertools.get_match(
            data, "<video[^<]+<script[^>]+>(.*?)</script>")
    from aadecode import decode as aadecode
    text_decode = aadecode(text_encode)
    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header appended to the URL so the download sends a browser User-Agent.
    header_down = "|User-Agent=" + headers['User-Agent'] + "|"
    if video:  # idiomatic truthiness; was "video == True"
        videourl = scrapertools.get_match(text_decode, "(http.*?true)")
        # The decoded URL redirects; follow it once to get the real location.
        videourl = scrapertools.get_header_from_response(
            videourl, header_to_get="location")
        videourl = videourl.replace("https://", "http://").replace("?mime=true", "")
        extension = videourl[-4:]
        video_urls.append([
            extension + " [Openload]", videourl + header_down + extension, 0, subtitle
        ])
    else:
        videourl = scrapertools.find_single_match(
            text_decode, r'"href",(?:\s|)\'([^\']+)\'')
        videourl = videourl.replace("https://", "http://")
        extension = videourl[-4:]
        video_urls.append(
            [extension + " [Openload]", videourl + header_down + extension])
    for video_url in video_urls:
        logger.info("pelisalacarta.servers.openload %s - %s"
                    % (video_url[0], video_url[1]))
    return video_urls
def extraer_enlaces_json(data, referer, subtitle=''):
    """Extract playable links from a JSON (or JSON-like) server response.

    ``data`` is the raw response text, ``referer`` is forwarded to
    ``corregir_url`` to fix each URL, and ``subtitle`` is passed through
    to every resulting item.

    Returns a list of ``[title, url, 0, subtitle]`` entries.
    """
    itemlist = []
    # Ejemplos (payload shapes this parser must cope with):
    # {"Animeyt":[{"file":"https:\/\/storage.googleapis.com\/...\/slow.mp4","type":"mp4","label":"1080p"}]}
    # {"link":[{"link":"http:\/\/video8.narusaku.tv\/static\/720p\/...","label":"720p","type":"mp4"}, ...]}
    # {"link":"https:\/\/storage.googleapis.com\/...\/16050.mp4","type":"mp4"}
    # {"Harbinger":[{"Harbinger":"...","type":"...","label":"..."}], ...}
    # Normalise the "Harbinger" variant so it parses like the "file" form.
    data = data.replace('"Harbinger"', '"file"')

    # Intentar como json
    # ------------------
    try:
        json_data = json.loads(data)
        enlaces = analizar_enlaces_json(json_data)
        for enlace in enlaces:
            url = enlace['link'] if 'link' in enlace else enlace['file']
            if not url.startswith('http'):
                # necesario para "Harbinger": base64 + AAdecode
                url = aadecode(base64.b64decode(url))
            if not url.startswith('http'):
                url = decode_rijndael(url)  # post-"Harbinger" en algunos casos
            tit = ''
            if 'type' in enlace:
                tit += '[%s]' % enlace['type']
            if 'label' in enlace:
                tit += '[%s]' % enlace['label']
            if not tit:
                tit = '.mp4'
            itemlist.append([tit, corregir_url(url, referer), 0, subtitle])

    # Sino, intentar como texto
    # -------------------------
    # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are
    # not swallowed; any parse/lookup failure still takes the regex path.
    except Exception:
        matches = scrapertools.find_multiple_matches(
            data,
            r'"link"\s*:\s*"([^"]*)"\s*,\s*"label"\s*:\s*"([^"]*)"\s*,\s*"type"\s*:\s*"([^"]*)"'
        )
        if matches:
            for url, lbl, typ in matches:
                itemlist.append([
                    '[%s][%s]' % (typ, lbl),
                    corregir_url(url, referer), 0, subtitle
                ])
        else:
            url = scrapertools.find_single_match(data, r'"link"\s*:\s*"([^"]*)"')
            if url:
                itemlist.append(
                    ['.mp4', corregir_url(url, referer), 0, subtitle])
    return itemlist
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve direct media URLs for an Openload page (stramondemand).

    Handles the download ("/f/") page when no embedded player is found,
    otherwise picks the correct AAencoded script on the embed page.
    """
    logger.info("stramondemand.servers.openload url=" + page_url)
    video_urls = []
    data = scrapertools.downloadpageWithoutCookies(page_url)
    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header para la descarga: forces a browser User-Agent on download.
    header_down = "|User-Agent=" + headers['User-Agent'] + "|"
    from aadecode import decode as aadecode
    if "videocontainer" not in data:
        # No embedded player: use the download page instead.
        url = page_url.replace("/embed/", "/f/")
        data = scrapertools.downloadpageWithoutCookies(url)
        text_encode = scrapertools.find_single_match(
            data, "Click to start Download.*?<script[^>]+>(.*?)</script")
        text_decode = aadecode(text_encode)
        videourl = scrapertools.find_single_match(
            text_decode, r'(http.*?)\}').replace("https://", "http://")
        # File extension comes from the page's description meta tag.
        extension = scrapertools.find_single_match(
            data, '<meta name="description" content="([^"]+)"')
        extension = "." + extension.rsplit(".", 1)[1]
        video_urls.append(
            [extension + " [Openload]", videourl + header_down + extension])
    else:
        # Embed page carries several AAencoded scripts; the first one
        # decodes to an expression that selects the real one.
        text_encode = scrapertools.find_multiple_matches(
            data, r'<script[^>]+>(゚ω゚.*?)</script>')
        decodeindex = aadecode(text_encode[0])
        # Buscamos la variable que nos indica el script correcto.
        subtract = scrapertools.find_single_match(
            decodeindex, r'welikekodi.*?(\([^;]+\))')
        # SECURITY: eval() runs an arithmetic expression scraped from the
        # remote page (untrusted input). Kept for compatibility with the
        # site's obfuscation, but it should be replaced with a safe
        # expression evaluator (e.g. ast.literal_eval-style parsing).
        index = int(eval(subtract))
        text_decode = aadecode(text_encode[index])
        videourl = scrapertools.find_single_match(
            text_decode, "(http.*?true)").replace("https://", "http://")
        extension = "." + scrapertools.find_single_match(text_decode, r"video/(\w+)")
        video_urls.append([
            extension + " [Openload] ", videourl + header_down + extension, 0, subtitle
        ])
    for video_url in video_urls:
        logger.info("stramondemand.servers.openload %s - %s"
                    % (video_url[0], video_url[1]))
    return video_urls
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Return ``[title, url]`` entries for a Videowood embed page."""
    logger.info("pelisalacarta.servers.videowood url=" + page_url)
    page = scrapertools.cache_page(page_url)
    # Pull the packed (p,a,c,k,e,d) script blob out of the page.
    packed = scrapertools.find_single_match(
        page, r"(eval\(function\(p,a,c,k,e,d.*?)</script>")
    from aadecode import decode as aadecode
    unpacked = aadecode(packed)
    # URL del vídeo: the first single-quoted token of the decoded script.
    media_url = scrapertools.find_single_match(unpacked, r"'([^']+)'")
    return [[media_url[-4:] + " [Videowood]", media_url]]
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Return player entries for an Openload embed or download page."""
    logger.info("pelisalacarta.servers.openload url=" + page_url)
    data = scrapertools.downloadpageWithoutCookies(page_url)
    is_embed = "videocontainer" in data
    if not is_embed:
        # Not an embed page: switch to the file/download variant.
        data = scrapertools.downloadpageWithoutCookies(
            page_url.replace("/embed/", "/f/"))
        encoded = scrapertools.get_match(
            data, "Click to start Download.*?<script[^>]+>(.*?)</script")
    else:
        encoded = scrapertools.get_match(
            data, "<video[^<]+<script[^>]+>(.*?)</script>")
    from aadecode import decode as aadecode
    decoded = aadecode(encoded)
    subtitle = scrapertools.find_single_match(
        data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Header para la descarga: appended so downloads carry a browser UA.
    header_down = "|User-Agent=" + headers['User-Agent'] + "|"

    video_urls = []
    if is_embed:
        stream = scrapertools.get_match(decoded, "(http.*?true)")
        # Follow the redirect once to obtain the final media location.
        stream = scrapertools.get_header_from_response(
            stream, header_to_get="location")
        stream = stream.replace("https://", "http://").replace("?mime=true", "")
        ext = stream[-4:]
        video_urls.append(
            [ext + " [Openload]", stream + header_down + ext, 0, subtitle])
    else:
        stream = scrapertools.find_single_match(
            decoded, '"href",(?:\s|)\'([^\']+)\'')
        stream = stream.replace("https://", "http://")
        ext = stream[-4:]
        video_urls.append([ext + " [Openload]", stream + header_down + ext])

    for entry in video_urls:
        logger.info(
            "pelisalacarta.servers.openload %s - %s" % (entry[0], entry[1]))
    return video_urls