def findvideos(item):
    """Collect playable video Items for a guardareseriecc episode page.

    Scrapes the mirror list from the episode page's <select> element,
    follows each mirror link, decodes the base64-obfuscated iframe URLs
    and resolves known 30x shorteners, then hands the collected URLs to
    servertools for server detection.

    Args:
        item: channel Item whose .url points at the episode page.

    Returns:
        list of server Items (empty list when no video URL is found).
    """
    logger.info("[Alfa].[guardareseriecc] [findvideos]")
    itemlist = []
    listurl = set()
    patron = r'<select.*?style="width:100px;" class="dynamic_select">(.*?)</select>'
    data = httptools.downloadpage(item.url, headers=headers).data
    elenco = scrapertools.find_single_match(data, patron, 0)
    patron = '<a class="" href="(.*?)">(.*?)</a>'
    elenco_link = scrapertools.find_multiple_matches(elenco, patron)
    for scrapedurl, scrapedtitle in elenco_link:
        data = httptools.downloadpage(scrapedurl, headers=headers).data
        if 'protectlink' in data:
            urls = scrapertools.find_multiple_matches(
                data, r'<iframe src="[^=]+=(.*?)"')
            for url in urls:
                url = url.decode('base64')
                # Drop the trailing character: it is not part of the URL.
                # BUG FIX: unwrap_30x_only returns a (url, code) pair — it is
                # unpacked that way everywhere else in this file.  The old code
                # stored the whole tuple in listurl; keep only the URL string.
                url, c = unshortenit.unwrap_30x_only(url[:-1])
                listurl.add(url)
    if listurl:
        itemlist = servertools.find_video_items(data=str(listurl))
        for videoitem in itemlist:
            videoitem.title = item.title + '[COLOR orange][B]' + videoitem.title + '[/B][/COLOR]'
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.show = item.show
            videoitem.plot = item.plot
            videoitem.channel = item.channel
            videoitem.contentType = item.contentType
    return itemlist
def play(item):
    """Resolve item.url to concrete video-server Items.

    Unwraps the cb01 '/film/' redirect wrapper, decodes base64 '/goto/'
    links, rewrites the retired cineblog01.uno domain and handles
    'go.php' protected pages, then delegates server detection to
    servertools.
    """
    support.log()
    itemlist = []

    ### Handling new cb01 wrapper
    if host[9:] + "/film/" in item.url:
        redirect = httptools.downloadpage(
            item.url, only_headers=True, follow_redirects=False
        ).headers.get("location", "")
        support.log("/film/ wrapper: ", redirect)
        if redirect:
            item.url = redirect

    # '/goto/<b64>' links carry the real target base64-encoded in the path.
    if '/goto/' in item.url:
        item.url = item.url.split('/goto/')[-1].decode('base64')

    # Rewrite the retired domain to its current replacement.
    item.url = item.url.replace('http://cineblog01.uno', 'http://k4pp4.pw')

    logger.debug("##############################################################")

    if "go.php" not in item.url:
        # Plain link: let the shared swzz resolver do the work.
        data = support.swzz_get_url(item)
        return servertools.find_video_items(data=data)

    data = httptools.downloadpage(item.url).data
    if "window.location.href" in data:
        try:
            data = scrapertoolsV2.find_single_match(
                data, 'window.location.href = "([^"]+)";')
        except IndexError:
            # No JS redirect in the body: fall back to the HTTP Location header.
            data = httptools.downloadpage(
                item.url, only_headers=True, follow_redirects=False
            ).headers.get("location", "")
        data, c = unshortenit.unwrap_30x_only(data)
    else:
        data = scrapertoolsV2.find_single_match(
            data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
    logger.debug("##### play go.php data ##\n%s\n##" % data)
    return servertools.find_video_items(data=data)
def swzz_get_url(item):
    """Resolve a protector/shortener link in item.url to the real URL.

    Three cases are handled:
      * '/link/' pages: the target is either a JS `link = "..."` assignment,
        a meta og:url / URL= redirect, or hidden inside packed
        (p,a,c,k,e,d) JavaScript that must be unpacked first.
      * stayonline.pro pages: the target is fetched from an AJAX endpoint.
      * anything else: item.url is returned unchanged.

    Returns:
        The resolved URL string — or, for non-vcrypt '/link/' targets, the
        downloaded page body; '' when stayonline's JSON reply is malformed.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0'
    }
    if "/link/" in item.url:
        data = httptools.downloadpage(item.url, headers=headers).data
        if "link =" in data:
            data = scrapertools.find_single_match(data, 'link = "([^"]+)"')
            if 'http' not in data:
                data = 'https:' + data
        else:
            match = scrapertools.find_single_match(
                data, r'<meta name="og:url" content="([^"]+)"')
            match = scrapertools.find_single_match(
                data, r'URL=([^"]+)">') if not match else match

            if not match:
                from lib import jsunpack

                try:
                    data = scrapertools.find_single_match(
                        data.replace('\n', ''),
                        r"(eval\s?\(function\(p,a,c,k,e,d.*?)</script>")
                    data = jsunpack.unpack(data)
                    logger.debug("##### play /link/ unpack ##\n%s\n##" % data)
                except Exception:
                    # BUG FIX: was a bare `except:` which also swallowed
                    # SystemExit/KeyboardInterrupt; narrowed while keeping the
                    # same best-effort behaviour for already-unpacked content.
                    logger.debug(
                        "##### The content is yet unpacked ##\n%s\n##" % data)

                data = scrapertools.find_single_match(
                    data, r'var link(?:\s)?=(?:\s)?"([^"]+)";')
                data, c = unshortenit.unwrap_30x_only(data)
            else:
                data = match
        # NOTE(review): reconstructed nesting — these post-processing steps
        # are applied to every '/link/' result, matching upstream layout.
        if data.startswith('/'):
            data = urlparse.urljoin("http://swzz.xyz", data)
        if "vcrypt" not in data:
            data = httptools.downloadpage(data).data
        logger.debug("##### play /link/ data ##\n%s\n##" % data)
    elif 'https://stayonline.pro' in item.url:
        # stayonline exposes the real link via an AJAX endpoint keyed by the
        # next-to-last path segment of the page URL.
        link_id = item.url.split('/')[-2]  # renamed from `id` (shadowed builtin)
        reqUrl = 'https://stayonline.pro/ajax/linkView.php'
        p = urllib.urlencode({"id": link_id})
        data = httptools.downloadpage(reqUrl, post=p).data
        try:
            import json
            data = json.loads(data)['data']['value']
        except Exception:
            # BUG FIX: was a bare `except:`; malformed/unexpected JSON still
            # signals failure by returning the empty string.
            return ''
    else:
        data = item.url
    return data