def findvideos(item):
    """Collect server links for *item* and return the resulting itemlist.

    Scrapes every ``str="..."`` attribute from the page, de-obfuscates each
    match (``@``/``g`` appear to stand in for ``%`` in percent-encoded URLs —
    TODO confirm against the site's JS), and feeds the accumulated text to
    ``support.server`` for link extraction.
    """
    log()
    data = ''
    # support.match(...) returns a tuple; [0] is the list of regex matches.
    matches = support.match(item, 'str="([^"]+)"')[0]
    if matches:
        for match in matches:
            # Append BOTH the unescaped and the raw form, so server
            # detection works whichever encoding the page used.
            data += str(jsfunctions.unescape(re.sub('@|g', '%', match)))
            data += str(match)
        log('DATA', data)
        if 'animepertutti' in data:
            log('ANIMEPERTUTTI!')
        else:
            # NOTE(review): only 'animepertutti' links are kept; anything
            # else is discarded and support.server gets an empty string.
            data = ''
    itemlist = support.server(item, data)
    # Optional link checking (user setting).
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Language filtering and autoplay integration.
    itemlist = filtertools.get_links(itemlist, item, list_language)
    autoplay.start(itemlist, item)
    return itemlist
def findvideos(item):
    """Resolve playable server links directly from the item's URL."""
    support.log(item.channel + " findvideos")
    servers = support.server(item, data=item.url)
    autoplay.start(servers, item)
    return servers
def findvideos(item):
    """Build the server itemlist for *item*, apply the user's link-check and
    language filters, wire up autoplay, and add the videolibrary entry."""
    support.log()
    links = support.server(item, headers=headers)
    # Required for link checking (user setting)
    if __comprueba_enlaces__:
        links = servertools.check_list_links(links, __comprueba_enlaces_num__)
    # Required for FilterTools
    links = filtertools.get_links(links, item, list_language)
    # Required for AutoPlay
    autoplay.start(links, item)
    support.videolibrary(links, item, 'color kod')
    return links
def play(item):
    """Resolve *item*'s URL through the cb01 redirect/shortener layers and
    return the server itemlist via ``support.server``.

    Handles, in order: the ``/film/`` redirect wrapper, ``/goto/`` base64
    links, a hard-coded domain swap, and ``go.php`` interstitial pages.
    """
    support.log()
    itemlist = []  # NOTE(review): never used; support.server builds the list
    ### Handling new cb01 wrapper
    if host[9:] + "/film/" in item.url:
        # Follow the redirect manually to capture the Location header.
        iurl = httptools.downloadpage(item.url, only_headers=True,
                                      follow_redirects=False).headers.get("location", "")
        support.log("/film/ wrapper: ", iurl)
        if iurl:
            item.url = iurl
    if '/goto/' in item.url:
        # Target URL is base64-encoded after '/goto/'.
        # NOTE(review): str.decode('base64') is Python 2 only — confirm the
        # addon still targets Py2 before porting.
        item.url = item.url.split('/goto/')[-1].decode('base64')
    # Hard-coded mirror swap for a dead domain.
    item.url = item.url.replace('http://cineblog01.uno', 'http://k4pp4.pw')
    logger.debug("##############################################################")
    if "go.php" in item.url:
        data = httptools.downloadpage(item.url).data
        if "window.location.href" in data:
            try:
                # JS redirect embedded in the page body.
                data = scrapertoolsV2.find_single_match(
                    data, 'window.location.href = "([^"]+)";')
            except IndexError:
                # Fall back to the HTTP Location header.
                data = httptools.downloadpage(
                    item.url, only_headers=True,
                    follow_redirects=False).headers.get("location", "")
            # Unwrap any remaining 30x shortener hop (second value unused).
            data, c = unshortenit.unwrap_30x_only(data)
        else:
            # No JS redirect: pull the target from the download button.
            data = scrapertoolsV2.find_single_match(
                data, r'<a href="([^"]+)".*?class="btn-wrapper">.*?licca.*?</a>')
        logger.debug("##### play go.php data ##\n%s\n##" % data)
    else:
        # Generic swzz wrapper resolution.
        data = support.swzz_get_url(item)
    return support.server(item, data, headers)
def findepisodevideo(item):
    """Locate the requested episode's link inside the season block and
    return the server itemlist for it.

    ``item.extra[0]`` carries ``(season_id, episode_number)``. Returns an
    empty list when the episode cannot be found (previously this crashed
    with IndexError).
    """
    support.log(item.channel + " findepisodevideo")
    # Download the page
    data = httptools.downloadpage(item.url, headers=headers).data
    # Isolate the block for the requested season
    patron = r'<div class="list [active]*" data-id="%s">(.*?)</div>\s*</div>' % item.extra[0][0]
    blocco = scrapertools.find_single_match(data, patron)
    # Extract the episode link (episode number is stored without leading zeros)
    patron = r'<a data-id="%s[^"]*" data-href="([^"]+)" data-original="([^"]+)" class="[^"]+">' % item.extra[0][1].lstrip("0")
    matches = re.compile(patron, re.DOTALL).findall(blocco)
    # Fix: the original indexed matches[0][0] unconditionally and raised
    # IndexError when the episode was missing or the markup changed.
    if not matches:
        support.log("findepisodevideo: no match for episode", item.extra[0])
        return []
    itemlist = support.server(item, data=matches[0][0])
    autoplay.start(itemlist, item)
    return itemlist
def findvideos(item):
    """Expand keeplinks.co/eu protector pages found on *item*'s page and
    return the server itemlist built from the page plus the unwrapped links.
    """
    support.log(item.channel + " findvideos")
    data = httptools.downloadpage(item.url).data
    # Fix: raw string — '\.' in a plain string is an invalid escape sequence
    # (DeprecationWarning/SyntaxWarning on Python 3).
    patron = r'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # Fix: renamed loop variable 'id' -> 'link_id' (shadowed the builtin).
    for keeplinks, link_id in matches:
        # Cookies that bypass the keeplinks captcha/popup gate.
        headers = [[
            'Cookie',
            'flag[' + link_id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))
        ], ['Referer', keeplinks]]
        html = httptools.downloadpage(keeplinks, headers=headers).data
        # Append the unwrapped target URLs so support.server can pick them up.
        data += str(
            scrapertools.find_multiple_matches(
                html, '</lable><a href="([^"]+)" target="_blank"'))
    itemlist = support.server(item, data=data)
    autoplay.start(itemlist, item)
    return itemlist
def play(item):
    """Resolve the final stream URL for *item* and delegate to the generic
    server resolver."""
    logger.info("[vedohd.py] play")
    resolved = support.swzz_get_url(item)
    return support.server(item, resolved, headers)