def findhost():
    """Return the channel's current base URL.

    The old domain answers with a redirect; only the response headers are
    fetched (no body, no redirect following) and the new host is taken from
    the 'location' header. When the redirect points to a Google
    "site:" search, the real host is extracted from the query string.
    """
    permUrl = httptools.downloadpage('https://www.popcornstream.info', follow_redirects=False, only_headers=True).headers
    if 'google' in permUrl['location']:
        # Strip the Google search prefix to recover the target host.
        host = permUrl['location'].replace('https://www.google.it/search?q=site:', '')
        if host[:4] != 'http':
            # Extracted value carries no scheme: rebuild it with https://.
            host = 'https://' + permUrl['location'].replace('https://www.google.it/search?q=site:', '')
    else:
        # Plain redirect: the new domain is the location itself.
        host = permUrl['location']
    return host


# NOTE(review): get_channel_url is given the findhost callable itself —
# presumably it invokes it when no cached/configured URL exists; confirm
# against platformcode.config.
host = config.get_channel_url(findhost)
headers = [['Referer', host]]


@support.menu
def mainlist(item):
    """Declare the channel's main-menu sections; the @support.menu
    decorator builds the actual item list from locals()."""
    film = ["/film/"]
    anime = ["/genere/anime/"]
    tvshow = ["/serietv/"]
    top = [('Generi', ['', 'genre'])]
    return locals()


# NOTE(review): definition continues past this chunk — body not visible here.
def search(item, text):
# ------------------------------------------------------------ # Ringraziamo Icarus crew # Canale per guardarefilm # ---------------------------------------------------------- import re import urlparse from core import httptools from core import scrapertools from core import servertools from core.item import Item from platformcode import logger, config __channel__ = 'guardarefilm' host = config.get_channel_url(__channel__) headers = [['Referer', host]] def mainlist(item): logger.info("kod.guardarefilm mainlist") itemlist = [ Item( channel=item.channel, title="[COLOR azure]Novita'[/COLOR]", action="peliculas", url="%s/streaming-al-cinema/" % host, thumbnail= "http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png" ),
# Random use of User-Agents, if nad is not specified HTTPTOOLS_DEFAULT_RANDOM_HEADERS = False domainCF = list() channelsCF = [ 'guardaserieclick', 'casacinema', 'dreamsub', 'ilgeniodellostreaming', 'piratestreaming', 'altadefinizioneclick', 'altadefinizione01_link' ] otherCF = [ 'altadefinizione-nuovo.link', 'wstream.video', 'akvideo.stream', 'backin.net', 'vcrypt.net' ] for ch in channelsCF: domainCF.append( urlparse.urlparse(config.get_channel_url(name=ch)).hostname) domainCF.extend(otherCF) def get_user_agent(): # Returns the global user agent to be used when necessary for the url. return default_headers["User-Agent"] def get_url_headers(url, forced=False): domain = urlparse.urlparse(url)[1] sub_dom = scrapertools.find_single_match(domain, r'\.(.*?\.\w+)') if sub_dom and not 'google' in url: domain = sub_dom domain_cookies = cj._cookies.get("." + domain, {}).get("/", {})