def do_downloadpage(url, post=None, headers=None):
    """Download a page through the 'cliver' proxy wrapper, solving the
    Sucuri anti-bot redirect when one is returned.

    :param url: page url to fetch
    :param post: optional POST payload
    :param headers: optional extra request headers
    :return: response body text
    """
    data = httptools.downloadpage_proxy('cliver', url, post=post, headers=headers).data

    # The Sucuri firewall answers with a redirect page containing a JS
    # challenge; solve it, store the resulting cookie and retry once.
    if '<title>You are being redirected...</title>' in data:
        try:
            from lib import balandroresolver
            ck_name, ck_value = balandroresolver.get_sucuri_cookie(data)
            if ck_name and ck_value:
                httptools.save_cookie(ck_name, ck_value, host.replace('https://', '')[:-1])
                data = httptools.downloadpage_proxy('cliver', url, post=post, headers=headers).data
        except Exception:
            # Best effort: if the challenge cannot be solved, return the
            # redirect page instead of crashing.  Narrowed from a bare
            # `except:` so SystemExit/KeyboardInterrupt still propagate.
            pass

    return data
def do_downloadpage(url, post=None):
    """Download a grantorrent page through the proxy wrapper, first
    migrating historic domains in saved links, and solving the Sucuri
    anti-bot redirect when one is returned.

    :param url: page url to fetch
    :param post: optional POST payload
    :return: response body text
    """
    # Historic domains: rewrite links saved with older addon versions.
    ant_hosts = ['http://grantorrent.net/', 'https://grantorrent1.com/', 'https://grantorrent.one/',
                 'https://grantorrent.tv/', 'https://grantorrent.la/', 'https://grantorrent.io/',
                 'https://grantorrent.cc/', 'https://grantorrent.li/', 'https://grantorrent.eu/']
    for ant in ant_hosts:
        url = url.replace(ant, host)

    data = httptools.downloadpage_proxy('grantorrent', url, post=post).data

    # The Sucuri firewall answers with a redirect page containing a JS
    # challenge; solve it, store the resulting cookie and retry once.
    if '<title>You are being redirected...</title>' in data:
        try:
            from lib import balandroresolver
            ck_name, ck_value = balandroresolver.get_sucuri_cookie(data)
            if ck_name and ck_value:
                httptools.save_cookie(ck_name, ck_value, host.replace('https://', '')[:-1])
                data = httptools.downloadpage_proxy('grantorrent', url, post=post).data
        except Exception:
            # Best effort: return the challenge page if it cannot be
            # solved.  Narrowed from a bare `except:` so
            # SystemExit/KeyboardInterrupt still propagate.
            pass

    return data
def do_downloadpage(url, post=None, headers=None):
    """Fetch *url* through the 'cinetux' proxy wrapper and return the body."""
    response = httptools.downloadpage_proxy('cinetux', url, post=post, headers=headers)
    return response.data
def do_downloadpage(url, post=None, use_cache=False):
    """Fetch *url* through the 'gnula' proxy wrapper and return the body."""
    response = httptools.downloadpage_proxy('gnula', url, post=post, use_cache=use_cache)
    return response.data
def do_downloadpage(url, post=None, headers=None):
    """Fetch a cinetux page, upgrading old saved links to the current
    scheme and domain before requesting."""
    # Links saved with older addon versions may use http or the .to domain.
    for old, new in (('http://', 'https://'),
                     ('www.cinetux.to/', 'www.cinetux.nu/')):
        url = url.replace(old, new)
    return httptools.downloadpage_proxy('cinetux', url, post=post, headers=headers).data
def do_downloadpage(url, post=None, follow_redirects=True, only_headers=False):
    """Fetch through the 'playview' proxy wrapper.

    Returns the response headers when *only_headers* is set, otherwise
    the response body.
    """
    response = httptools.downloadpage_proxy('playview', url, post=post,
                                            follow_redirects=follow_redirects,
                                            only_headers=only_headers)
    return response.headers if only_headers else response.data
def do_downloadpage(url, post=None):
    """Fetch a pelisvips page using an iPad user-agent and return the body."""
    ua_headers = {'User-Agent': 'Mozilla/5.0 (iPad; CPU OS 12_2 like Mac OS X)'}
    response = httptools.downloadpage_proxy('pelisvips', url, post=post, headers=ua_headers)
    return response.data
def do_downloadpage(url, post=None, headers=None):
    """Fetch a seriespapaya page through the proxy wrapper, first
    migrating old saved-link domains to the current one.

    Fix: the *headers* parameter was accepted but silently discarded;
    it is now forwarded to the request (as the other channel wrappers do).

    :param url: page url to fetch
    :param post: optional POST payload
    :param headers: optional extra request headers
    :return: response body text
    """
    # Domain migrations for links saved with older addon versions
    # (.com -> .net -> .nu, http -> https, www -> www2).
    url = url.replace('http://', 'https://')
    url = url.replace('seriespapaya.com', 'seriespapaya.net')
    url = url.replace('seriespapaya.net', 'seriespapaya.nu')
    url = url.replace('https://www.', 'https://www2.')
    data = httptools.downloadpage_proxy('seriespapaya', url, post=post, headers=headers).data
    return data
def do_downloadpage(url, post=None, headers=None, raise_weberror=True):
    """Fetch a pelisplay page through the proxy wrapper, migrating old
    saved-link urls (domain change and path rename) first."""
    # Links saved with older addon versions.
    for old, new in (('pelisplay.tv', 'pelisplay.co'),
                     ('pelisplay.co/ver-peliculas', 'pelisplay.co/peliculas')):
        url = url.replace(old, new)
    return httptools.downloadpage_proxy('pelisplay', url, post=post,
                                        headers=headers, raise_weberror=raise_weberror).data
def do_downloadpage(url, post=None, headers=None):
    """Fetch a cinecalidad page, rewriting any known historic domain in
    the url to the currently configured one."""
    # Saved links may use any domain from DOMINIOS; normalize to the
    # domain selected in settings.
    current = config.get_setting('dominio', 'cinecalidad', default=DOMINIOS[0])
    for old_domain in DOMINIOS:
        url = url.replace(old_domain, current)
    return httptools.downloadpage_proxy('cinecalidad', url, post=post, headers=headers).data
def do_downloadpage(url, post=None, referer=None):
    """Fetch a seriespapaya page with a Referer header (HOST when no
    explicit *referer* is given), migrating old saved-link domains first."""
    # Domain migrations for links saved with older addon versions
    # (.com -> .net -> .nu, http -> https, www -> www2).
    for old, new in (('http://', 'https://'),
                     ('seriespapaya.com', 'seriespapaya.net'),
                     ('seriespapaya.net', 'seriespapaya.nu'),
                     ('https://www.', 'https://www2.')):
        url = url.replace(old, new)
    req_headers = {'Referer': referer if referer else HOST}
    return httptools.downloadpage_proxy('seriespapaya', url, post=post, headers=req_headers).data
def do_downloadpage(url, post=None):
    """Fetch an allpeliculas page, rewriting the old domain in saved links."""
    url = url.replace('allpeliculas.tv', 'allpeliculas.nu')  # old saved links
    response = httptools.downloadpage_proxy('allpeliculas', url, post=post)
    return response.data
def do_downloadpage(url, post=None):
    """Fetch a seriesdanko page through the proxy wrapper and return the body."""
    response = httptools.downloadpage_proxy('seriesdanko', url, post=post)
    return response.data