Example #1
def login():
    logger.info()

    try:
        user = config.get_setting("playmaxuser", "playmax")
        password = config.get_setting("playmaxpassword", "playmax")
        if user == "" and password == "":
            return False, "Para ver los enlaces de este canal es necesario registrarse en playmax.mx"
        elif user == "" or password == "":
            return False, "Usuario o contraseña en blanco. Revisa tus credenciales"

        data = httptools.downloadpage(
            "https://playmax.mx/ucp.php?mode=login").data
        if re.search(r'(?i)class="hb_user_data" title="%s"' % user, data):
            if not config.get_setting("sid_playmax", "playmax"):
                sid_ = scrapertools.find_single_match(data, 'sid=([^"]+)"')
                if not sid_:
                    sid_ = scrapertools.find_single_match(
                        config.get_cookie_data(),
                        r'playmax.*?_sid\s*([A-Za-z0-9]+)')
                config.set_setting("sid_playmax", sid_, "playmax")
            return True, ""

        confirm_id = scrapertools.find_single_match(
            data, 'name="confirm_id" value="([^"]+)"')
        sid_log = scrapertools.find_single_match(data,
                                                 'name="sid" value="([^"]+)"')

        post = "username=%s&password=%s&autologin=on&agreed=true&change_lang=0&confirm_id=%s&login=&sid=%s" \
               "&redirect=index.php" % (user, password, confirm_id, sid_log)
        data = httptools.downloadpage("https://playmax.mx/ucp.php?mode=login",
                                      post=post).data
        if "contraseña incorrecta" in data:
            logger.error("Error en el login")
            return False, "Contraseña errónea. Comprueba tus credenciales"
        elif "nombre de usuario incorrecto" in data:
            logger.error("Error en el login")
            return False, "Nombre de usuario no válido. Comprueba tus credenciales"
        else:
            logger.info("Login correcto")
            sid_ = scrapertools.find_single_match(data, 'sid=([^"]+)"')
            if not sid_:
                sid_ = scrapertools.find_single_match(
                    config.get_cookie_data(), r'playmax.*?_sid\s*([A-Za-z0-9]+)')
            config.set_setting("sid_playmax", sid_, "playmax")
            # On the first login, enable global search and the new-releases sections
            if not config.get_setting("primer_log", "playmax"):
                config.set_setting("include_in_global_search", True, "playmax")
                config.set_setting("include_in_newest_peliculas", True,
                                   "playmax")
                config.set_setting("include_in_newest_series", True, "playmax")
                config.set_setting("include_in_newest_infantiles", True,
                                   "playmax")
                config.set_setting("primer_log", True, "playmax")
            return True, ""
    except:
        import traceback
        logger.error(traceback.format_exc())
        return False, "Error en el login. Comprueba tus credenciales o si la web está operativa"
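
login() persists the playmax session id in the add-on settings, so later requests from the channel can reuse the session instead of logging in again. A minimal usage sketch under that assumption; the `sid` query parameter is inferred from the `sid=([^"]+)"` pattern above and is not confirmed by the source:

logged_in, message = login()
if logged_in:
    # Hypothetical follow-up request reusing the stored session id
    sid = config.get_setting("sid_playmax", "playmax")
    data = httptools.downloadpage("https://playmax.mx/index.php?sid=" + sid).data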
Example #2
def getsearch(item):
    logger.info("[hdgratis.py] getsearch")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.hdgratis.org.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<div class="col-xs-2">.*?'
    patron += 'href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # title += " (" + scrapedcalidad + ")"

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=scrapedtitle), tipo='movie'))

    # Pagination
    next_page = ""
    for page in re.compile('<link rel="next" href="(.+?)"/>', re.DOTALL).findall(data):
        next_page = page
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
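
Every example here repeats the same inline block to turn config.get_cookie_data() into a Cookie header: the data comes back in Netscape cookies.txt format, one tab-separated line per cookie (domain, flag, path, secure, expiry, name, value), which is why fields 5 and 6 are the cookie name and value. A minimal sketch of that block factored into a reusable helper; the helper name and its arguments are illustrative, not part of the original API:

import re

def build_cookie_header(cookie_data, domain):
    # cookie_data is in Netscape cookies.txt format:
    # domain \t flag \t path \t secure \t expiry \t name \t value
    pairs = []
    for line in re.findall('(.' + re.escape(domain) + '.*?)\n', cookie_data):
        fields = line.split('\t')
        if len(fields) >= 7:
            pairs.append(fields[5] + "=" + fields[6])  # name=value
    return ";".join(pairs)

# Hypothetical usage, replacing the inline block above:
# headers.append(['Cookie', build_cookie_header(config.get_cookie_data(), 'hdgratis.org')])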
def fichas(item):
    logger.info("[hdgratis.py] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.altadefinizione.black.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    if "/?s=" in item.url:
        patron = '<div class="col-lg-3 col-md-3 col-xs-3">\s*<a href="([^"]+)">\s*<div class="wrapperImage">[^i]+i[^s]+src="([^"]+)"[^>]+> <div class="info">\s*<h5[^>]+>(.*?)<'
    else:
        patron = '<span class="hd">HD</span>\s*<a href="([^"]+)"><img[^s]+src="([^"]+)"[^>]+></a> <div class="info">\s*<[^>]+>[^>]+>(.*?)</a>'


    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:

        title = scrapertools.decodeHtmlentities(scrapedtitle)

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=scrapedtitle), tipo='movie'))

    # Pagination
    next_page = ""
    for page in re.compile('<link rel="next" href="(.+?)"/>', re.DOTALL).findall(data):
        next_page = page

    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/vari/successivo_P.png"))

    return itemlist
def fichas( item ):
    logger.info( "streamondemand.channels.guardaserie fichas" )

    itemlist = []

    data = anti_cloudflare(item.url)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.guardaserie.net.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    data = scrapertools.find_single_match( data, '<a[^>]+>Serie Tv</a><ul>(.*?)</ul>' )

    patron  = '<li><a href="([^"]+)[^>]+>([^<]+)</a></li>'

    matches = re.compile( patron, re.DOTALL ).findall( data )

    for scrapedurl, scrapedtitle in matches:

        itemlist.append( Item( channel=__channel__, action="episodios", title= scrapedtitle , fulltitle=scrapedtitle, show=scrapedtitle, url=scrapedurl ) )

    return itemlist
def fichas(item):
    logger.info("[hdgratis.py] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.altadefinizione.black.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    if "/?s=" in item.url:
        patron = '<div class="col-lg-3 col-md-3 col-xs-3">\s*<a href="([^"]+)">\s*<div class="wrapperImage">[^i]+i[^s]+src="([^"]+)"[^>]+> <div class="info">\s*<h5[^>]+>(.*?)<'
    else:
        patron = '<span class="hd">HD</span>\s*<a href="([^"]+)"><img[^s]+src="([^"]+)"[^>]+></a> <div class="info">\s*<[^>]+>[^>]+>(.*?)</a>'


    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:

        title = scrapertools.decodeHtmlentities(scrapedtitle)

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=scrapedtitle), tipo='movie'))

    # Pagination
    next_page = ""
    for page in re.compile('<link rel="next" href="(.+?)"/>', re.DOTALL).findall(data):
        next_page = page

    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
Example #6
def fichas(item):
    logger.info("[thegroove360.italiafilmvideohd] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.italiafilm.video.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<li class="item">.*?href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?<img src="([^"]+)".*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        if "serie" in scrapedurl:
            scrapedtitle = scrapedtitle + " [COLOR orange](Serie TV)[/COLOR]"
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedthumbnail += "|" + _headers
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos_all"
                         if "serie" not in scrapedurl else "episodios",
                         title=title,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=title,
                         show=scrapedtitle),
                    tipo='movie' if "serie" not in scrapedurl else "tv"))

    # Pagination
    next_page = scrapertools.find_single_match(
        data, '<a href="([^"]+)"\s*><span aria-hidden="true">&raquo;')

    if next_page != "":
        itemlist.append(
            Item(
                channel=__channel__,
                action="fichas",
                title="[COLOR orange]Successivi >>[/COLOR]",
                url=next_page,
                thumbnail=
                "https://raw.githubusercontent.com/stesev1/channels/master/images/channels_icon/next_1.png"
            ))

    return itemlist
def anno(item):
    logger.info("[altadefinizioneclick.py] anno")
    itemlist = []

    data = anti_cloudflare( item.url )

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile( '(.altadefinizione.click.*?)\n', re.DOTALL ).findall( config.get_cookie_data() )
    for cookie in matches:
        name = cookie.split( '\t' )[5]
        value = cookie.split( '\t' )[6]
        cookies+= name + "=" + value + ";"
    headers.append( ['Cookie',cookies[:-1]] )
    import urllib
    _headers = urllib.urlencode( dict( headers ) )
    ## ------------------------------------------------

    data = scrapertools.find_single_match(data,'<ul class="listSubCat" id="Anno">(.*?)</div>')

    patron  = '<li><a href="(.*?)">(.*?)</a></li>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl,scrapedtitle in matches:
        itemlist.append( Item(channel=__channel__, action="fichas", title=scrapedtitle, url=scrapedurl, folder=True))

    return itemlist
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[vimple.ru] get_video_url(page_url=%s)" % page_url)

    media_url = scrapertools.get_match(
        re.sub(
            r'\t|\n|\r|\s',
            '',
            scrapertools.cache_page(page_url)
        ),
        '"video"[^,]+,"url":"([^"]+)"'
    ).replace('\\','')

    media_url+= "|Cookie=" + \
        scrapertools.get_match(
            config.get_cookie_data(),
            '.vimple.ru.*?(UniversalUserID\t[a-f0-9]+)'
        ).replace('\t', '=')

    video_urls = []
    video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [vimple.ru]",media_url])

    for video_url in video_urls:
        logger.info("streamondemand.servers.vimpleru %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Example #9
def get_megavideo_cookie_id():
    logger.info("[megavideo.py] get_megavideo_cookie_id")

    cookie=""

    cookie_data = config.get_cookie_data()
    logger.info("cookie_data="+cookie_data)
    
    lines = cookie_data.split("\n")
    for line in lines:
        logger.info("line="+line)
    
        if "megavideo.com" in line:
            logger.info("[megavideo.py] patron1")
            patron = 'user="([^"]+)"'
            matches = re.compile(patron,re.DOTALL).findall(line)
        
            if len(matches)>0:
                cookie = matches[0]
                break
            else:
                logger.info("[megavideo.py] patron2")
                patron = 'user=([^\;]+);'
                matches = re.compile(patron,re.DOTALL).findall(line)
                if len(matches)>0:
                    cookie = matches[0]
                    break
                else:
                    logger.info("[megavideo.py] No se ha encontrado la cookie de Megavideo")
                    cookie=""
    
    logger.info("cookie="+cookie)
        
    return cookie
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("streamondemand.servers.vimpleru page_url="+page_url)
	
    mobj = re.match(_VALID_URL, page_url)
    video_id = mobj.group('id')
    logger.info("streamondemand.servers.vimpleru video_id="+video_id)

    data = scrapertools.cache_page( page_url )
    logger.info("streamondemand.servers.vimpleru data="+data)

    cookie_data = config.get_cookie_data()
    #logger.info("streamondemand.servers.vimpleru cookie_data="+cookie_data)

    universalid = scrapertools.get_match(cookie_data,'UniversalUserID\s*([a-f0-9]+)')
    logger.info("universalid="+universalid)

    player_url = scrapertools.find_single_match(data,'"swfplayer"\:"([^"]+)"')
    player_url = player_url.replace("\\","")
    logger.info("streamondemand.servers.vimpleru player_url="+player_url)

    player = scrapertools.cache_page( player_url)
    #logger.info("streamondemand.servers.vimpleru player="+repr(player))

    player = zlib.decompress(player[8:])
    #logger.info("streamondemand.servers.vimpleru player="+repr(player))

    xml_pieces = re.findall(b'([a-zA-Z0-9 =+/]{500})', player)
    logger.info("streamondemand.servers.vimpleru xml_pieces="+repr(xml_pieces))
    xml_pieces = [piece[1:-1] for piece in xml_pieces]
    logger.info("streamondemand.servers.vimpleru xml_pieces="+repr(xml_pieces))

    xml_data = b''.join(xml_pieces)
    logger.info("streamondemand.servers.vimpleru xml_data="+repr(xml_data))
    xml_data = base64.b64decode(xml_data)
    logger.info("streamondemand.servers.vimpleru xml_data="+repr(xml_data))

    xml_data = xml.etree.ElementTree.fromstring(xml_data)
	
    video = xml_data.find('Video')
    quality = video.get('quality')
    q_tag = video.find(quality.capitalize())
    '''
    logger.info("streamondemand.servers.vimpleru url: " + q_tag.get('url'))
    logger.info("streamondemand.servers.vimpleru tbr: " +  q_tag.get('bitrate'))
    logger.info("streamondemand.servers.vimpleru filesize: " +  q_tag.get('filesize'))
    logger.info("streamondemand.servers.vimpleru format_id: " +  quality)
    logger.info("streamondemand.servers.vimpleru id: " +  video_id)
    logger.info("streamondemand.servers.vimpleru title: " +  video.find('Title').text)
    logger.info("streamondemand.servers.vimpleru thumbnail: " +  video.find('Poster').get('url'))
    logger.info("streamondemand.servers.vimpleru duration: " +  video.get('duration'))
    logger.info("streamondemand.servers.vimpleru webpage_url: " +  video.find('Share').get('videoPageUrl'))
    '''	
    media_url = q_tag.get('url')+"|Cookie=UniversalUserID="+universalid
    video_urls = []
    video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [vimple.ru]",media_url])

    for video_url in video_urls:
        logger.info("streamondemand.servers.vimpleru %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Example #11
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("(page_url=%s)" % page_url)

    data = httptools.downloadpage(page_url).data

    media_url = scrapertools.find_single_match(
        data, '"video"[^,]+,"url":"([^"]+)"').replace('\\', '')
    data_cookie = config.get_cookie_data()
    cfduid = scrapertools.find_single_match(data_cookie, '.vimple.ru.*?(__cfduid\t[a-f0-9]+)') \
                                           .replace('\t', '=')
    univid = scrapertools.find_single_match(data_cookie, '.vimple.ru.*?(UniversalUserID\t[a-f0-9]+)') \
                                           .replace('\t', '=')

    media_url += "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0" \
                 "&Cookie=%s; %s" % (cfduid, univid)

    video_urls = []
    video_urls.append([
        scrapertools.get_filename_from_url(media_url)[-4:] + " [vimple.ru]",
        media_url
    ])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example #12
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[vimple.ru] get_video_url(page_url=%s)" % page_url)

    media_url = scrapertools.get_match(
        re.sub(r'\t|\n|\r|\s', '', scrapertools.cache_page(page_url)),
        '"video"[^,]+,"url":"([^"]+)"').replace('\\', '')

    media_url+= "|Cookie=" + \
        scrapertools.get_match(
            config.get_cookie_data(),
            '.vimple.ru.*?(UniversalUserID\t[a-f0-9]+)'
        ).replace('\t', '=')

    video_urls = []
    video_urls.append([
        scrapertools.get_filename_from_url(media_url)[-4:] + " [vimple.ru]",
        media_url
    ])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.vimpleru %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
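
The "|Cookie=..." suffix these vimple.ru examples append is Kodi's convention for attaching HTTP headers to a playable URL: everything after the "|" is a urlencoded set of header=value pairs that Kodi sends along with the media request. A minimal sketch of the convention with illustrative values (Python 2, matching the urllib.urlencode calls used throughout):

import urllib

def with_headers(media_url, headers):
    # Kodi convention: url|Header1=value1&Header2=value2
    return media_url + "|" + urllib.urlencode(headers)

# e.g. with_headers("http://example.com/video.mp4",
#                   {"Cookie": "UniversalUserID=abc123",
#                    "User-Agent": "Mozilla/5.0"})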
Example #13
def peliculas(item):
    logger.info("streamondemand.instreaminginfo peliculas")
    itemlist = []

    # Download the page
    data = anti_cloudflare(item.url)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.instreaming.info.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    # Extract the entries (folders)
    patron = '<div class="item">\s*<a href="(.*?)">\s*[^>]+>\s*<img src="(.*?)" alt="(.*?)"[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        ## ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        ## ------------------------------------------------
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Extract the pager

    next_page = scrapertools.find_single_match(
        data, '<link rel="next" href="(.*?)" />')
    if next_page != "":
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=next_page,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
            ))

    return itemlist
Example #15
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("(page_url='%s')" % page_url)

    video_urls = []

    if config.get_setting("premium", server="onefichier") == True:
        user = config.get_setting("user", server="onefichier")
        password = config.get_setting("password", server="onefichier")

        url = "https://1fichier.com/login.pl"
        logger.info("url=" + url)
        post_parameters = {
            "mail": user,
            "pass": password,
            "lt": "on",
            "purge": "on",
            "valider": "Send"
        }
        post = urllib.urlencode(post_parameters)
        logger.info("post=" + post)

        data = scrapertools.cache_page(url, post=post)
        # logger.info("data="+data)

        cookies = config.get_cookie_data()
        logger.info("cookies=" + cookies)

        # 1fichier.com   TRUE    /   FALSE   1443553315  SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ
        sid_cookie_value = scrapertools.find_single_match(
            cookies, "1fichier.com.*?SID\s+([A-Za-z0-9\+\=]+)")
        logger.info("sid_cookie_value=" + sid_cookie_value)

        # .1fichier.com  TRUE    /   FALSE   1443553315  SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ
        cookie = urllib.urlencode({"SID": sid_cookie_value})

        # Find out the real file name
        headers = []
        headers.append([
            'User-Agent',
            'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12'
        ])
        headers.append(['Cookie', cookie])
        filename = scrapertools.get_header_from_response(
            page_url, header_to_get="Content-Disposition")
        logger.info("filename=" + filename)

        # Build the final URL for Kodi
        location = page_url + "|Cookie=" + cookie
        logger.info("location=" + location)

        video_urls.append([filename[-4:] + " (Premium) [1fichier]", location])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
Example #16
def peliculas(item):
    logger.info("streamondemand.playcinema peliculas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)

    # ------------------------------------------------
    cookies = ""
    matches = config.get_cookie_data(item.url).splitlines()[4:]
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    # Extract the entries (folders)
    patron = '<img\s*src="(.*?)"[^>]+><\/a><div\s*class[^<]+<a\s*href="(.*?)">(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle.replace("streaming", "")
        scrapedthumbnail += '|' + _headers
        if (DEBUG): logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='movie'))

    # Extract the pager
    patronvideos = 'class="nextpostslink" rel="next" href="([^"]+)">'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
Example #17
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info(
        "streamondemand.servers.onefichier get_video_url(page_url='%s')" %
        page_url)

    video_urls = []

    if config.get_setting("onefichierpremium") == "true":
        user = config.get_setting("onefichieruser")
        password = config.get_setting("onefichierpassword")

        url = "https://1fichier.com/login.pl"
        logger.info("streamondemand.servers.onefichier url=" + url)
        post_parameters = {
            "mail": user,
            "pass": password,
            "lt": "on",
            "purge": "on",
            "valider": "Send"
        }
        post = urllib.urlencode(post_parameters)
        logger.info("streamondemand.servers.onefichier post=" + post)

        scrapertools.cache_page(url, post=post)
        # logger.info("streamondemand.servers.onefichier data="+data)

        cookies = config.get_cookie_data()
        logger.info("streamondemand.servers.onefichier cookies=" + cookies)

        # 1fichier.com   TRUE    /   FALSE   1443553315  SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ
        sid_cookie_value = scrapertools.find_single_match(
            cookies, "1fichier.com.*?SID\s+([A-Za-z0-9\+\=]+)")
        logger.info("streamondemand.servers.onefichier sid_cookie_value=" +
                    sid_cookie_value)

        # .1fichier.com  TRUE    /   FALSE   1443553315  SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ
        cookie = urllib.urlencode({"SID": sid_cookie_value})

        # Find out the real file name
        filename = scrapertools.get_header_from_response(
            page_url, header_to_get="Content-Disposition")
        logger.info("streamondemand.servers.onefichier filename=" + filename)

        # Build the final URL for Kodi
        location = page_url + "|Cookie=" + cookie
        logger.info("streamondemand.servers.onefichier location=" + location)

        video_urls.append([filename[-4:] + " (Premium) [1fichier]", location])

    for video_url in video_urls:
        logger.info("streamondemand.servers.onefichier %s - %s" %
                    (video_url[0], video_url[1]))

    return video_urls
Example #18
def peliculas(item):
    logger.info("streamondemand.italiafilmvideo peliculas")
    itemlist = []
    data = anti_cloudflare(item.url)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.italiafilm.video.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    # Extract the entries (folders)
    patron = '<li class="item">\s*<a class="poster" href="([^"]+)" title="([^"]+)">\s*<img src="([^"]+)"[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        #scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Extract the pager
    patronvideos = '<a class="next page-numbers" href="([^"]+)">Next &#8250;'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo>>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True))

    return itemlist
def peliculas(item):
    logger.info("streamondemand.instreaminginfo peliculas")
    itemlist = []

    # Download the page
    data = anti_cloudflare(item.url)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.instreaming.info.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------


    # Extract the entries (folders)
    patron = '<div class="item">\s*<a href="(.*?)">\s*[^>]+>\s*<img src="(.*?)" alt="(.*?)"[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if (DEBUG): logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        ## ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        ## ------------------------------------------------
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Extract the pager

    next_page = scrapertools.find_single_match(data, '<link rel="next" href="(.*?)" />')
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
def peliculas(item):
    logger.info("streamondemand.portalehd peliculas")
    itemlist = []

    # Download the page
    data = anti_cloudflare(item.url)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.portalehd.net.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    # Extract the entries (folders)
    patron  = '<div class="ThreeTablo T-FilmBaslik">\s*'
    patron  += '<h2><a href="(.*?)"[^>]+>(.*?)</a></h2>\s*'
    patron  += '</div>\s*'
    patron  += '<a[^>]+>\s*'
    patron  += '<figure><img src="(.*?)"[^>]+>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        #response = urllib2.urlopen(scrapedurl, headers=headers)
        #html = response.read()
        #start = html.find("<div class=\"DetayAciklama-sol left\">")
        #end = html.find("</p>", start)
        #scrapedplot = html[start:end]
        #scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        #scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedplot = ""
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        ## ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        ## ------------------------------------------------
        itemlist.append( Item(channel=__channel__, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle, title=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )

    # Extract the pager
    patronvideos  = '<a class="nextpostslink" rel="next" href="(.*?)">Avanti »</a>'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches)>0:
        scrapedurl = urlparse.urljoin(item.url,matches[0])
        itemlist.append( Item(channel=__channel__, action="peliculas", title="[COLOR orange]Successivo >>[/COLOR]" , url=scrapedurl , thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png", folder=True) )

    return itemlist
def fichas(item):
    logger.info("[vediserie.py] fichas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.vediserie.com.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<h2>[^>]+>\s*'
    patron += '<img[^=]+=[^=]+=[^=]+="([^"]+)"[^>]+>\s*'
    patron += '<A HREF=([^>]+)>[^>]+>[^>]+>[^>]+>\s*'
    patron += '[^>]+>[^>]+>(.*?)</[^>]+>[^>]+>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedthumbnail += "|" + _headers
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if scrapedtitle.startswith('<span class="year">'):
            scrapedtitle = scrapedtitle[19:]

        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="episodios",
                         title=scrapedtitle,
                         fulltitle=scrapedtitle,
                         url=scrapedurl.replace('"', ''),
                         show=scrapedtitle,
                         thumbnail=scrapedthumbnail),
                    tipo='tv'))

    patron = '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=next_page))

    return itemlist
def fichas(item):
    logger.info("[seriehd.py] fichas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.seriehd.org.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<h2>(.*?)</h2>\s*'
    patron += '<img src="([^"]+)" alt="[^"]*"/>\s*'
    patron += '<A HREF="([^"]+)">'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedthumbnail, scrapedurl in matches:
        scrapedthumbnail += "|" + _headers
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="episodios",
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         fulltitle=scrapedtitle,
                         url=scrapedurl,
                         show=scrapedtitle,
                         thumbnail=scrapedthumbnail),
                    tipo='tv'))

    patron = "<span class='current'>\d+</span><a rel='nofollow' class='page larger' href='([^']+)'>\d+</a>"
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(
                channel=__channel__,
                action="fichas",
                title="[COLOR orange]Successivo>>[/COLOR]",
                url=next_page,
                thumbnail=
                "https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/successivo_P.png"
            ))

    return itemlist
def fichas(item):
    logger.info("[vediserie.py] fichas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.vediserie.com.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<h2>[^>]+>\s*'
    patron += '<img[^=]+=[^=]+=[^=]+="([^"]+)"[^>]+>\s*'
    patron += '<A HREF=([^>]+)>[^>]+>[^>]+>[^>]+>\s*'
    patron += '[^>]+>[^>]+>(.*?)</[^>]+>[^>]+>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedthumbnail += "|" + _headers
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if scrapedtitle.startswith('<span class="year">'):
            scrapedtitle = scrapedtitle[19:]

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodios",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl.replace('"', ''),
                 show=scrapedtitle,
                 thumbnail=scrapedthumbnail), tipo='tv'))

    patron = '<span class=\'current\'>[^<]+</span><a class="page larger" href="(.*?)">'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=next_page,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/successivo_P.png"))

    return itemlist
def fichas(item):
    logger.info("[seriehd.py] fichas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)

    # ------------------------------------------------
    cookies = ""
    matches = re.compile("(.seriehd.org.*?)\n", re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split("\t")[5]
        value = cookie.split("\t")[6]
        cookies += name + "=" + value + ";"
    headers.append(["Cookie", cookies[:-1]])
    import urllib

    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = "<h2>(.*?)</h2>\s*"
    patron += '<img src="([^"]+)" alt="[^"]*"/>\s*'
    patron += '<A HREF="([^"]+)">'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedthumbnail, scrapedurl in matches:
        scrapedthumbnail += "|" + _headers
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        itemlist.append(
            infoSod(
                Item(
                    channel=__channel__,
                    action="episodios",
                    title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                    fulltitle=scrapedtitle,
                    url=scrapedurl,
                    show=scrapedtitle,
                    thumbnail=scrapedthumbnail,
                ),
                tipo="tv",
            )
        )

    patron = "<span class='current'>\d+</span><a rel='nofollow' class='page larger' href='([^']+)'>\d+</a>"
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__, action="fichas", title="[COLOR orange]Successivo>>[/COLOR]", url=next_page)
        )

    return itemlist
def peliculas(item):
    logger.info("streamondemand.playcinema peliculas")
    itemlist = []

    data = anti_cloudflare(item.url)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.playcinema.org.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    # Extract the entries (folders)
    patron = '<div class="moviefilm">\s*'
    patron += '<a href="(.*?)">\s*'
    patron += '<img src="(.*?)" alt="(.*?)"[^>]+></a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        #response = urllib2.urlopen(scrapedurl)
        #html = response.read()
        #start = html.find("<div class=\"filmicerik\">")
        #start = html.find("<p><span style=\"font-family: Arial, Helvetica, sans-serif;\">")
        #end = html.find("<span style=\"font-size: xx-small;\">+Info", start)
        #end = html.find("</font></a><br />", start)
        #scrapedplot = html[start:end]
        #scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        #scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedplot = ""
        scrapedtitle=scrapertools.decodeHtmlentities(scrapedtitle.replace("Streaming",""))
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=__channel__, action="findvideos", fulltitle=scrapedtitle, show=scrapedtitle, title="[COLOR azure]"+scrapedtitle+"[/COLOR]" , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )

    # Extract the pager
    patronvideos  = '<a class="nextpostslink" rel="next" href="(.*?)">&raquo;</a>'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches)>0:
        scrapedurl = urlparse.urljoin(item.url,matches[0])
        itemlist.append( Item(channel=__channel__, action="peliculas", title="[COLOR orange]Successivo>>[/COLOR]" , url=scrapedurl , thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png", folder=True) )

    return itemlist
def fichas(item):
    logger.info("[vediserie.py] fichas")
    itemlist = []

    data = anti_cloudflare(item.url)

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.vediserie.com.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<h2>[^>]+>\s*'
    patron += '<img[^=]+=[^=]+=[^=]+="([^"]+)"[^>]+>\s*'
    patron += '<A HREF=([^>]+)>[^>]+>[^>]+>[^>]+>\s*'
    patron += '[^>]+>[^>]+>(.*?)</[^>]+>[^>]+>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedthumbnail += "|" + _headers
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if scrapedtitle.startswith('<span class="year">'):
            scrapedtitle = scrapedtitle[19:]
        itemlist.append(
                Item(channel=__channel__,
                     action="episodios",
                     title=scrapedtitle,
                     fulltitle=scrapedtitle,
                     url=scrapedurl.replace('"', ''),
                     show=scrapedtitle,
                     thumbnail=scrapedthumbnail))

    patron = '<a class="nextpostslink" rel="next" href="([^"]+)">»</a>'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
                Item(channel=__channel__,
                     action="fichas",
                     title="[COLOR orange]Successivo>>[/COLOR]",
                     url=next_page))

    return itemlist
Example #27
def get_megaupload_cookie():
    logger.info("[megaupload.py] get_megaupload_cookie")
    cookie_data = config.get_cookie_data()
    patron = 'user="([^"]+)".*?domain=".megaupload.com"'
    matches = re.compile(patron, re.DOTALL).findall(cookie_data)

    if len(matches) == 0:
        patron = 'user=([^\;]+);.*?domain=".megaupload.com"'
        matches = re.compile(patron, re.DOTALL).findall(cookie_data)

    if len(matches) == 0:
        cookie = ""
    else:
        cookie = matches[0]

    return cookie
def get_megaupload_cookie():
    logger.info("[megaupload.py] get_megaupload_cookie")
    cookie_data = config.get_cookie_data()
    patron  = 'user="([^"]+)".*?domain=".megaupload.com"'
    matches = re.compile(patron,re.DOTALL).findall(cookie_data)
    
    if len(matches)==0:
        patron  = 'user=([^\;]+);.*?domain=".megaupload.com"'
        matches = re.compile(patron,re.DOTALL).findall(cookie_data)
        
    if len(matches)==0:
        cookie = ""
    else:
        cookie = matches[0]
    
    return cookie
def episodios(item):
    logger.info("streamondemand.channels.guardaserie episodios")

    itemlist = []

    data = anti_cloudflare(item.url)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.guardaserie.net.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------
    
    serie_id = scrapertools.get_match( data, '/?id=(\d+)" rel="nofollow"' )

    data = scrapertools.get_match( data, '<div id="episode">(.*?)</div>' )

    seasons_episodes = re.compile( '<select name="episode" id="(\d+)">(.*?)</select>', re.DOTALL ).findall( data )

    for scrapedseason, scrapedepisodes in seasons_episodes:

        episodes = re.compile( '<option value="(\d+)"', re.DOTALL ).findall( scrapedepisodes )
        for scrapedepisode in episodes:

            season = str ( int( scrapedseason ) + 1 )
            episode = str ( int( scrapedepisode ) + 1 )
            if len( episode ) == 1: episode = "0" + episode

            title = season + "x" + episode + " - " + item.title

            ## Pass 'findvideos' a url with three parts separated by the "?" character
            ## [host+path]?[arguments]?[Referer]
            url = host + "/wp-admin/admin-ajax.php?action=get_episode&id=" + serie_id + "&season=" + scrapedseason + "&episode=" + scrapedepisode + "?" + item.url 

            itemlist.append( Item( channel=__channel__, action="findvideos", title= title, url=url, fulltitle=item.title, show=item.title, thumbnail=item.thumbnail ) )

    if config.get_library_support():
        itemlist.append( Item(channel=__channel__, title="[COLOR azure]Aggiungi [/COLOR]" + item.title + "[COLOR azure] alla libreria di Kodi[/COLOR]", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show) )
        itemlist.append( Item(channel=__channel__, title="[COLOR azure]Scarica tutti gli episodi della serie[/COLOR]", url=item.url, action="download_all_episodes", extra="episodios", show=item.show) )

    return itemlist
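
The comments in episodios() above document a channel-internal convention: the url handed to findvideos packs three pieces separated by "?", where the first "?" belongs to the admin-ajax query string itself. A sketch of how the receiving side could unpack it, assuming the Referer part contains no further "?" (illustrative; the channel's real findvideos is not shown in these examples):

# Hypothetical unpacking inside findvideos(item):
parts = item.url.split("?")
ajax_url = parts[0] + "?" + parts[1]  # host+path plus its query arguments
referer = parts[2]                    # the original series page, sent as Referer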
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("streamondemand.servers.onefichier get_video_url(page_url='%s')" % page_url)

    video_urls = []

    if config.get_setting("onefichierpremium")=="true":

        user = config.get_setting("onefichieruser")
        password = config.get_setting("onefichierpassword")

        url = "https://1fichier.com/login.pl"
        logger.info("streamondemand.servers.onefichier url="+url)
        post_parameters = {"mail":user,"pass":password,"lt":"on","purge":"on","valider":"Send"}
        post = urllib.urlencode(post_parameters)
        logger.info("streamondemand.servers.onefichier post="+post)

        data = scrapertools.cache_page(url, post=post)
        #logger.info("streamondemand.servers.onefichier data="+data)

        cookies = config.get_cookie_data()
        logger.info("streamondemand.servers.onefichier cookies="+cookies)

        #1fichier.com   TRUE    /   FALSE   1443553315  SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ
        sid_cookie_value = scrapertools.find_single_match(cookies,"1fichier.com.*?SID\s+([A-Za-z0-9\+\=]+)")
        logger.info("streamondemand.servers.onefichier sid_cookie_value="+sid_cookie_value)

        #.1fichier.com  TRUE    /   FALSE   1443553315  SID imC3q8MQ7cARw5tkXeWvKyrH493rR=1yvrjhxDAA0T0iEmqRfNF9GXwjrwPHssAQ
        cookie = urllib.urlencode({"SID":sid_cookie_value})

        # Find out the real file name
        headers = []
        headers.append(['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12'])
        headers.append(['Cookie', cookie])
        filename = scrapertools.get_header_from_response(page_url,header_to_get="Content-Disposition")
        logger.info("streamondemand.servers.onefichier filename="+filename)

        # Build the final URL for Kodi
        location = page_url+"|Cookie="+cookie
        logger.info("streamondemand.servers.onefichier location="+location)

        video_urls.append( [filename[-4:]+" (Premium) [1fichier]" , location] )

    for video_url in video_urls:
        logger.info("streamondemand.servers.onefichier %s - %s" % (video_url[0],video_url[1]))

    return video_urls
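For context, the "url|headers" form built above follows the Kodi convention of appending urlencoded HTTP headers after a "|" separator; a minimal sketch with placeholder values:

import urllib

headers = {"Cookie": "SID=abc123"}  # hypothetical cookie value
location = "https://1fichier.com/?xyz" + "|" + urllib.urlencode(headers)
# -> "https://1fichier.com/?xyz|Cookie=SID%3Dabc123"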
def dettaglio(item):
    log("animetubeita", "dettaglio")

    itemlist = []
    headers = {
        'Upgrade-Insecure-Requests':
        '1',
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'
    }

    episodio = 1
    patron = '<a href="http:\/\/link[^a]+animetubeita[^c]+com\/[^\/]+\/[^s]+stream[^p]+php(\?.*?)"'
    for scrapedurl in scrapedAll(item.url, patron):
        title = "Episodio " + str(episodio)
        episodio += 1
        url = host + "/stream.php" + scrapedurl
        headers['Referer'] = url
        data = httptools.downloadpage(url, headers=headers).data
        # ------------------------------------------------
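        # The cookie store uses the Netscape cookie-jar format: one cookie per
        # line, tab-separated, with the name in field 5 and the value in field 6.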
        cookies = ""
        matches = re.compile('(.animetubeita.com.*?)\n',
                             re.DOTALL).findall(config.get_cookie_data())
        for cookie in matches:
            name = cookie.split('\t')[5]
            value = cookie.split('\t')[6]
            cookies += name + "=" + value + ";"
        headers['Cookie'] = cookies[:-1]
        # ------------------------------------------------
        url = scrapertools.find_single_match(
            data, """<source src="([^"]+)" type='video/mp4'>""")
        url += '|' + urllib.urlencode(headers)
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=url,
                 thumbnail=item.thumbnail,
                 fanart=item.thumbnail,
                 plot=item.plot))

    return itemlist
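The tab-split cookie loop above recurs throughout these examples; a minimal helper that factors it out might look like this (cookies_for_domain is a sketch, not part of the original code):

import re

def cookies_for_domain(cookie_data, domain):
    # Netscape cookie-jar lines are tab-separated; field 5 holds the cookie
    # name and field 6 its value.
    pairs = []
    for line in re.compile('(%s.*?)\n' % re.escape(domain), re.DOTALL).findall(cookie_data):
        fields = line.split('\t')
        pairs.append(fields[5] + "=" + fields[6])
    return ";".join(pairs)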
Beispiel #32
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("(page_url=%s)" % page_url)

    data = httptools.downloadpage(page_url).data

    media_url = scrapertools.find_single_match(data, '"video"[^,]+,"url":"([^"]+)"').replace('\\','')
    data_cookie = config.get_cookie_data()
    cfduid = scrapertools.find_single_match(
        data_cookie, '.vimple.ru.*?(__cfduid\t[a-f0-9]+)').replace('\t', '=')
    univid = scrapertools.find_single_match(
        data_cookie, '.vimple.ru.*?(UniversalUserID\t[a-f0-9]+)').replace('\t', '=')
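    # The jar stores "name<TAB>value"; replacing the tab with "=" yields the
    # "name=value" form expected in a Cookie header.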

    media_url += "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0" \
                 "&Cookie=%s; %s" % (cfduid, univid)

    video_urls = []
    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [vimple.ru]", media_url])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
Beispiel #33
0
def fichas(item):
    logger.info("streamondemand.channels.guardaserie fichas")

    itemlist = []

    data = anti_cloudflare(item.url)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.guardaserie.net.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    data = scrapertools.find_single_match(
        data, '<a[^>]+>Serie Tv</a><ul>(.*?)</ul>')

    patron = '<li><a href="([^"]+)[^>]+>([^<]+)</a></li>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:

        itemlist.append(
            Item(channel=__channel__,
                 action="episodios",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 url=scrapedurl))

    return itemlist
def peli_cat(item):
    logger.info("streamondemand.cinemano peliculas")
    itemlist = []

    # Download the page
    #data = scrapertools.cache_page(item.url)

    data = anti_cloudflare(item.url)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.cinenano.net.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    # Extract the entries (folders)
    patron = '<div class="image">\s*<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" />'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        if DEBUG: logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Extract the pager
    patronvideos = '<link rel="next" href="(.*?)" />'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
def listanime(item):
    logger.info("[cineblog01.py] mainlist")
    itemlist = []

    # Download the page
    data = anti_cloudflare(item.url)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.cineblog01.cc.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    # Extract the entries (folders)
    patronvideos = '<div class="span4"> <a.*?<img.*?src="(.*?)".*?'
    patronvideos += '<div class="span8">.*?<a href="(.*?)">.*?'
    patronvideos += '<h1>(.*?)</h1></a>.*?<br />(.*?)<br>.*?'
    matches = re.compile(patronvideos, re.DOTALL).finditer(data)

    for match in matches:
        scrapedthumbnail = match.group(1)
        scrapedurl = match.group(2)
        scrapedtitle = scrapertools.unescape(match.group(3))
        scrapedplot = scrapertools.unescape(match.group(4))
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        if scrapedplot.startswith(""):
            scrapedplot = scrapedplot[64:]
        if DEBUG: logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")

        ## ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        ## ------------------------------------------------				

        # Add to the XBMC listing
        itemlist.append(
            Item(channel=__channel__,
                 action="listaaz" if scrapedtitle == "Lista Alfabetica Completa Anime/Cartoon" else "episodios_anime",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 viewmode="movie_with_plot",
                 plot=scrapedplot))

    # Put the next page mark
    try:
        next_page = scrapertools.get_match(data, "<link rel='next' href='([^']+)'")
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="listanime",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
    except:
        pass

    return itemlist
Beispiel #36
0
def fichas(item):
    logger.info("[altadefinizioneclick.py] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality
    data = re.sub(r'<div class="wrapperImage"[^<]+<a',
                  '<div class="wrapperImage"><fix>SD</fix><a', data)
    # fix - IMDB
    data = re.sub(r'<h5> </div>', '<fix>IMDB: 0.0</fix>', data)
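    # Both substitutions inject placeholder markers so the single pattern below
    # also matches cards that lack a quality tag or an IMDB rating.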
    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.altadefinizione.site.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    if "/?s=" in item.url:
        patron = '<div class="col-lg-3 col-md-3 col-xs-3">.*?'
        patron += 'href="([^"]+)".*?'
        patron += '<div class="wrapperImage"[^<]+'
        patron += '<[^>]+>([^<]+)<.*?'
        patron += 'src="([^"]+)".*?'
        patron += 'class="titleFilm">([^<]+)<.*?'
        patron += 'IMDB: ([^<]+)<'
    else:
        patron = '<div class="wrapperImage"[^<]+'
        patron += '<[^>]+>([^<]+)<.*?'
        patron += 'href="([^"]+)".*?'
        patron += 'src="([^"]+)".*?'
        patron += 'href[^>]+>([^<]+)</a>.*?'
        patron += 'IMDB: ([^<]+)<'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scraped_1, scraped_2, scrapedthumbnail, scrapedtitle, scrapedpuntuacion in matches:

        scrapedurl = scraped_2
        scrapedcalidad = scraped_1
        if "/?s=" in item.url:
            scrapedurl = scraped_1
            scrapedcalidad = scraped_2

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        title += " (" + scrapedcalidad + ") (" + scrapedpuntuacion + ")"

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------

        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title="[COLOR azure]" + title + "[/COLOR]",
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=title,
                         show=title),
                    tipo='movie'))

    # Pagination
    next_page = scrapertools.find_single_match(
        data, '<a class="next page-numbers" href="([^"]+)">')
    if next_page != "":
        itemlist.append(
            Item(
                channel=__channel__,
                action="fichas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=next_page,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
            ))

    return itemlist
def fichas(item):
    logger.info("[italiafilmvideohd.py] fichas")

    itemlist = []

    # Download the page
    data = anti_cloudflare(item.url)
    # fix - quality

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.italiafilm.video.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<li class="item">.*?'
    patron += 'href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scraped_2, scrapedtitle, scrapedthumbnail in matches:

        scrapedurl = scraped_2

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # title += " (" + scrapedcalidad + ")

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------
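        # info() can raise; on failure, fall back below to a plain entry
        # without the extra metadata.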

        try:
            plot, fanart, poster, extrameta = info(scrapedtitle)

            itemlist.append(
                Item(channel=__channel__,
                     thumbnail=poster,
                     fanart=fanart if fanart != "" else poster,
                     extrameta=extrameta,
                     plot=str(plot),
                     action="findvideos",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     fulltitle=title,
                     show=scrapedtitle,
                     folder=True))
        except:
            itemlist.append(
                Item(channel=__channel__,
                     action="findvideos",
                     title=title,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     fulltitle=title,
                     show=scrapedtitle))

    # Pagination
    next_page = ""
    for page in re.compile('<a href="(.+?)" class="single_page" title=".+?">', re.DOTALL).findall(data):
        next_page = page

    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
Beispiel #38
0
def peliculas(item):
    logger.info("pelisalacarta.itastreaming peliculas")
    itemlist = []

    # Download the page
    data = anti_cloudflare(item.url)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.itastreaming.co.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    # Extract the entries (folders)
    patron = '<div class="item">\s*<a href="([^"]+)" title="([^"]+)">\s*<div class="img">\s*<img src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        html = scrapertools.cache_page(scrapedurl, headers=headers)
        start = html.find('<div class="post-content">')
        end = html.find('</div>', start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)

        if DEBUG: logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")

        ## ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        ## ------------------------------------------------

        itemlist.append(
            Item(channel=__channel__,
                 action="findvid",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 viewmode="movie_with_plot",
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Extract the pager
    patronvideos = "class=previouspostslink' href='([^']+)'>Seguente &rsaquo;"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
def fichas(item):
    logger.info("[italiafilmvideohd.py] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality

    # ------------------------------------------------
    cookies = ""
    matches = config.get_cookie_data(item.url).splitlines()[4:]
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<li class="item">.*?'
    patron += 'href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scraped_2, scrapedtitle, scrapedthumbnail in matches:
        scrapedurl = scraped_2

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # title += " (" + scrapedcalidad + ")

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title=title,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=title,
                         show=scrapedtitle),
                    tipo='movie'))

    itemlist.append(
        Item(channel=__channel__,
             action="HomePage",
             title="[COLOR yellow]Torna Home[/COLOR]",
             folder=True))

    # Pagination
    next_page = scrapertools.find_single_match(
        data, '<a href="([^"]+)"\s*><span aria-hidden="true">&raquo;')

    if next_page != "":
        itemlist.append(
            Item(
                channel=__channel__,
                action="fichas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=next_page,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
            ))

    return itemlist
def peliculas(item):
    logger.info("streamondemand.cinemano peliculas")
    itemlist = []

    # Download the page
    #data = scrapertools.cache_page(item.url)

    data = anti_cloudflare(item.url)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.cinenano.net.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    # Extract the entries (folders)
    patron = '<div class="boxinfo">\s*<a href="([^=]+)">\s*<span class="tt">(.*?)</span>\s*<span class="ttx">\s*([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedplot in matches:
        #scrapedplot = ""
        scrapedthumbnail = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Extract the pager
    patronvideos = '<link rel="next" href="(.*?)" />'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True))

    return itemlist
Beispiel #41
0
def peliculas(item):
    logger.info("streamondemand.portalehd peliculas")
    itemlist = []

    # Download the page
    data = anti_cloudflare(item.url)

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.portalehd.net.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    # Extract the entries (folders)
    patron = '<div class="ThreeTablo T-FilmBaslik">\s*'
    patron += '<h2><a href="([^"]+)" title="([^"]+)">.*?</h2>\s*'
    patron += '</div>\s*'
    patron += '<a[^>]+>\s*'
    patron += '<figure><img src="([^"]+)"[^>]+>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        # response = urllib2.urlopen(scrapedurl, headers=headers)
        # html = response.read()
        # start = html.find("<div class=\"DetayAciklama-sol left\">")
        # end = html.find("</p>", start)
        # scrapedplot = html[start:end]
        # scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        # scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedplot = ""
        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        tmdbtitle1 = scrapedtitle.split("Sub ")[0]
        tmdbtitle = tmdbtitle1.split("(")[0]
        try:
            plot, fanart, poster, extrameta = info(tmdbtitle)

            itemlist.append(
                Item(channel=__channel__,
                     thumbnail=poster,
                     fanart=fanart if fanart != "" else poster,
                     extrameta=extrameta,
                     plot=str(plot),
                     action="findvideos",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     folder=True))
        except:
            itemlist.append(
                Item(channel=__channel__,
                     action="findvideos",
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     plot=scrapedplot,
                     folder=True))

    # Extract the pager
    patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">Avanti »</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True))

    return itemlist
def fichas(item):
    logger.info("[italiafilmvideohd.py] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality

    # ------------------------------------------------
    cookies = ""
    matches = config.get_cookie_data(item.url).splitlines()[4:]
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<li class="item">.*?'
    patron += 'href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scraped_2, scrapedtitle, scrapedthumbnail in matches:
        scrapedurl = scraped_2

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # title += " (" + scrapedcalidad + ")

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=scrapedtitle), tipo='movie'))

    itemlist.append(
        Item(channel=__channel__,
             action="HomePage",
             title="[COLOR yellow]Torna Home[/COLOR]",
             folder=True))

    # Pagination
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"\s*><span aria-hidden="true">&raquo;')

    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
Beispiel #43
0
def peliculas(item):
    logger.info("streamondemand.playcinema peliculas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)

    # ------------------------------------------------
    cookies = ""
    matches = config.get_cookie_data(item.url).splitlines()[4:]
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    # Extract the entries (folders)
    patron = '<div class="moviefilm">\s*'
    patron += '<a href="([^"]+)">\s*'
    patron += '<img src="([^"]+)" alt="([^"]+)"[^>]+></a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        # response = urllib2.urlopen(scrapedurl)
        # html = response.read()
        # start = html.find("<div class=\"filmicerik\">")
        # start = html.find("<p><span style=\"font-family: Arial, Helvetica, sans-serif;\">")
        # end = html.find("<span style=\"font-size: xx-small;\">+Info", start)
        # end = html.find("</font></a><br />", start)
        # scrapedplot = html[start:end]
        # scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        # scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(
            scrapedtitle.replace("Streaming", ""))
        scrapedthumbnail += '|' + _headers
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         plot=scrapedplot,
                         folder=True),
                    tipo='movie'))

    # Extract the pager
    patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">&raquo;</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True))

    return itemlist
def peliculas(item):
    logger.info("fusionse.altadefinizione01 peliculas")
    itemlist = []

    # Download the page
    # data = scrapertools.cache_page(item.url)

    data = scrapertools.anti_cloudflare(item.url, headers)

    ## ------------------------------------------------
    cookies = ""
    matches = config.get_cookie_data(item.url).splitlines()[4:]
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    # Extract the entries (folders)
    patron = '<a\s+href="([^"]+)"\s+title="[^"]*">\s+<img\s+width="[^"]*"\s+height="[^"]*"\s+src="([^"]+)"\s+class="[^"]*"\s+alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.replace("Streaming", ""))
        if DEBUG: logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        ## ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        ## ------------------------------------------------

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail), tipo="movie"))

    # Extract the pager
    patronvideos = 'class="nextpostslink" rel="next" href="([^"]+)">&raquo;'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
def fichas(item):
    logger.info("[itastreaming.py] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality
    data = re.sub(r'<div class="wrapperImage"[^<]+<a',
                  '<div class="wrapperImage"><fix>SD</fix><a', data)
    # fix - IMDB
    data = re.sub(r'<h5> </div>', '<fix>IMDB: 0.0</fix>', data)
    # ------------------------------------------------
    cookies = ""
    matches = config.get_cookie_data(item.url).splitlines()
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<div class="item">.*?'
    patron += 'href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title=scrapedtitle,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=scrapedtitle,
                         show=scrapedtitle),
                    tipo='movie'))

    # Pagination
    next_page = scrapertools.find_single_match(
        data, "href='([^']+)'>Seguente &rsaquo;")
    if next_page != "":
        itemlist.append(
            Item(
                channel=__channel__,
                action="fichas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=next_page,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
            ))

    return itemlist
def peliculas(item):
    logger.info("streamondemand.portalehd peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.24hd.online.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    # Extract the entries (folders)
    patron = '<li><img src=".*?src="([^"]+)".*?<a href="([^"]+)".*?class="title">([^<]+)</a>.*?</li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedplot = ""
        #scrapedthumbnail = ""
        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------
        if (DEBUG): logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True), tipo='movie'))

    # Extract the pager
    patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">Avanti »</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
def fichas(item):
    logger.info("[altadefinizioneclick.py] fichas")

    itemlist = []

    # Download the page
    data = anti_cloudflare(item.url)

    ## fix - quality
    data = re.sub(
        r'<div class="wrapperImage"[^<]+<a',
        '<div class="wrapperImage"><fix>SD</fix><a',
        data
    )
    ## fix - IMDB
    #data = re.sub(
    #    r'</h\d> </div>',
    #    '<fix>IMDB: 0.0</fix>',
    #    data
    #)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.altadefinizione.click.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    if "/?s=" in item.url:
        patron = '<div class="col-lg-3 col-md-3 col-xs-3">.*?'
        patron+= 'href="([^"]+)".*?'
        patron+= '<div class="wrapperImage"[^<]+'
        patron+= '<[^>]+>([^<]+)<.*?'
        patron+= 'src="([^"]+)".*?'
        patron+= 'class="titleFilm">([^<]+)<.*?'
        patron+= 'IMDB: ([^<]+)<'
    else:
        patron = '<div class="wrapperImage"[^<]+'
        patron+= '<[^>]+>([^<]+)<.*?'
        patron+= 'href="([^"]+)".*?'
        patron+= 'src="([^"]+)".*?'
        patron+= 'href[^>]+>([^<]+)</a>.*?'
        patron+= 'IMDB: ([^<]+)<'

    matches = re.compile( patron, re.DOTALL ).findall( data )

    for scraped_1, scraped_2, scrapedthumbnail, scrapedtitle, scrapedpuntuacion in matches:

        scrapedurl = scraped_2
        scrapedcalidad = scraped_1
        if "/?s=" in item.url:
            scrapedurl = scraped_1
            scrapedcalidad = scraped_2

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        title += " (" + scrapedcalidad + ") (" + scrapedpuntuacion + ")"

        ## ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        ## ------------------------------------------------

        itemlist.append( Item( channel=__channel__, action="findvideos", title=title, url=scrapedurl, thumbnail=scrapedthumbnail, fulltitle=title, show=scrapedtitle ) )

    ## Pagination
    next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
    if next_page != "":
        itemlist.append(Item(channel=__channel__, action="fichas", title=">> Successivo", url=next_page))

    return itemlist
Beispiel #48
0
def fichas(item):
    logger.info("[itastreaming.py] fichas")

    itemlist = []

    # Download the page
    data = anti_cloudflare(item.url)
    # fix - quality
    data = re.sub(r'<div class="wrapperImage"[^<]+<a',
                  '<div class="wrapperImage"><fix>SD</fix><a', data)
    # fix - IMDB
    data = re.sub(r'<h5> </div>', '<fix>IMDB: 0.0</fix>', data)
    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.itastreaming.click.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<div class="item">.*?'
    patron += 'href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:

        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------
        tmdbtitle1 = scrapedtitle.split("[")[0]
        tmdbtitle = tmdbtitle1.split("(")[0]
        year = scrapertools.find_single_match(scrapedtitle, '\((\d+)\)')

        try:
            plot, fanart, poster, extrameta = info(tmdbtitle, year)

            itemlist.append(
                Item(channel=__channel__,
                     thumbnail=poster,
                     fanart=fanart if fanart != "" else poster,
                     extrameta=extrameta,
                     plot=str(plot),
                     action="findvideos",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     folder=True))
        except:
            itemlist.append(
                Item(channel=__channel__,
                     action="findvideos",
                     title=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     fulltitle=scrapedtitle,
                     show=scrapedtitle))

    # Pagination
    next_page = ""
    for page in re.compile('<link rel="next" href="(.+?)"/>', re.DOTALL).findall(data):
        next_page = page
    if next_page != "":
        itemlist.append(
            Item(
                channel=__channel__,
                action="fichas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=next_page,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"
            ))

    return itemlist
def peliculas(item):
    logger.info("streamondemand.altadefinizione01 peliculas")
    itemlist = []

    # Download the page
    # data = scrapertools.cache_page(item.url)

    data = scrapertools.anti_cloudflare(item.url, headers)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.altadefinizione01.click.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    # Extract the entries (folders)
    patron = '<a\s+href="([^"]+)"\s+title="[^"]*">\s+<img\s+width="[^"]*"\s+height="[^"]*"\s+src="([^"]+)"\s+class="[^"]*"\s+alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedplot = ""
        scrapedtitle = scrapertools.decodeHtmlentities(
            scrapedtitle.replace("Streaming", ""))
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        ## ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        ## ------------------------------------------------

        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         title=scrapedtitle,
                         fulltitle=scrapedtitle,
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail),
                    tipo="movie"))

    # Extract the pager
    patronvideos = 'class="nextpostslink" rel="next" href="([^"]+)">&raquo;'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(
                channel=__channel__,
                action="peliculas",
                title="[COLOR orange]Successivo >>[/COLOR]",
                url=scrapedurl,
                thumbnail=
                "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                folder=True))

    return itemlist
Beispiel #50
0
def get_cookie_value(extension=""):
    cookies = config.get_cookie_data()
    cfduid = scrapertools.find_single_match(cookies,"tv-vip.*?__cfduid\s+([A-Za-z0-9\+\=]+)")
    cf_clearance = scrapertools.find_single_match(cookies,"tv-vip.*?cf_clearance\s+([A-Za-z0-9\+\=-]+)")
    cookie = "&Cookie=__cfduid="+cfduid+extension+"; cf_clearance="+cf_clearance
    return cookie
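A hedged usage sketch: the returned fragment is meant to be appended to the header section of a Kodi media URL, after the "|" separator (host and user-agent below are placeholders):

media_url = "http://tv-vip.example/video.mp4|User-Agent=Mozilla/5.0" + get_cookie_value()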
def fichas(item):
    logger.info("[altadefinizioneclick.py] fichas")

    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality
    data = re.sub(
        r'<div class="wrapperImage"[^<]+<a',
        '<div class="wrapperImage"><fix>SD</fix><a',
        data
    )
    # fix - IMDB
    data = re.sub(
        r'<h5> </div>',
        '<fix>IMDB: 0.0</fix>',
        data
    )
    # ------------------------------------------------
    cookies = ""
    matches = config.get_cookie_data(item.url).splitlines()[4:]
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    if "/?s=" in item.url:
        patron = '<div class="col-lg-3 col-md-3 col-xs-3">.*?'
        patron += 'href="([^"]+)".*?'
        patron += '<div class="wrapperImage"[^<]+'
        patron += '<[^>]+>([^<]+)<.*?'
        patron += 'src="([^"]+)".*?'
        patron += 'class="titleFilm">([^<]+)<.*?'
        patron += 'IMDB: ([^<]+)<'
    else:
        patron = '<div class="wrapperImage"[^<]+'
        patron += '<[^>]+>([^<]+)<.*?'
        patron += 'href="([^"]+)".*?'
        patron += 'src="([^"]+)".*?'
        patron += 'href[^>]+>([^<]+)</a>.*?'
        patron += 'IMDB: ([^<]+)<'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scraped_1, scraped_2, scrapedthumbnail, scrapedtitle, scrapedpuntuacion in matches:

        scrapedurl = scraped_2
        scrapedcalidad = scraped_1
        if "/?s=" in item.url:
            scrapedurl = scraped_1
            scrapedcalidad = scraped_2

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        title += " (" + scrapedcalidad + ") (" + scrapedpuntuacion + ")"

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------

        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="findvideos",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title), tipo='movie'))

    # Pagination
    next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
def fichas(item):
    logger.info("[seriehd.py] fichas")
    itemlist = []

    data = anti_cloudflare(item.url)

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.seriehd.org.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<h2>(.*?)</h2>\s*'
    patron += '<img src="([^"]+)" alt="[^"]*"/>\s*'
    patron += '<A HREF="([^"]+)">'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedthumbnail, scrapedurl in matches:
        scrapedthumbnail += "|" + _headers
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()

        tmdbtitle = scrapedtitle.split("(")[0]
        try:
            plot, fanart, poster, extrameta = info(tmdbtitle)

            itemlist.append(
                Item(channel=__channel__,
                     thumbnail=poster,
                     fanart=fanart if fanart != "" else poster,
                     extrameta=extrameta,
                     plot=str(plot),
                     action="episodios",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     folder=True))
        except:
            itemlist.append(
                Item(channel=__channel__,
                     action="episodios",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     show=scrapedtitle,
                     thumbnail=scrapedthumbnail))

    patron = "<span class='current'>\d+</span><a rel='nofollow' class='page larger' href='([^']+)'>\d+</a>"
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
                Item(channel=__channel__,
                     action="fichas",
                     title="[COLOR orange]Successivo>>[/COLOR]",
                     url=next_page))

    return itemlist
def peliculas(item):
    logger.info("streamondemand.altadefinizione01 peliculas")
    itemlist = []

    # Download the page
    # data = scrapertools.cache_page(item.url)

    data = anti_cloudflare(item.url)

    ## ------------------------------------------------
    cookies = ""
    matches = re.compile('(.altadefinizione01.com.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    ## ------------------------------------------------

    # Extract the entries (folders)
    patron = '<a\s+href="([^"]+)"\s+title="[^"]*">\s+<img\s+width="[^"]*"\s+height="[^"]*"\s+src="([^"]+)"\s+class="[^"]*"\s+alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        html = scrapertools.cache_page(scrapedurl, headers=headers)
        start = html.find("<div class=\"aciklama\">")
        end = html.find("<div class=\'bMavi\'>Titolo originale:", start)
        scrapedplot = html[start:end]
        scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
        scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.replace("Streaming", ""))
        if DEBUG: logger.info(
            "title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        ## ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        ## ------------------------------------------------
        itemlist.append(
            Item(channel=__channel__,
                 action="findvid",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 url=scrapedurl,
                 viewmode="movie_with_plot",
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Extract the pager
    patronvideos = 'class="nextpostslink" rel="next" href="([^"]+)">&raquo;'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
Beispiel #54
0
def fichas(item):
    logger.info("[italiafilmvideohd.py] fichas")

    itemlist = []

    # Download the page
    data = anti_cloudflare(item.url)
    # fix - quality

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.italiafilm.video.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<li class="item">.*?'
    patron += 'href="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scraped_2, scrapedtitle, scrapedthumbnail in matches:

        scrapedurl = scraped_2

        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # title += " (" + scrapedcalidad + ")

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------

        try:
            plot, fanart, poster, extrameta = info(scrapedtitle)

            itemlist.append(
                Item(channel=__channel__,
                     thumbnail=poster,
                     fanart=fanart if fanart != "" else poster,
                     extrameta=extrameta,
                     plot=str(plot),
                     action="findvideos",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     fulltitle=title,
                     show=scrapedtitle,
                     folder=True))
        except:
            itemlist.append(
                Item(channel=__channel__,
                     action="findvideos",
                     title=title,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     fulltitle=title,
                     show=scrapedtitle))

    # Pagination (guard against no matches: the original reused the
    # findall() list as the url, which is never the empty string)
    matches = re.compile('<a href="(.+?)" class="single_page" title=".+?">',
                         re.DOTALL).findall(data)
    next_page = matches[-1] if matches else ""

    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
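# ------------------------------------------------------------------
# Hedged sketch (illustration only): the try/except around info() above
# implements an enrichment fallback; when the metadata lookup fails the
# item is built from the scraped title and thumbnail alone. Item,
# __channel__ and info() are the names already used by this channel;
# build_item is a hypothetical helper.
def build_item(scrapedtitle, scrapedurl, scrapedthumbnail):
    try:
        plot, fanart, poster, extrameta = info(scrapedtitle)
        return Item(channel=__channel__, action="findvideos",
                    title=scrapedtitle, url=scrapedurl,
                    thumbnail=poster,
                    fanart=fanart if fanart != "" else poster,
                    plot=str(plot), extrameta=extrameta, folder=True)
    except:
        # lookup failed: fall back to the artwork scraped from the page
        return Item(channel=__channel__, action="findvideos",
                    title=scrapedtitle, url=scrapedurl,
                    thumbnail=scrapedthumbnail, folder=True)
# ------------------------------------------------------------------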
Beispiel #55
0
def fichas(item):
    logger.info("[vediserie.py] fichas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, headers)

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.vediserie.com.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<h2>[^>]+>\s*'
    patron += '<img[^=]+=[^=]+=[^=]+="([^"]+)"[^>]+>\s*'
    patron += '<A HREF=([^>]+)>[^>]+>[^>]+>[^>]+>\s*'
    patron += '[^>]+>[^>]+>(.*?)</[^>]+>[^>]+>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedthumbnail += "|" + _headers
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        if scrapedtitle.startswith('<span class="year">'):
            scrapedtitle = scrapedtitle[19:]
        try:
            plot, fanart, poster, extrameta = info_tv(scrapedtitle)

            itemlist.append(
                Item(channel=__channel__,
                     thumbnail=poster,
                     fanart=fanart if fanart != "" else poster,
                     extrameta=extrameta,
                     plot=str(plot),
                     action="episodios",
                     title=scrapedtitle,
                     url=scrapedurl.replace('"', ''),
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     folder=True))
        except:
            itemlist.append(
                Item(channel=__channel__,
                     action="episodios",
                     title=scrapedtitle,
                     fulltitle=scrapedtitle,
                     url=scrapedurl.replace('"', ''),
                     show=scrapedtitle,
                     thumbnail=scrapedthumbnail))

    patron = '<span class=\'current\'>[^<]+</span><a class="page larger" href="(.*?)">'
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=next_page,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/successivo_P.png"))

    return itemlist
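# ------------------------------------------------------------------
# Hedged sketch (illustration only): the cookie block above parses
# Netscape cookie-jar lines, which are tab-separated as
#   domain  include_subdomains  path  secure  expiry  name  value
# so fields [5] and [6] are the cookie name and value. Sample line
# (the cf_clearance value is made up):
line = ".vediserie.com\tTRUE\t/\tFALSE\t1893456000\tcf_clearance\tabc123"
fields = line.split('\t')
cookie = fields[5] + "=" + fields[6]  # -> "cf_clearance=abc123"
# ------------------------------------------------------------------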
Beispiel #56
0
def searchfilm(item):
    logger.info("[itastreaming.py] searchfilm")

    itemlist = []

    # Download the page
    data = anti_cloudflare(item.url)
    # fix - quality
    data = re.sub(
        r'<div class="wrapperImage"[^<]+<a',
        '<div class="wrapperImage"><fix>SD</fix><a',
        data
    )
    # fix - IMDB
    data = re.sub(
        r'<h5> </div>',
        '<fix>IMDB: 0.0</fix>',
        data
    )
    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.itastreaming.co.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<li class="s-item">.*?'
    patron += 'src="([^"]+)".*?'
    patron += 'alt="([^"]+)".*?'
    patron += 'href="([^"]+)".*?'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:

        title = scrapertools.decodeHtmlentities(scrapedtitle)

        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------

        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title=title,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=scrapedtitle))

    # Pagination (guard against no matches: the original reused the
    # findall() list as the url, which is never the empty string)
    matches = re.compile('<link rel="next" href="(.+?)"/>', re.DOTALL).findall(data)
    next_page = matches[-1] if matches else ""
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=next_page,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))

    return itemlist
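# ------------------------------------------------------------------
# Hedged sketch (illustration only): the re.sub "fix" calls above
# normalise the page before scraping, injecting a placeholder where a
# field (quality, IMDB rating) is missing so a single patron covers
# both layouts. Self-contained example with inline sample HTML:
import re

html = '<div class="wrapperImage"><a href="#">x</a>'
html = re.sub(r'<div class="wrapperImage"[^<]+<a',
              '<div class="wrapperImage"><fix>SD</fix><a', html)
# html -> '<div class="wrapperImage"><fix>SD</fix><a href="#">x</a>'
# ------------------------------------------------------------------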
Beispiel #57
0
def fichas(item):
    logger.info("[seriehd.py] fichas")
    itemlist = []

    data = anti_cloudflare(item.url)

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.seriehd.org.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<h2>(.*?)</h2>\s*'
    patron += '<img src="([^"]+)" alt="[^"]*"/>\s*'
    patron += '<A HREF="([^"]+)">'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedthumbnail, scrapedurl in matches:
        scrapedthumbnail += "|" + _headers
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()

        tmdbtitle = scrapedtitle.split("(")[0]
        try:
            plot, fanart, poster, extrameta = info(tmdbtitle)

            itemlist.append(
                Item(channel=__channel__,
                     thumbnail=poster,
                     fanart=fanart if fanart != "" else poster,
                     extrameta=extrameta,
                     plot=str(plot),
                     action="episodios",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     folder=True))
        except:
            itemlist.append(
                Item(channel=__channel__,
                     action="episodios",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     show=scrapedtitle,
                     thumbnail=scrapedthumbnail))

    patron = "<span class='current'>\d+</span><a rel='nofollow' class='page larger' href='([^']+)'>\d+</a>"
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=next_page))

    return itemlist
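# ------------------------------------------------------------------
# Hedged sketch (illustration only): the pager pattern above anchors on
# the WordPress "<span class='current'>" marker and captures the href
# of the adjacent "page larger" link. Self-contained example:
import re

html = ("<span class='current'>2</span>"
        "<a rel='nofollow' class='page larger' href='/serie/page/3/'>3</a>")
m = re.search(r"<span class='current'>\d+</span>"
              r"<a rel='nofollow' class='page larger' href='([^']+)'>", html)
next_page = m.group(1) if m else ""  # -> "/serie/page/3/"
# ------------------------------------------------------------------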
Beispiel #58
0
def peliculas(item):
    logger.info("streamondemand.portalehd peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.anti_cloudflare(item.url, headers)

    # ------------------------------------------------
    cookies = ""
    matches = re.compile('(.24hd.online.*?)\n',
                         re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    # Extract the entries (folders)
    patron = '<li><img src=".*?src="([^"]+)".*?<a href="([^"]+)".*?class="title">([^<]+)</a>.*?</li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        scrapedplot = ""
        #scrapedthumbnail = ""
        # ------------------------------------------------
        scrapedthumbnail += "|" + _headers
        # ------------------------------------------------
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="findvideos",
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         plot=scrapedplot,
                         folder=True),
                    tipo='movie'))

    # Extract the pager
    patronvideos = '<a class="nextpostslink" rel="next" href="([^"]+)">Avanti »</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR orange]Successivo >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                 folder=True))

    return itemlist
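# ------------------------------------------------------------------
# Hedged sketch (illustration only): the "Successivo >>" item above
# stores action="peliculas" and the next page's url, so paging works by
# the plugin framework calling this same function again with that item.
# "run" is a hypothetical stand-in for that dispatcher; findvideos is
# the resolver action named in the items built above.
def run(item):
    if item.action == "peliculas":
        return peliculas(item)   # scrape the page held in item.url
    elif item.action == "findvideos":
        return findvideos(item)  # resolve the selected movie
# ------------------------------------------------------------------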