Example #1
def list_all(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)

    patron = '<div style="float.*?<a href="([^"]+)">.*?src="([^"]+)".*?data-original-title="([^"]+)">'
    matches = re.compile(patron, re.DOTALL).findall(data)


    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:

        url = scrapedurl
        thumbnail = scrapedthumbnail
        title = scrapertools.decodeHtmlentities(scrapedtitle)

        itemlist.append(Item(channel=item.channel,
                             action='seasons',
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             contentSerieName=scrapedtitle,
                             context=filtertools.context(item, list_language, list_quality),
                             ))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination

    if itemlist:
        next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
        if next_page:
            itemlist.append(Item(channel=item.channel,
                                 action="list_all",
                                 title='Siguiente >>>',
                                 url=next_page,
                                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
                                 ))
    return itemlist
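
Several of these examples call get_source without showing its definition. A minimal sketch, assuming it follows the usual Alfa channel convention of downloading the page and flattening whitespace (the exact cleanup rules, and extra keyword arguments such as ctype in Example #18, vary per channel):

def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    # Collapse newlines, tabs and repeated spaces so the DOTALL patterns
    # used above can stay simple.
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', '', data)
    return data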
Example #2
def seasons(item):
    logger.info()

    itemlist = list()
    page = create_soup(item.url)
    tags = get_tags(page, item.contentSerieName)
    soup = page.find("div", id="seasons")

    matches = soup.find_all("div", class_="se-c")

    infoLabels = item.infoLabels

    for elem in matches:
        season = elem.find("span", class_="se-t").text
        title = "Temporada %s %s" % (season, tags)
        infoLabels["season"] = season

        itemlist.append(
            Item(channel=item.channel,
                 title=title,
                 url=item.url,
                 action='episodesxseasons',
                 context=filtertools.context(item, list_language,
                                             list_quality),
                 infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, True)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(
                channel=item.channel,
                title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                url=item.url,
                action="add_serie_to_library",
                extra="episodios",
                contentSerieName=item.contentSerieName))

    return itemlist
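
create_soup is likewise a channel helper whose definition is not shown. A sketch under the assumption that it wraps httptools and BeautifulSoup the way most Alfa channels do:

from bs4 import BeautifulSoup

def create_soup(url, **kwargs):
    data = httptools.downloadpage(url, **kwargs).data
    # "html5lib" tolerates the malformed markup these sites often serve.
    return BeautifulSoup(data, "html5lib")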
Example #3
def search(item, texto):
    logger.info("texto: %s" % texto)
    itemlist = []
    data = httptools.downloadpage(
        urlparse.urljoin(HOST, "/buscar.php?term=%s" % texto)).data
    data_dict = jsontools.load(data)
    try:
        tvshows = data_dict["myData"]
    except:
        return []
    for show in tvshows:
        itemlist.append(
            item.clone(action="episodios",
                       context=filtertools.context(item, list_idiomas,
                                                   list_quality),
                       contentSerieName=show["titulo"],
                       thumbnail=urlparse.urljoin(HOST, show["img"]),
                       title=show["titulo"],
                       url=urlparse.urljoin(HOST, show["urla"])))
    tmdb.set_infoLabels(itemlist)
    return itemlist
Example #4
def search(item, texto):
    logger.info("texto: %s" % texto)
    itemlist = []
    data_dict = httptools.downloadpage(
        urlparse.urljoin(HOST, "/buscar.php?term=%s" % texto)).json
    try:
        tvshows = data_dict["myData"]
    except:
        return []
    for show in tvshows:
        title = re.sub('\s*\((.*?)\)$', '', show["titulo"])
        itemlist.append(
            item.clone(action="seasons",
                       context=filtertools.context(item, list_idiomas,
                                                   list_quality),
                       contentSerieName=title,
                       thumbnail=urlparse.urljoin(HOST, show["img"]),
                       title=show["titulo"],
                       url=urlparse.urljoin(HOST, show["urla"])))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
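
Examples #3 and #4 query the same /buscar.php endpoint; #4 simply reads the response's parsed json attribute instead of passing .data through jsontools.load, strips a trailing "(...)" qualifier from each title, and routes results to seasons instead of episodios.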
Example #5
def list_from_genre(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)

    patron = '<div style="float.*?<a href="([^"]+)">.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail in matches:
        url = scrapedurl
        thumbnail = scrapedthumbnail
        title = scrapertools.find_single_match(scrapedurl, 'https://seriesblanco.org/capitulos/([^/]+)/')
        title = title.replace('-', ' ').capitalize()

        itemlist.append(Item(channel=item.channel,
                             action='seasons',
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             contentSerieName=title,
                             context=filtertools.context(item, list_language, list_quality),
                             ))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination

    if itemlist:
        next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" ><i class="Next')
        if next_page:
            itemlist.append(Item(channel=item.channel,
                                 action="list_from_genre",
                                 title='Siguiente >>>',
                                 url=next_page,
                                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
                                 ))
    return itemlist
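
scrapertools.find_single_match, used throughout these examples, is a first-match regex helper. A rough equivalent for the single-capture-group case, assuming Alfa's usual behaviour of returning an empty string when nothing matches:

def find_single_match(data, patron):
    match = re.search(patron, data, re.DOTALL)
    return match.group(1) if match else ''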
Example #6
def list_all(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    data = data.replace("'", '"')
    patron = '<li><div style=.*?><a href="([^"]+)"><img.*?src="([^"]+)" title="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.strip()
        url = host + scrapedurl
        thumbnail = scrapedthumbnail
        title = scrapertools.decodeHtmlentities(scrapedtitle)

        itemlist.append(Item(channel=item.channel,
                             action='seasons',
                             title=title,
                             url=url,
                             thumbnail=thumbnail,
                             contentSerieName=scrapedtitle,
                             context=filtertools.context(item, list_language, list_quality),
                             ))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Pagination

    if itemlist:
        base_page = scrapertools.find_single_match(item.url, '([^?]+)')
        next_page = scrapertools.find_single_match(data, r'</span><a href=(\?pagina=\d+)>>></a>')
        if next_page:
            itemlist.append(Item(channel=item.channel,
                                 action="list_all",
                                 title='Siguiente >>>',
                                 url=base_page + next_page,
                                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
                                 ))
    return itemlist
Example #7
def novedades(item):
    logger.info()

    itemlist = list()

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "",
                  data)
    data = re.sub(r"<!--.*?-->", "", data)
    logger.debug(data)
    patron = '<a title="([^"]+)" href="([^"]+)".*?>'
    patron += "<img.*?src='([^']+)'"
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedurl, scrapedthumb in matches:
        # patron = "^(.*?)(?:Ya Disponible|Disponible|Disponbile|disponible|\(Actualizada\))$"
        # match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        language = ''
        # language = scrapertools.find_multiple_matches(title,'(Vose|Español|Latino)')
        # for lang in language:
        #     title = title.replace(lang,'')
        # title = title.replace ('Disponible','')
        # title = title.replace('Ya', '')
        # title = title.strip()

        show = scrapertools.find_single_match(title, r"^(.+?) \d+[xX]\d+")
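        # e.g. "Vikingos 5x11" -> show = "Vikingos"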

        itemlist.append(
            Item(channel=item.channel,
                 title=title,
                 url=urlparse.urljoin(HOST, scrapedurl),
                 show=show,
                 action="episodios",
                 thumbnail=scrapedthumb,
                 context=filtertools.context(item, list_idiomas, CALIDADES),
                 language=language))

    return itemlist
Example #8
def search_results(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url)

    for elem in soup.find_all("div", class_="result-item"):

        url = elem.a["href"]
        thumb = elem.img["src"]
        title = elem.img["alt"]
        year = elem.find("span", class_="year").text

        language = get_language(elem)

        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumb,
                        language=language, infoLabels={'year': year})

        if "movies" in url:
            new_item.action = "findvideos"
            new_item.contentTitle = new_item.title
        else:
            new_item.action = "seasons"
            new_item.contentSerieName = new_item.title
            new_item.context = filtertools.context(item, list_language, list_quality)

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    try:
        url_next_page = soup.find_all("a", class_="arrow_pag")[-1]["href"]
    except:
        return itemlist

    itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='search_results'))

    return itemlist
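
get_language here is channel-specific and not shown. A purely illustrative sketch, assuming the result card carries flag images whose alt text names the language (both the markup and the mapping below are assumptions):

IDIOMAS = {"espanol": "Esp", "latino": "Lat", "subtitulado": "VOSE"}

def get_language(elem):
    languages = []
    for flag in elem.find_all("img", class_="flag"):  # hypothetical markup
        lang = IDIOMAS.get(flag["alt"].lower())
        if lang:
            languages.append(lang)
    return languages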
Example #9
def seasons(item):
    logger.info()

    itemlist = list()
    data = AlfaChannel.create_soup(item.url).find_all("script")[-2]
    al = scrapertools.find_single_match(data["src"], 'base64,(.*)')
    fa = base64.b64decode(al)
    id = scrapertools.find_single_match(fa, 'var id=(\d+)')
    post = {"action": "seasons", "id": id}
    soup = AlfaChannel.get_data_by_post(post=post).soup
    matches = soup.find_all("span", class_="se-t")
    infoLabels = item.infoLabels

    for elem in matches:
        season = elem.text
        title = "Temporada %s" % season
        infoLabels["season"] = season

        itemlist.append(
            Item(channel=item.channel,
                 title=title,
                 url=item.url,
                 action='episodesxseason',
                 context=filtertools.context(item, list_idiomas, list_quality),
                 infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, True)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(
                channel=item.channel,
                title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                url=item.url,
                action="add_serie_to_library",
                extra="episodios",
                contentSerieName=item.contentSerieName))

    return itemlist
Example #10
def seasons(item):
    logger.info()

    itemlist = list()
    data = httptools.downloadpage(item.url, canonical=canonical).data
    fom, hash = scrapertools.find_single_match(data,
                                               "fom:(.*?),hash:'([^']+)'")
    json_data = jsontools.load(fom)
    infoLabels = item.infoLabels

    for elem in json_data:
        season = elem
        title = "Temporada %s" % season
        infoLabels["season"] = season
        epi_data = json_data[elem]
        itemlist.append(
            Item(channel=item.channel,
                 title=title,
                 action='episodesxseasons',
                 epi_data=epi_data,
                 hash=hash,
                 context=filtertools.context(item, list_language,
                                             list_quality),
                 infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, True)
    itemlist = sorted(itemlist, key=lambda i: i.contentSeason)
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(
                channel=item.channel,
                title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                url=item.url,
                action="add_serie_to_library",
                extra="episodios",
                contentSerieName=item.contentSerieName))

    return itemlist
Example #11
def search_results(item):
    import urllib
    itemlist = []
    headers = {
        "Origin": "http://www.wikiseriesonline.nu",
        "Accept-Encoding": "gzip, deflate",
        "Host": "www.wikiseriesonline.nu",
        "Accept-Language": "es-ES,es;q=0.8,en;q=0.6",
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
        "Accept": "*/*",
        "Referer": item.url,
        "X-Requested-With": "XMLHttpRequest",
        "Connection": "keep-alive",
        "Content-Length": "7"
    }
    post = {"n": item.text}
    post = urllib.urlencode(post)
    url = host + 'wp-content/themes/wikiSeries/searchajaxresponse.php'
    data = httptools.downloadpage(url, post=post, headers=headers).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

    patron = "<!-- .Posts -->.*?<a href=(.*?)>.*?src=(.*?) .*?titleinst>(.*?)<"
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        if item.text.lower() in scrapedtitle.lower():
            itemlist.append(
                Item(channel=item.channel,
                     title=scrapedtitle,
                     contentSerieName=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=scrapedthumbnail,
                     action='seasons',
                     context=filtertools.context(item, list_language,
                                                 list_quality)))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
Example #12
def extract_series_from_data(item, data):
    itemlist = []
    episode_pattern = re.compile('/capitulo-([0-9]+)/')
    shows = re.findall(
        "<a.+?href=['\"](?P<url>/serie[^'\"]+)[^<]*<img[^>]*src=['\"](?P<img>http[^'\"]+).*?"
        "(?:alt|title)=['\"](?P<name>[^'\"]+)", data)
    for url, img, name in shows:
        try:
            name.decode('utf-8')
        except UnicodeError:
            name = unicode(name, "iso-8859-1",
                           errors="replace").encode("utf-8")

        # logger.debug("Show found: %s -> %s (%s)" % (name, url, img))
        if not episode_pattern.search(url):
            action = "episodios"
        else:
            action = "findvideos"

        itemlist.append(
            item.clone(title=name,
                       url=urlparse.urljoin(HOST, url),
                       action=action,
                       show=name,
                       thumbnail=img,
                       context=filtertools.context(item, list_idiomas,
                                                   CALIDADES)))

    more_pages = re.search('pagina=([0-9]+)">>>', data)
    if more_pages:
        # logger.debug("Adding next page item")
        itemlist.append(item.clone(title="Siguiente >>", extra=item.extra + 1))

    if item.extra > 1:
        # logger.debug("Adding previous page item")
        itemlist.append(item.clone(title="<< Anterior", extra=item.extra - 1))

    return itemlist
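
Note that Examples #11 and #12 are Python 2 code (urllib.urlencode, urlparse.urljoin, unicode), matching the Kodi era these channels come from. Under Python 3 the equivalent POST encoding would be:

from urllib.parse import urlencode

post = urlencode({"n": item.text})  # e.g. "n=lost"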
Example #13
def search_results(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(host + 'finder.php', post=item.post).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = "<a href='(.*?)'>.*?src=(.*?) style.*?value=(.*?)>"

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumb, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel,
                 title=scrapedtitle,
                 url=host + scrapedurl,
                 action="seasons",
                 thumbnail=scrapedthumb,
                 contentSerieName=scrapedtitle,
                 context=filtertools.context(item, list_language,
                                             list_quality)))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
Example #14
def search_results(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)

    patron = '<div style="float.*?<a href="([^"]+)">.*?src="([^"]+)".*?alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumb, scrapedtitle in matches:
        itemlist.append(Item(channel=item.channel,
                             title=scrapedtitle,
                             url=host+scrapedurl,
                             action="seasons",
                             thumbnail=scrapedthumb,
                             contentSerieName=scrapedtitle,
                             context=filtertools.context(item, list_language, list_quality)
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    return itemlist
Example #15
def latest(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url).find("div", class_="body bg4")

    for elem in soup.find_all("article"):
        url = elem.a["href"]
        title = elem.find("div", class_="tit over txtc").text

        itemlist.append(
            Item(channel=item.channel,
                 url=url,
                 title=title,
                 contentSerieName=title,
                 action="seasons",
                 context=filtertools.context(item, list_idiomas,
                                             list_quality)))

    tmdb.set_infoLabels_itemlist(itemlist, True)

    # Pagination

    try:
        next_page = soup.find("a", class_="next")["href"]

        if next_page:
            itemlist.append(
                Item(channel=item.channel,
                     title="Siguiente >>",
                     url=next_page,
                     action='latest'))
    except:
        pass

    return itemlist
Example #16
def list_all(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url).find("div", id="content")
    matches = soup.find_all("div", class_="swiper-container")[item.pos].find_all("div", class_="swiper-slide")

    for elem in matches:
        url = elem.a["href"]
        title = elem.find("div", class_="card-title").text.strip()
        year = elem.find("div", class_="card-subtitle").text.strip()
        if item.pos == 1:
            content_title = title
            title = "%s - %s" % (title, year)
        thumb = elem.img["src"]

        new_item = Item(channel=item.channel, title=title, url=url, thumbnail=thumb, infoLabels={"year": year})

        if item.pos != 4:

            if item.pos == 1:
                new_item.contentSerieName = content_title
                new_item.action = "findvideos"
            else:
                new_item.contentSerieName = title
                new_item.action = "episodios"
            new_item.context = filtertools.context(item, list_language, list_quality)
        else:
            new_item.contentTitle = title
            new_item.action = "findvideos"

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    return itemlist
Example #17
def alpha_list(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url).find_all("div", class_="row form-group")

    for elem in soup:
        info = elem.h4

        if not info:
            continue

        thumb = elem.img["src"]
        url = elem.h4.a["href"]
        title = elem.h4.a.text

        itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=thumb, contentSerieName=title,
                             action='seasons', context=filtertools.context(item, list_idiomas, list_quality)))

    if item.first:
        first = item.first
    else:
        first = 0
    last = first + 30
    if last >= len(itemlist):
        last = len(itemlist)

    itemlist = itemlist[first:last]

    tmdb.set_infoLabels_itemlist(itemlist, True)
    first = last

    itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=item.url, action='alpha_list',
                         first=first))

    return itemlist
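
The pagination here keeps its cursor on the Item itself: each "Siguiente >>" entry re-enters alpha_list with first advanced past the 30-item window, so only the visible slice is sent to tmdb. A hypothetical call sequence (channel name and URL are placeholders):

page1 = alpha_list(Item(channel='demo', url='https://example.org/letra/a'))  # items 0-29
page2 = alpha_list(page1[-1])  # the "Siguiente >>" item; yields items 30-59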
Example #18
def seasons(item):
    logger.info()
    itemlist = []

    data = get_source(item.url, ctype=item.tmod)
    patron = 'Temporada (\d+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) == 1:
        return episodesxseason(item)
    infoLabels = item.infoLabels
    for scrapedseason in matches:
        contentSeasonNumber = scrapedseason
        title = 'Temporada %s' % scrapedseason
        infoLabels['season'] = contentSeasonNumber

        itemlist.append(
            Item(channel=item.channel,
                 action='episodesxseason',
                 url=item.url,
                 title=title,
                 contentSeasonNumber=contentSeasonNumber,
                 infoLabels=infoLabels,
                 context=filtertools.context(item, list_language)))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(
                channel=item.channel,
                title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                url=item.url,
                action="add_serie_to_library",
                extra="episodios",
                contentSerieName=item.contentSerieName,
                extra1='library'))

    return itemlist
Example #19
def seasons(item):
    logger.info()

    itemlist = list()

    matches = create_soup(item.url).find_all("ul", id=re.compile("season-\d+"))

    infoLabels = item.infoLabels

    for elem in matches:

        season = elem["id"].split("-")[1]
        title = "Temporada %s" % season
        infoLabels["season"] = season

        itemlist.append(
            Item(channel=item.channel,
                 title=title,
                 url=item.url,
                 action='episodesxseasons',
                 context=filtertools.context(item, list_language,
                                             list_quality),
                 infoLabels=infoLabels))

    tmdb.set_infoLabels_itemlist(itemlist, True)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(
                channel=item.channel,
                title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                url=item.url,
                action="add_serie_to_library",
                extra="episodios",
                contentSerieName=item.contentSerieName))

    return itemlist
Example #20
def latest(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url).find("div", class_="body bg4")

    for elem in soup.find_all("article"):

        url = elem.a["href"]
        title = elem.find("div", class_="tit over txtc").text

        itemlist.append(
            Item(channel=item.channel,
                 url=url,
                 title=title,
                 contentSerieName=title,
                 action="seasons",
                 context=filtertools.context(item, list_idiomas,
                                             list_quality)))

    tmdb.set_infoLabels_itemlist(itemlist, True)

    return itemlist
Example #21
def section(item):
    logger.info()

    itemlist = list()

    soup = create_soup(item.url).find("div", class_="sidebar left")

    if "Alfabético" not in item.title:
        value = soup.find_all("div", class_= lambda x: x and x.startswith("top-"))
    else:
        value = soup.find("div", id="letters").find_all("a")

    for elem in value:

        action = "latest"
        if "Alfabético" not in item.title:
            elem_data = elem.find_all("a")
            elem = elem_data[0] if len(elem_data) == 1 else elem_data[1]
            action = "seasons"
            url = elem["href"]
        else:
            url = urlparse.urljoin(host, elem["href"])

        title = elem.text

        new_item = Item(channel=item.channel, title=title, action=action, url=url)

        if "letra" not in url:
            new_item.contentSerieName = title
            new_item.context = filtertools.context(item, list_idiomas, list_quality)

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, True)

    return itemlist
Example #22
def series_por_letra_y_grupo(item):
    logger.info("letra: %s - grupo: %s" % (item.letter, item.extra))
    itemlist = []
    url = urlparse.urljoin(HOST, "autoload_process.php")

    post_request = {"group_no": item.extra, "letra": item.letter.lower()}
    data = httptools.downloadpage(url,
                                  post=urllib.urlencode(post_request)).data

    series = re.findall(
        'list_imagen.+?src="(?P<img>[^"]+).+?<div class="list_titulo"><a[^>]+href="(?P<url>[^"]+)[^>]+>(.*?)</a>',
        data, re.MULTILINE | re.DOTALL)

    for img, url, name in series:
        itemlist.append(
            item.clone(action="episodios",
                       title=name,
                       show=name,
                       url=urlparse.urljoin(HOST, url),
                       thumbnail=urlparse.urljoin(HOST, img),
                       context=filtertools.context(item, list_idiomas,
                                                   CALIDADES)))

    if len(series) == 8:
        itemlist.append(
            item.clone(title="Siguiente >>",
                       action="series_por_letra_y_grupo",
                       extra=item.extra + 1))

    if item.extra > 0:
        itemlist.append(
            item.clone(title="<< Anterior",
                       action="series_por_letra_y_grupo",
                       extra=item.extra - 1))

    return itemlist
Example #23
def series_por_letra_y_grupo(item):
    logger.info("letra: %s - grupo: %s" % (item.letter, item.extra))
    itemlist = []
    url = urlparse.urljoin(HOST, "autoload_process.php")
    post_request = {"group_no": item.extra, "letra": item.letter.lower()}
    data = httptools.downloadpage(url,
                                  post=urllib.urlencode(post_request)).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = '<div class=list_imagen><img src=(.*?) \/>.*?<div class=list_titulo><a href=(.*?) style=.*?inherit;>(.*?)'
    patron += '<.*?justify>(.*?)<.*?Año:<\/b>.*?(\d{4})<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for img, url, name, plot, year in matches:
        new_item = Item(channel=item.channel,
                        action="episodios",
                        title=name,
                        show=name,
                        url=urlparse.urljoin(HOST, url),
                        thumbnail=urlparse.urljoin(HOST, img),
                        context=filtertools.context(item, list_idiomas,
                                                    list_quality),
                        plot=plot,
                        infoLabels={'year': year})
        if year:
            tmdb.set_infoLabels_item(new_item)
        itemlist.append(new_item)
    if len(matches) == 8:
        itemlist.append(
            item.clone(title="Siguiente >>",
                       action="series_por_letra_y_grupo",
                       extra=item.extra + 1))
    if item.extra > 0:
        itemlist.append(
            item.clone(title="<< Anterior",
                       action="series_por_letra_y_grupo",
                       extra=item.extra - 1))
    return itemlist
Example #24
def series_seccion(item):
    logger.info()

    itemlist = []
    next_page = ''
    data = item.data
    data = data.replace('ahref', 'a href')
    patron = "<a href='([^']+)'.*?>(.*?)</a>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    if int(item.first) + 20 < len(matches):
        limit = int(item.first) + 20
        next_page = limit
    else:
        limit = len(matches)
    for scrapedurl, scrapedtitle in matches[int(item.first):limit]:
        itemlist.append(
            Item(channel=item.channel,
                 action="episodios",
                 title=scrapedtitle,
                 show=scrapedtitle,
                 url=urlparse.urljoin(HOST, scrapedurl),
                 context=filtertools.context(item, list_idiomas,
                                             list_quality)))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination

    if next_page != '':
        itemlist.append(
            Item(channel=item.channel,
                 action="series_seccion",
                 title='Siguiente >>>',
                 data=item.data,
                 first=next_page))

    return itemlist
Example #25
def section(item):
    logger.info()

    itemlist = list()
    if "vistas" in item.title:
        value = "HTML3"
    else:
        value = "HTML2"
    soup = create_soup(item.url).find("div", id=value)

    for elem in soup.find_all("li"):
        url = elem.a["href"]
        title = elem.a.text

        itemlist.append(
            Item(channel=item.channel,
                 url=url,
                 title=title,
                 contentSerieName=title,
                 action="seasons",
                 context=filtertools.context(item, list_idiomas,
                                             list_quality)))
    tmdb.set_infoLabels_itemlist(itemlist, True)
    return itemlist
Example #26
def list_all(item):
    logger.info()

    itemlist = list()
    exhausted = False
    # if item.title == "Mas Vistas":
    #     post = {"action": "action_changue_post_by", "type": "#Views", "posttype": "series"}
    #     matches = create_soup(item.url, post=post, headers={"referer": host})
    # elif item.title == "Ultimas":
    #     post = {"action": "action_changue_post_by", "type": "#Latest", "posttype": "series"}
    #     matches = create_soup(item.url, post=post)
    # else:
    soup = create_soup(item.url)
    matches = soup.find(
        "ul", class_=re.compile(r"MovieList Rows AX A04 B03 C20 D03 E20 Alt"))

    if not matches:
        return itemlist

    matches = matches.find_all("article")

    first = item.first if item.first else 0
    last = first + 25
    if last >= len(matches):
        last = len(matches)
        exhausted = True

    for elem in matches[first:last]:
        url = elem.a["href"]
        title = elem.find(["div", "ul"], class_="Title").text
        #thumb = elem.img["data-src"]
        thumb = elem.img["src"]

        itemlist.append(
            Item(channel=item.channel,
                 url=url,
                 title=title,
                 thumbnail=thumb,
                 action="seasons",
                 contentSerieName=title,
                 context=filtertools.context(item, list_language,
                                             list_quality)))

    tmdb.set_infoLabels_itemlist(itemlist, True)

    if not exhausted:
        url_next_page = item.url
        first = last
    else:
        try:
            url_next_page = soup.find("div",
                                      class_="nav-links").find_all("a")[-1]
            if url_next_page.text:
                url_next_page = ''
            else:
                url_next_page = url_next_page["href"]
        except:
            return itemlist

        url_next_page = '%s' % url_next_page
        first = 0

    if url_next_page and len(matches) > 26:
        itemlist.append(
            Item(channel=item.channel,
                 title="Siguiente >>",
                 url=url_next_page,
                 action='list_all',
                 first=first))

    return itemlist
Example #27
def set_filter(*args):
    logger.info()

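    # Argument layout (assumed from the indices used): args[2] is the Item
    # being decorated and args[3] the reference Item whose channel and
    # filter configuration apply.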
    args[2].context = filtertools.context(args[3], list_language, list_quality)

    return args[2]
Example #28
def list_all(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    if item.type == 'movies':
        patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">'
        patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?'
        patron += '<div class="calidad" >([^<]+)</div> <div class="audio-info">'
        patron += '(.*?)<div class="w3l-action-icon">.*?<p>([^<]+)</p>'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedurl, scrapedthumbnail, scrapedtitle, quality, lang_data, year in matches:

            title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
            if 'screener' in quality.lower():
                quality = 'Screener'
            contentTitle = scrapedtitle
            thumbnail = scrapedthumbnail
            url = scrapedurl
            language = get_language(lang_data)
            itemlist.append(
                item.clone(action='findvideos',
                           title=title,
                           url=url,
                           thumbnail=thumbnail,
                           contentTitle=contentTitle,
                           language=language,
                           quality=quality,
                           infoLabels={'year': year}))

    elif item.type == 'tvshows':
        patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">'
        patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?<p>([^<]+)</p>'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
            title = scrapedtitle
            contentSerieName = scrapedtitle
            thumbnail = scrapedthumbnail
            url = scrapedurl

            itemlist.append(
                item.clone(action='seasons',
                           title=title,
                           url=url,
                           thumbnail=thumbnail,
                           contentSerieName=contentSerieName,
                           context=filtertools.context(item, list_language,
                                                       list_quality),
                           infoLabels={'year': year}))

    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Pagination

    url_next_page = scrapertools.find_single_match(
        data, "<a class='last' href='([^']+)'>»</a>")
    if url_next_page:
        itemlist.append(
            item.clone(title="Siguiente >>",
                       url=url_next_page,
                       action='list_all'))

    return itemlist
Example #29
def list_all(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    if item.type == 'movies':
        patron = r'<article id="post-\d+" class="item movies"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'
        patron += r'quality">([^<]+)</span>\s*<\/div><a href="([^"]+)">.*?'
        patron += r'<\/h3><span>([^>]+)<\/span><\/div>.*?flags(.*?)metadata'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedthumbnail, scrapedtitle, quality, scrapedurl, year, lang_data in matches:

            title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
            contentTitle = scrapedtitle
            thumbnail = scrapedthumbnail
            url = scrapedurl
            language = get_language(lang_data)

            itemlist.append(
                item.clone(action='findvideos',
                           title=title,
                           url=url,
                           thumbnail=thumbnail,
                           contentTitle=contentTitle,
                           language=language,
                           quality=quality,
                           infoLabels={'year': year}))

    elif item.type == 'tvshows':
        patron = '<article id="post-\d+" class="item tvshows"><div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?'
        patron += '<a href="([^"]+)">.*?<\/h3><span>([^<]+)<\/span><\/div>'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
            title = scrapedtitle
            contentSerieName = scrapedtitle
            thumbnail = scrapedthumbnail
            url = scrapedurl

            itemlist.append(
                item.clone(action='seasons',
                           title=title,
                           url=url,
                           thumbnail=thumbnail,
                           contentSerieName=contentSerieName,
                           context=filtertools.context(item, list_language,
                                                       list_quality),
                           infoLabels={'year': year}))

    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Pagination

    url_next_page = scrapertools.find_single_match(
        data, '<link rel="next" href="([^"]+)" />')
    if url_next_page:
        itemlist.append(
            item.clone(title="Siguiente >>",
                       url=url_next_page,
                       action='list_all'))

    return itemlist
Example #30
def fichas(item):
    logger.info()
    itemlist = []
    or_matches = ""
    textoidiomas = ''
    infoLabels = dict()
    ## Load watched/pending statuses
    status = check_status()

    if item.title == "Buscar...":
        data = agrupa_datos(item.url, post=item.extra)
        s_p = scrapertools.find_single_match(
            data,
            '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
                '<h3 class="section-title">')
        if len(s_p) == 1:
            data = s_p[0]
            if 'Lo sentimos</h3>' in s_p[0]:
                return [
                    Item(channel=item.channel,
                         title="[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR steelblue]%s[/COLOR] sin resultados"
                               % item.texto.replace('%20', ' '))
                ]
        else:
            data = s_p[0] + s_p[1]
    elif 'series/abc' in item.url:
        data = agrupa_datos(item.url, referer=item.url)
    else:
        data = agrupa_datos(item.url)

    data = re.sub(
        r'<div class="span-6[^<]+<div class="item"[^<]+' + \
        '<a href="([^"]+)"[^<]+' + \
        '<img.*?src="([^"]+)".*?' + \
        '<div class="left"(.*?)</div>' + \
        '<div class="right"(.*?)</div>.*?' + \
        'title="([^"]+)".*?' + \
        'onclick="setFavorite.\d, (\d+),',
        r"'url':'\1';'image':'\2';'langs':'\3';'rating':'\4';'title':\5;'id':'\6';",
        data
    )
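    # The re.sub above rewrites each result card into a compact
    # 'key':'value'; string so the single patron below can extract all six
    # fields in one pass.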
    patron = "'url':'([^']+)';'image':'([^']+)';'langs':'([^']+)';'rating':'([^']+)';'title':([^;]+);'id':'([^']+)';"
    matches = re.compile(patron, re.DOTALL).findall(data)

    if item.page != '':
        or_matches = matches
        matches = matches[item.page:item.page + 40]

    for scrapedurl, scrapedthumbnail, scrapedlangs, scrapedrating, scrapedtitle, scrapedid in matches:

        thumbnail = scrapedthumbnail.replace('tthumb/130x190', 'thumbs')
        thumbnail += '|User-Agent=%s' % httptools.get_user_agent()
        language = ''
        title = scrapedtitle.strip()
        show = title

        # Rating
        if scrapedrating != ">" and not unify:
            valoracion = re.sub(r'><[^>]+>(\d+)<b class="dec">(\d+)</b>',
                                r'\1,\2', scrapedrating)
            title += " [COLOR greenyellow](%s)[/COLOR]" % valoracion

        # Languages
        if scrapedlangs != ">":
            textoidiomas, language = extrae_idiomas(scrapedlangs)

            if show_langs:
                title += " [COLOR darkgrey]%s[/COLOR]" % textoidiomas

        url = urlparse.urljoin(item.url, scrapedurl)
        # Action for series/movies
        if "/serie" in url or "/tags-tv" in url:
            action = "seasons"
            url += "###" + scrapedid + ";1"
            type = "shows"
            contentType = "tvshow"
        else:
            action = "findvideos"
            url += "###" + scrapedid + ";2"
            type = "movies"
            contentType = "movie"
            infoLabels['year'] = '-'
        # User status tags in the title (watched, pending, etc.)
        if account:
            status_str = get_status(status, type, scrapedid)
            if status_str != "":
                title += status_str
        # Show the content type after a search
        if item.title == "Buscar...":
            bus = host[-4:]
            # Cosmetic tweaks (TODO: test unify)
            c_t = "darkgrey"

            tag_type = scrapertools.find_single_match(url, '%s([^/]+)/' % bus)
            if tag_type == 'pelicula':
                c_t = "steelblue"
            title += " [COLOR %s](%s)[/COLOR]" % (c_t, tag_type.capitalize())

        if "/serie" in url or "/tags-tv" in url:
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     contentSerieName=show,
                     text_bold=True,
                     contentType=contentType,
                     language=language,
                     infoLabels=infoLabels,
                     thumbnail=thumbnail,
                     context=filtertools.context(item, list_language,
                                                 list_quality)))
        else:
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     text_bold=True,
                     contentTitle=show,
                     language=language,
                     infoLabels=infoLabels,
                     thumbnail=thumbnail))
    ## Pagination
    next_page_url = scrapertools.find_single_match(
        data, '<a href="([^"]+)">.raquo;</a>')
    if next_page_url != "":
        itemlist.append(
            Item(channel=item.channel,
                 action="fichas",
                 title=">> Página siguiente",
                 url=urlparse.urljoin(item.url, next_page_url),
                 text_bold=True))

        itemlist.append(
            Item(channel=item.channel,
                 action="get_page",
                 title=">> Ir a Página...",
                 url=urlparse.urljoin(item.url, next_page_url),
                 text_bold=True,
                 thumbnail=get_thumb('add.png'),
                 text_color='turquoise'))

    elif item.page != '':
        if item.page + 40 < len(or_matches):
            itemlist.append(
                item.clone(page=item.page + 40,
                           title=">> Página siguiente",
                           text_bold=True,
                           text_color="blue"))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    return itemlist