예제 #1
0
def findvideos(item):
    """Extract playable video items from a cinemasubito page.

    Downloads the page, recovers the AES passphrase embedded in the page's
    JavaScript, decrypts every encrypted link and feeds the combined text to
    servertools so known video servers can be detected.
    """
    logger.info("[thegroove360.cinemasubito] findvideos_tv")

    data = httptools.downloadpage(item.url, headers=headers).data

    # Passphrase used by the page's CryptoJS.AES.decrypt() call.
    password = scrapertools.find_single_match(
        data,
        r'var decrypted = CryptoJS\.AES\.decrypt\(vlinkCrypted, "([^"]+)",')

    encrypted = scrapertools.find_multiple_matches(
        data,
        r"<li><a rel=[^t]+target=[^c]+class=[^=]+=[^:]+:'(.*?)'[^:]+:'(.*?)'[^:]+:'(.*?)'"
    )

    decoded_links = set()
    for ciphertext, iv_hex, salt_hex in encrypted:
        plain = jscrypto.decode(ciphertext,
                                password,
                                iv=binascii.unhexlify(iv_hex),
                                salt=binascii.unhexlify(salt_hex))
        decoded_links.add(plain.replace('\/', '/'))

    # Prepend the decoded links so find_video_items can pick them up too.
    itemlist = servertools.find_video_items(data=str(decoded_links) + data)
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = __channel__

    return itemlist
예제 #2
0
def findvideos(item):
    """Return playable stream items for a sport365 event page.

    Decrypts every AES-protected stream link embedded in the page; if the
    cached key fails, a fresh key is fetched once and the link retried.

    Fixes: replaced bare ``except:`` clauses (which also swallow
    SystemExit/KeyboardInterrupt) with ``except Exception:``; made regex
    strings raw.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    # Message the site shows while the stream links are not yet published.
    if "Las referencias de la transmisión van a ser publicadas" in data:
        itemlist.append(
            item.clone(
                title=
                "Los enlaces estarán disponibles entre 5-10 minutos antes de que empiece",
                action=""))
        return itemlist

    # Cached AES key; decode it, or fetch a fresh one if not saved yet.
    key_save = config.get_setting("key_cryp", "sport365")
    if not key_save:
        key = getkey()
    else:
        key = base64.b64decode(key_save)

    matches = scrapertools.find_multiple_matches(
        data, r"<span id='span_watch_links'.*?, '([^']+)'")
    if not matches:
        # Fallback: "code" (embed) links instead of watch links.
        matches = scrapertools.find_multiple_matches(
            data, r"<span id='span_code_links'.*?, '([^']+)'")
    h = HTMLParser()
    for i, url in enumerate(matches):
        # Each link is a base64-encoded JSON blob {ct, iv, s} (CryptoJS style).
        url = jsontools.load_json(base64.b64decode(url))
        try:
            url = jscrypto.decode(url["ct"], key, url["s"].decode("hex"))
        except Exception:
            # Cached key is stale -> refresh it once and retry.
            key = getkey(True)
            url = jscrypto.decode(url["ct"], key, url["s"].decode("hex"))
        data_url = url.replace('\\/', '/').replace("\\", "")
        data_url = h.unescape(data_url)

        url = scrapertools.find_single_match(data_url, r'src=[\'"](.*?)"')
        title = "[COLOR green]Stream %s - [/COLOR][COLOR darkorange](%s)[/COLOR]" % (
            i + 1, item.info)
        itemlist.append(item.clone(action="play", url=url, title=title))

    return itemlist
예제 #3
0
def entradas(item):
    """List the scheduled (or rerun) events of sport365 grouped by date.

    Parses the schedule tables, decrypts each event URL with the site's AES
    key and returns one "findvideos" item per event, colouring titles by
    stream status and, optionally, by Spanish-language availability.

    Fixes: replaced the bare ``except:`` (which also swallows
    SystemExit/KeyboardInterrupt) with ``except Exception:``; made regex
    strings raw so ``\\d``/``\\s`` escapes are explicit.
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    # Cached AES key; decode it, or fetch a fresh one if not saved yet.
    key_save = config.get_setting("key_cryp", "sport365")
    if not key_save:
        key = getkey()
    else:
        key = base64.b64decode(key_save)

    # Optional highlight colour for Spanish streams (index 0 = disabled).
    colores_val = ['', 'yellow', 'blue', 'chocolate', 'violet', 'orange']
    color = colores_val[config.get_setting("resaltar_spanish", "sport365")]

    fechas = scrapertools.find_multiple_matches(
        data, r'<td colspan=9[^>]+>(\d+\.\d+\.\d+)<')
    if "Redifusiones" in item.title and not fechas:
        itemlist.append(
            item.clone(action="",
                       title="No hay ninguna emisión prevista actualmente"))
        return itemlist

    for i, f in enumerate(fechas):
        # Each date's block runs up to the next date header (or </table>).
        delimit = '</table>'
        if i != len(fechas) - 1:
            delimit = fechas[i + 1]
        bloque = scrapertools.find_single_match(data,
                                                '%s<(.*?)%s' % (f, delimit))
        # NOTE: "(.*?)/td>" captures up to "/td>" so the trailing "<" lands in
        # `datos`; the datos[:-1] below depends on that — do not "fix" either
        # in isolation.
        patron = r'onClick=.*?,\s*"([^"]+)".*?<td rowspan=2.*?src="([^"]+)".*?<td rowspan=2.*?>(\d+:\d+)<' \
                 r'.*?<td.*?>([^<]+)<.*?<td.*?>(.*?)/td>.*?<tr.*?<td colspan=2.*?>([^<]+)<'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for url, thumb, hora, title, datos, deporte in matches:
            evento = title.replace("-", "vs")
            # Green icon marks a live stream; red is the default.
            text_color = "red"
            if "green-big.png" in thumb:
                text_color = "green"
            if "/" in deporte:
                deporte = deporte.split(" /", 1)[0]
            if "<span" in datos:
                calidad, idioma = scrapertools.find_single_match(
                    datos, '>([^<]+)</span>([^<]+)<')
                datos = "%s/%s/%s" % (deporte, calidad.replace("HQ",
                                                               "HD"), idioma)
                if idioma == "Español" and color:
                    text_color = color
            else:
                # datos[:-1] strips the trailing "<" captured by the pattern.
                datos = "%s/%s" % (deporte, datos[:-1])
                if "Español" in datos and color:
                    text_color = color

            fecha = f.replace(".", "/")

            # Event URL is an AES-encrypted JSON blob {ct, s}; retry once with
            # a refreshed key if decryption fails with the cached one.
            url = jsontools.load_json(base64.b64decode(url))
            try:
                url = jscrypto.decode(url["ct"], key, url["s"].decode("hex"))
            except Exception:
                key = getkey(True)
                url = jscrypto.decode(url["ct"], key, url["s"].decode("hex"))
            url = "http://www.sport365.live" + url.replace('\\/', '/').replace(
                '"', "")
            # Site times are shifted +1 hour for display (assumed site tz
            # offset — TODO confirm against the live schedule).
            horas, minutos = hora.split(":")
            dia, mes, year = fecha.split("/")
            fecha_evento = datetime.datetime(int(year), int(mes), int(dia),
                                             int(horas), int(minutos))
            fecha_evento = fecha_evento + datetime.timedelta(hours=1)
            hora = fecha_evento.strftime("%H:%M")
            date = fecha_evento.strftime("%d/%m")
            if len(fechas) == 1:
                title = "[COLOR %s]%s - %s [/COLOR][COLOR darkorange](%s)[/COLOR]" % (
                    text_color, hora, title, datos)
            else:
                title = "[COLOR %s][%s] %s - %s[/COLOR] [COLOR darkorange](%s)[/COLOR]" % (
                    text_color, date, hora, title, datos)
            itemlist.append(
                item.clone(action="findvideos",
                           title=title,
                           url=url,
                           date=date,
                           time=hora,
                           evento=evento,
                           deporte=deporte,
                           context="info_partido",
                           info=datos))

    return itemlist
예제 #4
0
def play(item):
    """Resolve the final .m3u8 stream URL for a sport365 event.

    Follows the event iframe, submits the intermediate form, decrypts the
    AES-protected stream URL and probes the stream before returning the
    single playable entry (or [] with a user notification on failure).

    Fixes: replaced bare ``except:`` clauses with ``except Exception:`` so
    SystemExit/KeyboardInterrupt are no longer swallowed; made regex strings
    raw.
    """
    itemlist = []

    data = httptools.downloadpage(item.url).data
    url = scrapertools.find_single_match(data, r'<iframe.*?src="([^"]+)"')
    if not url:
        platformtools.dialog_notification(
            "Stream no disponible", "No es posible conectar con la emisión")
        return []
    elif "/matras.jpg" in data:
        # Placeholder image the site serves when the stream is down.
        platformtools.dialog_notification(
            "Stream caído", "Inténtalo de nuevo pasados unos minutos")
        return []

    h = HTMLParser()
    url = h.unescape(url)
    data = httptools.downloadpage(url).data
    # Hidden form fields required by the intermediate player page.
    f = scrapertools.find_single_match(data, r'name="f" value="([^"]+)"')
    d = scrapertools.find_single_match(data, r'name="d" value="([^"]+)"')
    r = scrapertools.find_single_match(data, r'name="r" value="([^"]+)"')
    url_post = scrapertools.find_single_match(data, r"'action',\s*'([^']+)'")
    if not url_post:
        platformtools.dialog_notification(
            "Stream no disponible", "No es posible conectar con la emisión")
        return []

    post = {'r': r, 'd': d, 'f': f}
    post = urllib.urlencode(post)
    data = httptools.downloadpage(url_post, post).data
    # Best-effort side call; a failure here must not abort playback.
    try:
        get_links(data)
    except Exception:
        pass

    # Cached AES key; decode it, or fetch a fresh one if not saved yet.
    key_save = config.get_setting("key_cryp", "sport365")
    if not key_save:
        key = getkey()
    else:
        key = base64.b64decode(key_save)
    # [A-z] is broader than alphanumerics (includes some punctuation); kept
    # as-is to match the site's obfuscated function name — TODO confirm.
    data_crypto = scrapertools.find_single_match(
        data, r"\};[A-z0-9]{43}\(.*?,.*?,\s*'([^']+)'")
    url = jsontools.load_json(base64.b64decode(data_crypto))
    try:
        url = jscrypto.decode(url["ct"], key, url["s"].decode("hex"))
    except Exception:
        # Cached key is stale -> refresh it once and retry.
        key = getkey(True)
        url = jscrypto.decode(url["ct"], key, url["s"].decode("hex"))

    url = url.replace('\\/', '/').replace("\\", "").replace('"', "")
    headers_test = {
        'Referer':
        url_post,
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0'
    }
    # HEAD-style probe; a 406 sometimes clears when cookies are dropped.
    response = httptools.downloadpage(url,
                                      headers=headers_test,
                                      follow_redirects=False,
                                      only_headers=True,
                                      replace_headers=True)
    if response.code == 406:
        response = httptools.downloadpage(url,
                                          headers=headers_test,
                                          follow_redirects=False,
                                          only_headers=True,
                                          replace_headers=True,
                                          cookies=False)
    if response.code == 406:
        platformtools.dialog_notification(
            "Stream no disponible", "No es posible conectar con la emisión")
        return []
    # Append the playlist name plus Kodi-style header suffix for the player.
    url += "ndex.m3u8|Referer=%s&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0" % url_post

    itemlist.append([".m3u8 [Sport365]", url])
    return itemlist