예제 #1
0
def findvideos(item):
    """Build the per-resolution playlist for an item and hand it to support.server.

    Downloads the item page, extracts the master ``video_url`` from the
    embedded JSON, then probes the playlist endpoint for the available
    resolutions and emits one playable clone per resolution.
    """
    support.info()
    itemlist = []
    # The page embeds JSON with HTML-escaped quotes: decode &quot; back to "
    # and strip backslash escaping before matching.  (The original
    # .replace('"', '"') was a no-op left over from entity corruption.)
    url = support.match(
        support.match(item).data.replace('&quot;', '"').replace('\\', ''),
        patron=r'video_url"\s*:\s*"([^"]+)"').match
    # the http variant of the playlist endpoint lists one token per resolution
    playlist = support.match(url.replace('https', 'http'),
                             patron=r'\./([^.]+)').matches
    for res in playlist:
        itemlist.append(
            item.clone(title=support.config.get_localized_string(30137),
                       server='directo',
                       url=url.replace('playlist', res),
                       quality=res,
                       action='play'))
    return support.server(item, itemlist=itemlist)
예제 #2
0
def findvideos(item):
    """Collect shrink-service links from the page and pass them to support.server.

    For TV shows the page data is already carried in ``item.url``; otherwise
    the page is downloaded first.
    """
    logger.info("kod.casacinema findvideos")

    data = item.url if item.extra == "tvshow" else httptools.downloadpage(
        item.url, headers=headers).data

    html = httptools.downloadpage(data).data
    patron = '"http:\/\/shrink-service\.it\/[^\/]+\/[^\/]+\/([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(html)

    # Append every extracted target URL to the data handed to the server
    # resolver.  (The original loop body `data = data` was a no-op and
    # silently discarded all matches.)
    for url in matches:
        if url:
            data += '\n' + url

    return support.server(item, data=data)
예제 #3
0
def findvideos(item):
    """Unshorten every scraped link and feed the result to support.server."""
    from lib.unshortenit import unshorten_only

    if item.contentType == 'movie':
        # movies: restrict the search to the main column block
        source = item.data if item.data else item
        links = support.match(
            source,
            patron=r'(?:SRC|href)="([^"]+)"',
            patronBlock=r'<div class="col-md-10">(.+?)<div class="ads">'
        ).matches
    else:
        links = support.match(item.url, patron=r'href="([^"]+)"').matches

    data = ''
    for link in links:
        support.log('URL=', link)
        resolved, c = unshorten_only(link.replace('#', 'speedvideo.net'))
        data += resolved + '\n'
    return support.server(item, data)
예제 #4
0
def findvideos(item):
    """Turn every non-trailer dooplay link into a playable Item."""
    itemlist = [
        Item(channel=item.channel,
             action="play",
             url=link['url'],
             fulltitle=item.fulltitle,
             thumbnail=item.thumbnail,
             show=item.show,
             quality=link['title'],
             contentType=item.contentType,
             folder=False)
        for link in support.dooplay_get_links(item, host)
        if link['title'] != 'Guarda il trailer'
    ]
    return support.server(item, itemlist=itemlist)
예제 #5
0
def findvideos(item):
    """Resolve each dooplay player option through the wp-json API and
    clone the item once per playable embed URL."""
    from core import jsontools
    itemlist = []
    matches = support.match(
        item,
        patron=r'<li id="player-option-[0-9]".*?data-type="([^"]+)" data-post="([^"]+)" data-nume="([^"]+)".*?<span class="title".*?>([^<>]+)</span>(?:<span class="server">([^<>]+))?'
    ).matches
    for opt_type, opt_post, opt_nume, opt_quality, opt_server in matches:
        api_url = host + '/wp-json/dooplayer/v1/post/%s?type=%s&source=%s' % (
            opt_post, opt_type, opt_nume)
        payload = jsontools.load(support.match(api_url).data)
        if 'embed_url' in payload:
            link = payload['embed_url']
        else:
            link = ''
        itemlist.append(
            item.clone(server=opt_server, quality=opt_quality, url=link,
                       action='play'))
    return support.server(item, itemlist=itemlist)
예제 #6
0
파일: aniplay.py 프로젝트: rrosajp/addon
def findvideos(item):
    """Fetch the direct video URL for an anime or episode from the aniplay API."""
    logger.debug()

    endpoint = 'episode' if item.contentType == 'episode' else 'anime'
    response = httptools.downloadpage(
        '{}/api/{}/{}'.format(host, endpoint, item.id), CF=False).json

    # an 'anime' answer lists its episodes: fall through to the first one
    if response.get('episodes', []):
        first_id = response['episodes'][0]['id']
        response = httptools.downloadpage(
            '{}/api/episode/{}'.format(host, first_id)).json

    itemlist = [item.clone(title=config.get_localized_string(30137),
                           url=response['videoUrl'],
                           server='directo')]

    return support.server(item, itemlist=itemlist)
예제 #7
0
def findvideos(item):
    """Classify the page (anime/series vs. movie) and extract video links.

    Downloads item.url, reads the category-film tag to detect Sub-ITA and
    tvshow content (delegating to episodios() for those), then base64-decodes
    any protectlink iframes, unshortens them and appends every resolved URL
    to the data handed to support.server.
    """
    log()
    listurl = set()
    itemlist = []
    support.log("ITEMLIST: ", item)
    data = support.match(item.url, headers=headers).data
    check = support.match(
        data, patron=r'<div class="category-film">(.*?)</div>').match
    if 'sub' in check.lower():
        item.contentLanguage = 'Sub-ITA'
    support.log("CHECK : ", check)
    if 'anime' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        support.log('select = ### è una anime ###')
        try:
            return episodios(item)
        except:
            # NOTE(review): bare except silently falls through to the link
            # scan below when episodios() fails — consider narrowing
            pass
    elif 'serie' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        return episodios(item)

    if 'protectlink' in data:
        urls = scrapertools.find_multiple_matches(
            data, r'<iframe src="[^=]+=(.*?)"')
        support.log("SONO QUI: ", urls)
        for url in urls:
            # NOTE(review): str.decode('base64') is Python 2 only — confirm
            # this channel still runs under Py2
            url = url.decode('base64')
            # drop the trailing part because it does not belong
            url, c = unshorten_only(url)
            if 'nodmca' in url:
                page = httptools.downloadpage(url, headers=headers).data
                url = '\t' + scrapertools.find_single_match(
                    page, '<meta name="og:url" content="([^=]+)">')
            if url:
                listurl.add(url)
    data += '\n'.join(listurl)

    itemlist = support.server(item,
                              data + item.otherLinks,
                              patronTag='Keywords:\s*<span>([^<]+)')
    return itemlist
예제 #8
0
def findvideos(item):
    """Classify the page (anime/series vs. movie) and extract video links.

    Downloads item.url, normalises whitespace, reads the category-film
    header to detect Sub-ITA and tvshow content (delegating to episodios()
    for those), then base64-decodes any protectlink iframes, unshortens
    them and appends every resolved URL to the data handed to support.server.
    """
    log()
    listurl = set()
    # itemlist = []
    support.log("ITEMLIST: ", item)
    ##    if item.args == 'anime':
    ##        data = item.url
    ##    else:
    ##        data = httptools.downloadpage(item.url, headers=headers).data
    data = httptools.downloadpage(item.url, headers=headers).data

    data = re.sub('\n|\t', ' ', data)
    data = re.sub(r'>\s+<', '> <', data)
    check = scrapertools.find_single_match(
        data, r'<div class="category-film">\s+<h3>\s+(.*?)\s+</h3>\s+</div>')
    if 'sub' in check.lower():
        item.contentLanguage = 'Sub-ITA'
    support.log("CHECK : ", check)
    if 'anime' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        support.log('select = ### è una anime ###')
        return episodios(item)
    elif 'serie' in check.lower():
        item.contentType = 'tvshow'
        item.data = data
        return episodios(item)

    if 'protectlink' in data:
        urls = scrapertools.find_multiple_matches(
            data, r'<iframe src="[^=]+=(.*?)"')
        support.log("SONO QUI: ", urls)
        for url in urls:
            # NOTE(review): str.decode('base64') is Python 2 only — confirm
            # this channel still runs under Py2
            url = url.decode('base64')
            # drop the trailing part because it does not belong
            url, c = unshorten_only(url)
            if 'nodmca' in url:
                page = httptools.downloadpage(url, headers=headers).data
                url = '\t' + scrapertools.find_single_match(
                    page, '<meta name="og:url" content="([^=]+)">')
            if url:
                listurl.add(url)
    data += '\n'.join(listurl)
    return support.server(item, data)  #, headers=headers)
예제 #9
0
파일: animesaturn.py 프로젝트: iz8mbw/addon
def findvideos(item):
    """Follow the download/streaming anchor and emit direct video links."""
    support.log()
    itemlist = []
    pages = support.match(item, r'<a href="([^"]+)"><div class="downloadestreaming">', headers=headers)[0]
    if pages:
        sources = support.match(item, r'(?:<source type="[^"]+"\s*src=|file:\s*)"([^"]+)"', url=pages[0], headers=headers)[0]
        itemlist = [
            support.Item(channel=item.channel,
                         action="play",
                         title='Diretto',
                         quality='',
                         url=src,
                         server='directo',
                         fulltitle=item.fulltitle,
                         show=item.show,
                         contentType=item.contentType,
                         folder=False)
            for src in sources
        ]
    return support.server(item, itemlist=itemlist)
예제 #10
0
def findvideos(item):
    """Resolve per-episode player pages to direct video URLs.

    For 'updated' items the episode page link is first extracted from the
    schedule table.  Each episodioN.php player page is then fetched with the
    session cookies, and its <source src> URL (pipe-joined with the headers)
    is emitted as a direct link.
    """
    support.log(item)
    itemlist = []

    if item.args == 'updated':
        ep = support.match(item.fulltitle, r'(Episodio\s*\d+)')[0][0]
        item.url = support.re.sub(r'episodio-\d+-|oav-\d+-', '', item.url)
        if 'streaming' not in item.url:
            item.url = item.url.replace('sub-ita', 'sub-ita-streaming')
        item.url = support.match(
            item,
            r'<a href="([^"]+)"[^>]+>',
            ep + '(.*?)</tr>',
        )[0][0]

    # url is also passed to support.server() after the loop; initialise it
    # so an empty match list cannot raise NameError there.
    url = ''
    urls = support.match(item.url, r'(episodio\d*.php.*)')[0]
    for url in urls:
        url = host + '/' + url
        headers['Referer'] = url
        data = support.match(item, headers=headers, url=url)[1]
        cookies = ""
        # collect this host's cookies from the jar (tab-separated Netscape
        # format: field 5 is the name, field 6 the value)
        matches = support.re.compile(
            '(.%s.*?)\n' % host.replace("http://", "").replace("www.", ""),
            support.re.DOTALL).findall(support.config.get_cookie_data())
        for cookie in matches:
            cookies += cookie.split('\t')[5] + "=" + cookie.split(
                '\t')[6] + ";"

        headers['Cookie'] = cookies[:-1]

        url = support.match(data, r'<source src="([^"]+)"[^>]+>'
                            )[0][0] + '|' + support.urllib.urlencode(headers)
        itemlist.append(
            support.Item(channel=item.channel,
                         action="play",
                         title='diretto',
                         quality='',
                         url=url,
                         server='directo',
                         fulltitle=item.fulltitle,
                         show=item.show))

    return support.server(item, url, itemlist)
예제 #11
0
def findvid_serie(item):
    """Parse quality-labelled link blocks and build playable items.

    item.url carries raw HTML: "NxM" episode headers are stripped, the text
    is split into alternating "<label>:" / link-run chunks, and every anchor
    in a chunk becomes an Item tagged with the preceding label as quality.
    """
    def load_vid_series(html, item, itemlist, blktxt):
        # Extract the anchors (url, server-name pairs) from one chunk
        support.log('HTML', html)
        matches = support.match(
            html,
            patron=
            r'<a href="([^"]+)"[^=]+="_blank"[^>]+>(?!<!--)(.*?)(?:</a>|<img)'
        ).matches
        for url, server in matches:
            item = Item(channel=item.channel,
                        action="play",
                        title=server,
                        url=url,
                        server=server,
                        fulltitle=item.fulltitle,
                        show=item.show,
                        quality=blktxt,
                        contentType=item.contentType,
                        folder=False)
            # swzz links are indirections that must be resolved first
            if 'swzz' in item.url: item.url = support.swzz_get_url(item)
            itemlist.append(item)

    support.log()

    itemlist = []
    lnkblk = []
    lnkblkp = []

    # strip "NxM ..." episode headers so they don't pollute the blocks
    data = re.sub(r'((?:<p>|<strong>)?[^\d]*\d*(?:&#215;|×)[0-9]+[^<]+)',
                  '', item.url)

    # Blocks with split: even entries are anchor runs, odd entries labels
    blk = re.split(r"(?:>\s*)?([A-Za-z\s0-9]*):\s*<", data, re.S)
    blktxt = ""
    for b in blk:
        if b[0:3] == "a h" or b[0:4] == "<a h":
            load_vid_series("<%s>" % b, item, itemlist, blktxt)
            blktxt = ""
        elif len(b.strip()) > 1:
            blktxt = b.strip()

    return support.server(item, itemlist=itemlist)
예제 #12
0
def findvideos(item):
    """Extract download links with their quality and clone playable items.

    The download button's intermediate page is matched with two alternative
    patterns (a ?dest= redirect parameter or a direct 'Click' anchor) and
    the resulting URL is percent-decoded.
    """
    itemlist = []
    matches = support.match(
        item,
        patron=
        r'<a href="([^"]+)[^>]+>Download[^>]+>[^>]+>[^>]+><strong class="quality">([^<]+)<'
    ).matches
    for url, quality in matches:
        itemlist.append(
            # 'action' (was misspelled 'caction'), so the clone is
            # actually marked playable
            item.clone(action="play",
                       url=unquote(
                           support.match(url,
                                         patron=[
                                             r'dest=([^"]+)"',
                                             r'/(http[^"]+)">Click'
                                         ]).match),
                       quality=quality))

    return support.server(item, itemlist=itemlist)
예제 #13
0
    def itemlistHook(itl):
        """Expand whole-season entries into one item per episode.

        Entries whose title lacks an NxM tag but whose data contains http
        links are whole seasons: the links are resolved to servers and
        grouped by the episode tag recovered from each server's file name.
        """
        ret = []
        for it in itl:
            ep = scrapertools.find_single_match(it.title, r'(\d+x\d+)')
            if not ep and 'http' in it.data:  # whole season
                from lib import unshortenit
                data = unshortenit.findlinks(it.data)

                def get_ep(s):
                    # dynamically import the hoster module to read the
                    # remote file name, then derive the NxM episode tag
                    srv_mod = __import__('servers.%s' % s.server, None, None,
                                         ["servers.%s" % s.server])
                    if hasattr(srv_mod, 'get_filename'):
                        title = srv_mod.get_filename(s.url)
                        ep = scrapertools.get_season_and_episode(title)
                        if ep:
                            if ep not in episodes:
                                episodes[ep] = []
                            episodes[ep].append(s)

                servers = support.server(item,
                                         data,
                                         CheckLinks=False,
                                         Download=False,
                                         Videolibrary=False)
                episodes = {}
                for s in servers:
                    get_ep(s)
                # recover the episode number from each file name
                # with futures.ThreadPoolExecutor() as executor:
                #     for s in servers:
                #         executor.submit(get_ep, s)
                # logger.debug(it.contentLanguage)
                ret.extend([
                    it.clone(title=typo(ep, 'bold') +
                             typo(it.contentLanguage, '_ [] color kod bold'),
                             contentSeason=int(ep.split('x')[0]),
                             contentEpisodeNumber=int(ep.split('x')[1]),
                             servers=[srv.tourl() for srv in episodes[ep]])
                    for ep in episodes
                ])
            elif ep:
                ret.append(it)
        return sorted(ret, key=lambda i: i.title)
예제 #14
0
파일: animeforce.py 프로젝트: iz8mbw/addon
def findvideos(item):
    """Resolve an animeforce episode link to playable items.

    Unshortens episode links, normalises scheme/host-relative URLs, follows
    adf.ly and bit.ly redirects, then scrapes the download buttons for
    direct URLs.
    """
    support.log(item)

    itemlist = []

    if item.episode:
        from lib import unshortenit
        url, c = unshortenit.unshorten(item.url)
        # pick the anchor inside this episode's table row
        url = support.match(item,
                            r'<a href="([^"]+)"[^>]*>',
                            patronBlock=r'Episodio %s(.*?)</tr>' %
                            item.episode,
                            url=url)[0]
        item.url = url[0] if url else ''

    if 'vvvvid' in item.url:
        item.action = 'play'
        itemlist.append(item)

    # normalise protocol-relative or host-relative URLs
    if 'http' not in item.url:
        if '//' in item.url[:2]:
            item.url = 'http:' + item.url
        elif host not in item.url:
            item.url = host + item.url

    if 'adf.ly' in item.url:
        item.url = adfly.get_long_url(item.url)
    elif 'bit.ly' in item.url:
        # resolve the shortener via its Location header without following it
        item.url = support.httptools.downloadpage(
            item.url, only_headers=True,
            follow_redirects=False).headers.get("location")

    matches = support.match(item, r'button"><a href="([^"]+)"')[0]

    for video in matches:
        itemlist.append(
            support.Item(channel=item.channel,
                         action="play",
                         title='diretto',
                         url=video,
                         server='directo'))

    return support.server(item, itemlist=itemlist)
예제 #15
0
def findvideos(item):
    """Match entry-content links, resolve is.gd shorteners, defer to support.server."""
    log()
    itemlist = []

    block_patron = '<div class="entry-content">(.*?)<footer class="entry-footer">'
    link_patron = r'<a href="([^"]+)">'

    matches, data = support.match(item, link_patron, block_patron, headers)

    for link in matches:
        if 'is.gd' in link:
            # the shortener answers with a redirect; keep only its target
            resp = httptools.downloadpage(link, follow_redirects=False)
            data += resp.headers.get("location", "") + '\n'

    return support.server(item, data)
예제 #16
0
def findvideos(item):
    """Probe the StreamingCommunity playlist for available resolutions.

    Extracts the master video_url from the page JSON, HEAD-requests the
    480p/720p/1080p variants in parallel and builds a sorted video_urls
    list on a single 'directo' item.
    """
    video_urls = []
    headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14',
           'content-type': 'application/json;charset=UTF-8',
           'Referer': 'https://streamingcommunity.net'}
    # the page embeds JSON with &quot;-escaped quotes and backslash escapes
    data = support.match(item, headers=headers).data.replace('&quot;','"').replace('\\','')
    url = support.match(data, patron=r'video_url"\s*:\s*"([^"]+)"').match
    def videourls(res):
        # keep only resolutions the CDN actually serves (HEAD 200)
        newurl = '{}/{}'.format(url, res)
        if requests.head(newurl, headers=headers).status_code == 200:
            video_urls.append(["m3u8 {} [StreamingCommunity]".format(res), newurl])

    with futures.ThreadPoolExecutor() as executor:
        for res in ['480p', '720p', '1080p']:
            executor.submit(videourls, res)
    # fall back to the bare master URL when no variant answered
    if not video_urls: video_urls = [["m3u8 [StreamingCommunity]", url]]
    else: video_urls.sort(key=lambda url: int(support.match(url[0], patron=r'(\d+)p').match))
    itemlist = [item.clone(title = channeltools.get_channel_parameters(item.channel)['title'], server='directo', video_urls=video_urls, thumbnail=channeltools.get_channel_parameters(item.channel)["thumbnail"], forcethumb=True)]
    return support.server(item, itemlist=itemlist)
예제 #17
0
def findvideos(item):
    """List direct video sources from the hydra JSON playlist.

    For non-movie content ``item.extra`` carries the index of the entry
    inside the ``hydra:member`` collection.  Any scrape/parse failure is
    deliberately swallowed so an empty server list is returned instead of
    crashing.
    """
    support.info()
    itemlist = []
    try:
        data = support.match(item.url, headers=headers).data
        json_object = jsontools.load(data)
        array_index = 0
        if item.contentType != 'movie':
            array_index = int(item.extra)
        for video in json_object['hydra:member'][array_index]['playlist']['videos']:
            itemlist.append(
                item.clone(action="play",
                           title='Direct',
                           url=video['src'],
                           server='directo',
                           quality=str(video['size']) + 'p',
                           folder=False))
    except Exception:
        # narrowed from bare except: don't trap SystemExit/KeyboardInterrupt
        pass
    return support.server(item, itemlist=itemlist)
예제 #18
0
def findvideos(item):
    """Extract keeplinks-protected links for the requested episode.

    Normalises episode markers (SxxExx -> '..:: Episodio NN'), isolates the
    block for this item's episode number, then opens each keeplinks page
    with the anti-popup cookie to harvest the real hoster URLs.
    """
    log()
    data = httptools.downloadpage(item.url,
                                  headers=headers,
                                  ignore_response_code=True).data
    data = re.sub(r'\n|\t|\s+', ' ', data)
    # grab the block containing the links
    blocco = scrapertools.find_single_match(
        data, r'<div class="entry">([\s\S.]*?)<div class="post').replace(
            '..:: Episodio ', 'Episodio ').strip()
    matches = scrapertools.find_multiple_matches(blocco, '(S(\d*)E(\d*))\s')
    if len(matches) > 0:
        for fullseasonepisode, season, episode in matches:
            blocco = blocco.replace(fullseasonepisode + ' ',
                                    'Episodio ' + episode + ' ')

    blocco = blocco.replace('Episodio ', '..:: Episodio ')

    episodio = item.infoLabels['episode']
    patron = r'\.\.:: Episodio %s([\s\S]*?)(<div class="post|..:: Episodio)' % episodio
    log(patron)
    log(blocco)

    matches = scrapertools.find_multiple_matches(blocco, patron)
    if len(matches):
        data = matches[0][0]

    patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p(?:[0-9]*)/([^"]+))"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for keeplinks, id in matches:
        # the flag[id] cookie marks the interstitial as already seen
        headers2 = [[
            'Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' +
            str(int(time.time()))
        ], ['Referer', keeplinks]]

        html = httptools.downloadpage(keeplinks, headers=headers2).data
        data += str(
            scrapertools.find_multiple_matches(
                html, '</lable><a href="([^"]+)" target="_blank"'))

    return support.server(item, data=data)
예제 #19
0
def findvideos(item):
    """Collect episode links and append a 'go to series' navigation item.

    Matches links inside the entry-content block, resolves is.gd shorteners
    via their Location header, builds server items from the accumulated
    data, then extracts the series slug to add an episodios() entry.
    """
    support.log('findvideos ->', item)
    itemlist = []
    patronBlock = '<div class="entry-content">(?P<block>.*)<footer class="entry-footer">'
    patron = r'<a href="([^"]+)">'
    html = support.match(item,
                         patron=patron,
                         patronBlock=patronBlock,
                         headers=headers)
    matches = html.matches
    data = html.data

    if item.args != 'episodios':
        item.infoLabels['mediatype'] = 'episode'
    for scrapedurl in matches:
        if 'is.gd' in scrapedurl:
            # the shortener answers with a redirect; keep only its target
            resp = httptools.downloadpage(scrapedurl, follow_redirects=False)
            data += resp.headers.get("location", "") + '\n'

    itemlist += support.server(item, data)

    # recover the series slug from the 'Posted in' breadcrumb
    data = support.match(item.url).data
    patron = r'>Posted in <a href="https?://fastsubita.com/serietv/([^/]+)/(?:[^"]+)?"'
    series = scrapertools.find_single_match(data, patron)
    titles = support.typo(series.upper().replace('-', ' '), 'bold color kod')
    goseries = support.typo("Vai alla Serie:", ' bold color kod')
    itemlist.append(
        Item(
            channel=item.channel,
            title=goseries + titles,
            fulltitle=titles,
            show=series,
            contentType='tvshow',
            contentSerieName=series,
            url=host + "/serietv/" + series,
            action='episodios',
            contentTitle=titles,
            plot="Vai alla Serie " + titles + " con tutte le puntate",
        ))

    return itemlist
예제 #20
0
def findvideos(item):
    """Decode iframe/str-encoded links from the page and pass them on.

    Matched strings obfuscate '%' as '@'/'g'; they are restored and
    unescaped, and both the decoded and raw forms are accumulated so the
    server resolver can scan either.
    """
    support.log()
    if item.data:
        data = item.data
    else:
        matches = support.match(item,
                                '(?:str="([^"]+)"|iframe src="([^"]+)")')[0]
        data = ''
        for match in matches:
            try:
                data += str(
                    jsfunctions.unescape(support.re.sub('@|g', '%',
                                                        match)))
            except Exception:
                # undecodable chunk: keep only the raw match appended below
                # (narrowed from bare except; `data += ''` was a no-op)
                pass
            data += str(match)

    return support.server(item, data)
예제 #21
0
def findvideos(item):
    """Resolve the content URL (possibly via a JSON manifest) to a direct link."""
    support.info()
    itemlist = []

    if not item.url.endswith('json'):
        url = item.url
    else:
        payload = current_session.get(item.url).json()
        if 'first_item_path' in payload:
            # the manifest points at a first item whose own JSON holds the URL
            first = current_session.get(getUrl(
                payload['first_item_path'])).json()
            url = first['video']['content_url']
        else:
            url = payload['video']['content_url']

    itemlist.append(
        item.clone(server='directo',
                   title=support.config.get_localized_string(30137),
                   fanart=item.json,
                   url=getUrl(url),
                   action='play'))
    return support.server(item, itemlist=itemlist, Download=False)
예제 #22
0
def findvideos(item):
    """Clone one playable item per link option carried in item.url."""
    logger.debug()
    item.contentTitle = item.fulltitle
    options = item.url['links'] if 'links' in item.url else item.url
    # support.dbg()
    itemlist = []
    for option in options:
        extra = set_extra_values(item, option, item.path)
        itemlist.append(
            item.clone(url=option['url'],
                       action='play',
                       quality=extra.quality,
                       contentLanguage=extra.language,
                       extraInfo=extra.info))

    item.url = ''  # do not pass referer
    return support.server(item, itemlist=itemlist)
예제 #23
0
파일: polpotv.py 프로젝트: iz8mbw/addon
def findvideos(item):
    """List direct video sources from the polpotv hydra JSON playlist.

    Any scrape/parse failure is deliberately swallowed so an empty server
    list is returned instead of crashing.
    """
    support.log()
    itemlist = []
    try:
        data = httptools.downloadpage(item.url, headers=headers).data
        json_object = jsontools.load(data)
        for video in json_object['hydra:member'][0]['playlist']['videos']:
            itemlist.append(
                Item(channel=item.channel,
                     action="play",
                     thumbnail=item.thumbnail,
                     url=video['src'],
                     server='directo',
                     quality=str(video['size']) + 'p',
                     folder=False))
    except Exception:
        # narrowed from bare except: don't trap SystemExit/KeyboardInterrupt
        pass
    return support.server(item, itemlist=itemlist)
예제 #24
0
파일: film4k.py 프로젝트: whiplash78/addon
def findvideos(item):
    """Resolve film4k links to playable items.

    Episodes: read the post id from the WP 'Link' response header and query
    the dooplay links helper.  Movies: follow each /links/ page and let
    servertools detect the hosters, tagging them with the listed quality.
    """
    itemlist = []
    if item.contentType == 'episode':
        # the WP 'link' header exposes the post id as ?p=<id>
        linkHead = support.httptools.downloadpage(
            item.url, only_headers=True).headers['link']
        epId = support.scrapertools.find_single_match(linkHead,
                                                      r'\?p=([0-9]+)>')
        for link in support.dooplay_get_links(
                item, host, paramList=[['tv', epId, 1, 'title', 'server']]):
            itemlist.append(item.clone(action="play", url=link['url']))
    else:
        for link, quality in support.match(
                item.url,
                patron="(" + host +
                """links/[^"]+).*?class="quality">([^<]+)""").matches:
            srv = support.servertools.find_video_items(
                data=support.httptools.downloadpage(link).data)
            for s in srv:
                s.quality = quality
            itemlist.extend(srv)
    return support.server(item, itemlist=itemlist)
예제 #25
0
def findvideos(item):
    """Resolve an MTV/Paramount mgid URI to its HLS rendition URL."""
    logger.debug()  # duplicate logger.debug() call removed
    item.manifest = 'hls'
    mgid = support.match(item.url, patron=r'uri":"([^"]+)"').match
    url = 'https://media.mtvnservices.com/pmt/e1/access/index.html?uri=' + mgid + '&configtype=edge&ref=' + item.url
    ID, rootUrl = support.match(url, patron=[r'"id":"([^"]+)",',r'brightcove_mediagenRootURL":"([^"]+)"']).matches
    # fill the mediagen template with the id and pull the first rendition src
    item.url = jsontools.load(support.match(rootUrl.replace('&device={device}','').format(uri = ID)).data)['package']['video']['item'][0]['rendition'][0]['src']
    return support.server(item, itemlist=[item.clone(title='Paramount', server='directo', action='play')], Download=False, Videolibrary=False)


# def play(item):
#     logger.debug()
#     item.manifest = 'hls'
#     mgid = support.match(item.url, patron=r'uri":"([^"]+)"').match
#     url = 'https://media.mtvnservices.com/pmt/e1/access/index.html?uri=' + mgid + '&configtype=edge&ref=' + item.url
#     ID, rootUrl = support.match(url, patron=[r'"id":"([^"]+)",',r'brightcove_mediagenRootURL":"([^"]+)"']).matches
#     item.url = jsontools.load(support.match(rootUrl.replace('&device={device}','').format(uri = ID)).data)['package']['video']['item'][0]['rendition'][0]['src']


#     return [item]
예제 #26
0
def findvideos(item):
    """Decode obfuscated iframe sources and hand them to support.server.

    Matched strings obfuscate '%' as '@'/'g'; they are restored and
    unescaped, and both the decoded and raw forms are accumulated.
    """
    support.info()
    if item.data:
        data = item.data
    else:
        matches = support.match(
            item, patron=r'<iframe.*?src="(?P<url>[^"]+)"').matches
        data = ''
        for match in matches:
            try:
                data += str(
                    jsfunctions.unescape(support.re.sub('@|g', '%',
                                                        match)))
            except Exception:
                # undecodable chunk: keep only the raw match appended below
                # (narrowed from bare except; `data += ''` was a no-op)
                pass
            data += str(match)

    return support.server(item, data)
예제 #27
0
def findvideos(item):
    """Probe the four mirror variants of the streaming page for direct links.

    Builds the primary/secondary and alternative URLs from the page's 'G'
    anchor, extracts the source/file URL from each, deduplicates them and
    emits one 'directo' item per working mirror.
    """
    support.info()
    itemlist = []
    page_data = ''
    titles =['Primario', 'Secondario', 'Alternativo Primario', 'Alternativo Secondario']
    url = support.match(item, patron=r'<a href="([^"]+)">[^>]+>[^>]+>G', headers=headers).match
    urls = [url, url+'&extra=1', url+'&s=alt', url+'&s=alt&extra=1']
    links = []
    for i, url in enumerate(urls):
        data = support.match(url, headers=headers).data
        # alternative mirrors (&s=alt) only expose the jwplayer file: entry
        if not '&s' in url:
            link = support.match(data, patron=r'(?:<source type="[^"]+"\s*src=|file:\s*)"([^"]+)"', headers=headers).match
        else:
            link = support.match(data, headers=headers, patron=r'file:\s*"([^"]+)"').match
        if not link:
            page_data += data
        if link and link not in links:
            links.append(link)
            link += '|Referer=' + item.url
            itemlist.append(item.clone(action="play", title=titles[i], url=link, server='directo'))
    # NOTE(review): page_data is accumulated but only the last page's data
    # is forwarded here — confirm whether page_data was meant instead
    return support.server(item, data=data, itemlist=itemlist)
예제 #28
0
def findvideos(item):
    """Resolve an animetubeita episode page to its mp4 stream.

    'last' items first follow the download/streaming anchor to find the
    real episode link.  The stream URL is then extracted and the session
    cookies plus Referer are appended (pipe-separated) so the player can
    fetch it.
    """
    itemlist=[]
    if item.args == 'last':
        match = support.match(item, patron=r'href="(?P<url>[^"]+)"[^>]+><strong>DOWNLOAD &amp; STREAMING</strong>').match
        if match:
            patronBlock = r'<h6>Episodio</h6>(?P<block>.*?)(?:<!--|</table>)'
            patron = r'<a href="http://link\.animetubeita\.com/2361078/(?P<url>[^"]+)"'
            match = support.match(match, patron=patron, patronBlock=patronBlock, headers=headers).match
        else: return itemlist

        if match: item.url = match[-1]
        else: return itemlist
    data = support.httptools.downloadpage(item.url, headers=headers).data
    cookies = ""
    # collect this site's cookies from the jar (tab-separated Netscape
    # format: field 5 is the name, field 6 the value)
    matches = re.compile('(.animetubeita.com.*?)\n', re.DOTALL).findall(support.config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"

    headers['Referer'] =  item.url
    headers['Cookie'] = cookies[:-1]

    url = support.scrapertools.find_single_match(data, """<source src="([^"]+)" type='video/mp4'>""")
    if not url: url = support.scrapertools.find_single_match(data, 'file: "([^"]+)"')
    if url:
        # NOTE(review): bare urllib.urlencode is Python 2 only — other
        # channels use support.urllib; confirm the runtime
        url += '|' + urllib.urlencode(headers)
        itemlist.append(
            support.Item(channel=item.channel,
                        action="play",
                        title='diretto',
                        server='directo',
                        quality='',
                        url=url,
                        thumbnail=item.thumbnail,
                        fulltitle=item.fulltitle,
                        show=item.show,
                        contentType=item.contentType,
                        folder=False))
    return support.server(item, itemlist=itemlist)
예제 #29
0
    def itemlistHook(itl):
        """Expand whole-season entries into one item per episode.

        Items flagged with .stagione carry a whole season's links: resolve
        them to servers, recover each server's episode tag from its file
        name and emit a clone per episode with the matching server list.
        """
        ret = []
        for it in itl:
            if it.stagione:  # whole season

                def get_ep(s):
                    # dynamically import the hoster module to read the
                    # remote file name, then derive the NxM episode tag
                    srv_mod = __import__('servers.%s' % s.server, None, None,
                                         ["servers.%s" % s.server])
                    if hasattr(srv_mod, 'get_filename'):
                        title = srv_mod.get_filename(s.url)
                        ep = scrapertools.get_season_and_episode(title)
                        if ep:
                            if ep not in episodes:
                                episodes[ep] = []
                            episodes[ep].append(s)

                servers = support.server(item,
                                         it.stagione,
                                         AutoPlay=False,
                                         CheckLinks=False,
                                         Download=False,
                                         Videolibrary=False)
                episodes = {}

                # recover the episode number from each file name
                with futures.ThreadPoolExecutor() as executor:
                    for s in servers:
                        executor.submit(get_ep, s)
                ret.extend([
                    it.clone(title=ep +
                             typo(it.contentLanguage, '_ [] color kod'),
                             contentSeason=int(ep.split('x')[0]),
                             contentEpisodeNumber=int(ep.split('x')[1]),
                             servers=[srv.tourl() for srv in episodes[ep]])
                    for ep in episodes
                ])
            else:
                ret.append(it)
        return sorted(ret, key=lambda i: i.title)
예제 #30
0
def findvideos(item):
    """Query the per-server ajax grabber endpoints for this episode.

    Matches the server tabs, finds this episode's data-id inside each
    server block, asks /ajax/episode/info for the grabber URL and
    accumulates them; server id 28 is a direct link and gets its own item.
    """
    log()
    itemlist = []

    matches, data = support.match(
        item,
        r'class="tab.*?data-name="([0-9]+)">([^<]+)</span',
        headers=headers)
    videoData = ''

    for serverid, servername in matches:
        # isolate this server's block, then the anchor for item.number
        block = scrapertoolsV2.find_multiple_matches(
            data, 'data-id="' + serverid + '">(.*?)<div class="server')
        log('ITEM= ', item)
        id = scrapertoolsV2.find_single_match(
            str(block),
            r'<a data-id="([^"]+)" data-base="' + item.number + '"')
        if id:
            # the ajax endpoint needs the XHR header and a timestamp
            dataJson = httptools.downloadpage(
                '%s/ajax/episode/info?id=%s&server=%s&ts=%s' %
                (host, id, serverid, int(time.time())),
                headers=[['x-requested-with', 'XMLHttpRequest']]).data
            json = jsontools.load(dataJson)
            videoData += '\n' + json['grabber']

            if serverid == '28':
                # server 28 serves the file directly
                itemlist.append(
                    Item(channel=item.channel,
                         action="play",
                         title='diretto',
                         quality='',
                         url=json['grabber'],
                         server='directo',
                         show=item.show,
                         contentType=item.contentType,
                         folder=False))

    return support.server(item, videoData, itemlist)