Code example #1
def calculate_stream(content, origin, referer):
    # get all links
    content = content.replace('//immortal.hydrax.net',
                              'http://immortal.hydrax.net')
    reg = re.findall(r'(http://immortal.hydrax.net/.*/.*/(.*)/(.*))/.*\n',
                     content)
    if reg:
        ms = []
        for prefix, part1, part2 in reg:
            link = 'http://immortal.hydrax.net/%s/%s' % (part1, part2)
            if link not in ms:
                ms.append(link)
            content = content.replace(prefix, link)
    else:
        ms = re.findall(r'(http://immortal.hydrax.net/.*/.*)/.*\n', content)
        ms = list(dict.fromkeys(ms))

    arequest = AsyncRequest()
    results = arequest.get(ms, headers={'Origin': origin, 'Referer': referer})

    max_targetduration = 12
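    # TARGETDURATION:12 is only a placeholder; it is patched below once the
    # longest merged segment duration is known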
    play_list = "#EXTM3U\n#EXT-X-VERSION:3\n#EXT-X-PLAYLIST-TYPE:VOD\n#EXT-X-TARGETDURATION:12\n#EXT-X-MEDIA-SEQUENCE:0\n"
    key_match = re.search(r"(#EXT-X-KEY.*?)\n", content)
    if key_match:
        play_list += key_match.group(0)

    for i, link in enumerate(ms):
        # re.escape also covers '.', '+' and other metacharacters the manual
        # replaces missed
        slashlink = re.escape(link)

        duration = 0
        lengthbyte = 0
        startbyte = 999999999

        segments = re.findall(
            r"(#EXTINF:([0-9]*\.?[0-9]+),\n#EXT-X-BYTERANGE:([0-9]+)@([0-9]+)(?:(?!#EXTINF).)*"
            + slashlink + ").*?\n", content, re.DOTALL)

        for segment in segments:
            duration += float(segment[1])
            # earliest start offset and total byte length across the segments
            # (the old and/or idiom returned the wrong value for offset 0)
            startbyte = min(startbyte, int(segment[3]))
            lengthbyte += int(segment[2])

        play_list += "#EXTINF:%s,\n" % duration
        play_list += "#EXT-X-BYTERANGE:%s@%s\n" % (lengthbyte, startbyte)
        play_list += "%s\n" % json.loads(results[i])['url']

        if duration > max_targetduration:
            max_targetduration = duration

    play_list = play_list.replace(
        "TARGETDURATION:12",
        "TARGETDURATION:" + str(int(math.ceil(max_targetduration))))
    play_list += "#EXT-X-ENDLIST\n"

    url = PasteBin().dpaste(play_list, name=referer, expire=60)
    return url
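
A minimal sketch of how `calculate_stream` might be driven, assuming the project's `Request` helper returns the playlist text; every URL, origin, and referer below is an illustrative placeholder, not a value from the source:

# Hypothetical driver for calculate_stream (placeholder URLs only)
origin = 'http://example-player.local'
referer = 'http://example-site.local/watch/ep-1'
content = Request().get('http://example-site.local/hydrax.m3u8',
                        headers={'Origin': origin, 'Referer': referer})
stream_url = calculate_stream(content, origin, referer)
print(stream_url)  # dpaste URL of the rewritten playlist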
Code example #2
    def get(self, response, origin_url=""):
        self.found_links = []
        movie = {
            'group': {},
            'episode': [],
            'links': [],
        }
        soup = BeautifulSoup(response, "html.parser")
        posts = soup.select(
            "ol.messageList > li.message > div.messageInfo > div.messageContent > article > blockquote"
        )
        for post in posts:
            self.extract_links(post)

        if len(self.found_links) > 0:
            arequest = AsyncRequest()
            results = arequest.get(self.found_links)
            for idx, result in enumerate(results):
                try:
                    name, size = FShareVN.get_info(content=result)
                except Exception:
                    print('Dead link %s' % self.found_links[idx])
                    continue

                if name:
                    movie['links'].append({
                        'link': self.found_links[idx],
                        'title': '[%s] %s' % (size, name),
                        'intro': name,
                        'type': 'Unknown',
                        'resolve': False,
                        'isFolder': FShareVN.is_folder(self.found_links[idx]),
                        'originUrl': origin_url
                    })
                else:
                    movie['links'].append({
                        'link': self.found_links[idx],
                        'title': self.found_links[idx],
                        'type': 'Unknown',
                        'resolve': False,
                        'isFolder': FShareVN.is_folder(self.found_links[idx]),
                        'originUrl': origin_url
                    })

        return movie
Code example #3
    def get_link(self, response, domain, originUrl, request):
        movie = {
            'group': {},
            'episode': [],
            'links': [],
        }
        # get all movie links
        soup = BeautifulSoup(response, "html.parser")
        servers = soup.select('ul.list-episode > li.episode > a.episode-link')

        # find subtitle
        subtitle = None
        match_sub = re.search(r'window\.subback\s?=\s?"(.*?)";', response)
        if match_sub:
            subtitle = match_sub.group(1)

        jobs = []
        links = []
        for server in servers:
            ep_id = server.get('data-ep')
            url = "%s/index.php" % domain
            params = {'ep': ep_id}
            jobs.append({
                'url': url,
                'params': params,
                'parser': Parser.extract_link
            })

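        # fire all episode requests concurrently; Parser.extract_link is
        # expected to append each parsed link into the shared `links` list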
        AsyncRequest(request=request).post(jobs, args=links)
        jobs = []
        movie_links = []

        for link in links:
            url = "%s/js/vkphp/plugins/gkpluginsphp.php" % domain
            jobs.append({
                'url': url,
                'params': {
                    'link': link
                },
                'parser': Parser.parse_link,
                'responseHeader': True
            })

        AsyncRequest(request=request).post(jobs, args=movie_links)

        for link in movie_links:
            movie['links'].append({
                'link': link[0],
                'title': 'Link %s' % link[1],
                'type': link[1],
                'resolve': False,
                'subtitle': subtitle,
                'originUrl': originUrl
            })

        return movie
Code example #4
def get_stream(url, header, base_path=None, action="HEAD"):
    req = Request()
    r = req.get(url, headers=header)

    if not base_path:
        base_url = urlparse(url)
        base_url = base_url.scheme + '://' + base_url.netloc
    else:
        base_url = base_path

    if re.search('EXT-X-STREAM-INF', r):
        ad_url = get_adaptive_link(r)
        if 'http' not in ad_url:
            ad_url = base_url + ad_url
        r = req.get(ad_url, headers=header)

    playlist = ""
    links = []
    is_redirect = True
    lines = r.splitlines()
    for line in lines:
        if len(line) > 0:
            # guess link
            if not line.startswith('#'):
                if 'http' in line:
                    path = line
                elif line.startswith('//'):
                    path = "{}{}".format("https:", line)
                elif line.startswith('/'):
                    # absolute path: join without doubling the slash
                    path = "{}{}".format(base_url, line)
                else:
                    path = "{}/{}".format(base_url, line)

                if 'vdacdn.com' in path:
                    is_redirect = False
                    path = path.replace('https://', 'http://')

                if 'cdnplay.xyz' in path:
                    is_redirect = False

                # path += "|%s" % urlencode(header)
                links.append({'url': path, 'parser': parse_link, 'responseHeader': True})
            else:
                path = line
            playlist += '%s\n' % path

    if is_redirect and len(playlist) > 0:
        arequest = AsyncRequest(request=req)
        results = arequest.get(links, redirect=False, headers=header, verify=False)
        for i in range(len(links)):
            playlist = playlist.replace(links[i].get('url'), results[i])

    url = PasteBin().dpaste(playlist, name='adaptivestream', expire=60)
    return url
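
`get_adaptive_link` is not part of this listing; a plausible sketch matching the single-argument call above (example #10 uses an extended variant), assuming the helper simply returns the variant URI with the highest advertised BANDWIDTH from a master playlist. The regex and selection policy are assumptions, not the project's confirmed code:

import re

def get_adaptive_link(master_playlist):
    # pair each #EXT-X-STREAM-INF line with the URI on the following line
    variants = re.findall(
        r'#EXT-X-STREAM-INF:[^\n]*BANDWIDTH=(\d+)[^\n]*\n([^#\s][^\n]*)',
        master_playlist)
    if not variants:
        return ''
    # pick the highest-bandwidth variant (assumed policy)
    return max(variants, key=lambda v: int(v[0]))[1]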
Code example #5
File: phimmoi.py Project: chiennv97/bimozie
def get_link_deprecated(url, originURL):
    req = Request()
    response = req.get(url)

    # found playlist
    if re.search('EXT-X-STREAM-INF', response):
        resolutions = re.findall(r'RESOLUTION=\d+x(\d+)', response)
        matches = re.findall(r'(http.*)\r', response)
        # prefer the highest available resolution
        for res in ('1080', '720', '480'):
            if res in resolutions:
                url = matches[resolutions.index(res)]
                break

        response = Request().get(url,
                                 headers={'origin': 'http://www.phimmoi.net'})

    links = re.findall(r'(https?://(?!so-trym).*)\r', response)
    if links:
        media_type = 'hls4'
        arequest = AsyncRequest(request=req, retry=2)
        results = arequest.head(links,
                                headers={
                                    'origin': 'http://www.phimmoi.net',
                                    'referer': originURL
                                },
                                redirect=False)

        for i in range(len(links)):
            try:
                response = response.replace(links[i],
                                            results[i].headers['location'])
            except Exception:
                print(links[i], results[i].headers)
    else:
        media_type = 'hls3'

    stream_url = PasteBin().dpaste(response, name=url, expire=60)
    playlist = "#EXTM3U\n#EXT-X-VERSION:3\n"
    playlist += "#EXT-X-STREAM-INF:BANDWIDTH=3998000,RESOLUTION=9999x9999\n"
    playlist += "%s\n" % stream_url
    url = PasteBin().dpaste(playlist, name=url, expire=60)
    # media_type is 'hls3' or 'hls4' here, so match by prefix
    if media_type.startswith('hls'):
        url += '|referer=' + urllib.quote_plus(originURL)

    return url, media_type
Code example #6
    def get_link(self, response, domain):
        movie = {
            'group': {},
            'episode': [],
            'links': [],
        }

        soup = BeautifulSoup(response, "html.parser")
        servers = soup.select("div#ploption > a.btn-sv")
        jobs = [{
            'url': '%s%s' % (domain, i.get('data-href')),
            'parser': Parser.extract_link
        } for i in servers]

        links = []
        AsyncRequest().get(jobs, args=links)

        if len(links) > 1:
            try:
                links = sorted(links,
                               key=lambda elem: int(re.search(r'(\d+)', elem[1]).group(1)),
                               reverse=True)
            except Exception as e:
                print(e)

        for link in links:
            movie['links'].append({
                'link': link[0],
                'title': 'Link %s' % link[1],
                'type': link[1],
                'resolve': False
            })

        return movie
Code example #7
    def get_link(self, response, domain, originURL, request):
        helper.log(
            "***********************Get Movie Link*****************************"
        )
        movie = {
            'group': {},
            'episode': [],
            'links': [],
        }

        # get server list
        soup = BeautifulSoup(response, "html.parser")
        servers = soup.select('div#clicksv > span.btn')
        m_id = re.search(r'var id_ep = (.*)', response).group(1)

        jobs = [{
            'url': '%s/player/player.php' % domain,
            'parser': Parser.extract_link,
            'params': {
                'ID': m_id,
                'SV': i.get('id').replace('sv', '')
            }
        } for i in servers]
        group_links = AsyncRequest(request=request).post(jobs)
        for link in group_links:
            if link:
                movie['links'].append({
                    'link': link,
                    'title': 'Link HD',
                    'type': 'Unknown',
                    'originUrl': originURL,
                    'resolve': False
                })

        return movie
Code example #8
    def get_link(self, response, domain, movie_url):
        movie = {
            'group': {},
            'episode': [],
            'links': [],
        }

        soup = BeautifulSoup(response, "html.parser")
        servers = soup.select("div#ploption > a.btn-sv")

        jobs = [{
            'url': '%s%s' % (domain, i.get('data-href')),
            'parser': Parser.extract_link
        } for i in servers]
        group_links = AsyncRequest().get(jobs)

        for links in group_links:
            for link in links:
                movie['links'].append({
                    'link': link[0],
                    'title': 'Link %s' % link[1],
                    'type': link[1],
                    'originUrl': movie_url,
                    'resolve': False
                })

        return movie
Code example #9
    def get_hydrax_stream(self, stream):
        txt = "#EXTM3U\n#EXT-X-VERSION:4\n#EXT-X-PLAYLIST-TYPE:VOD\n#EXT-X-TARGETDURATION:" + stream[
            'duration'] + "\n#EXT-X-MEDIA-SEQUENCE:0\n"
        links = []
        if stream['type'] == 2:
            i, j = 0, 0
            for ranges in stream['multiRange']:
                p = 0
                for xrange in ranges:
                    txt += "#EXTINF:%s,\n" % stream['extinf'][i]
                    txt += "#EXT-X-BYTERANGE:%s\n" % xrange
                    g, y = xrange.split('@')
                    g = int(g)
                    y = int(y)
                    # absolute byte window f..p for this segment (ternaries
                    # avoid the and/or idiom, which misfires on falsy values)
                    f = p + 1 if i > 0 else y
                    p = f + g - 1 if y else g - 1
                    part = '%s-%s.js' % (f, p)

                    url = "%s/%s/%s/%s/%s/%s" % (
                        'http://immortal.hydrax.net',
                        stream['id'],
                        stream['range'][j],
                        stream['expired'],
                        stream['multiData'][j]['file'],
                        part
                    )

                    links.append(url)
                    txt += "%s\n" % url
                    i += 1
                j += 1

        txt += "#EXT-X-ENDLIST\n"

        arequest = AsyncRequest()
        results = arequest.head(links, headers={
            'origin': 'http://www.phimmoi.net'
        })
        for i in range(len(links)):
            try:
                txt = txt.replace(links[i], results[i].headers['location'])
            except Exception:
                print(links[i])
                # print(results[i].headers)

        url = PasteBin().dpaste(txt, name=stream['id'], expire=60)
        return url
Code example #10
def get_stream(url, header):
    req = Request()
    r = req.get(url, headers=header)

    base_url = urlparse(url)
    base_url = base_url.scheme + '://' + base_url.netloc

    if re.search('EXT-X-STREAM-INF', r):
        r = get_adaptive_link(r, req, base_url, header)

    playlist = ""
    links = []
    is_redirect = True
    for line in r.splitlines():
        if len(line) > 0:
            # guess link
            if not line.startswith('#'):
                if line.startswith('http'):
                    path = line
                else:
                    path = "{}{}".format(base_url, line)

                if 'vdacdn.com' in path:
                    is_redirect = False
                    path = path.replace('https://', 'http://')

                # path += "|%s" % urlencode(header)
                links.append({
                    'url': path,
                    'parser': parse_link,
                    'responseHeader': True
                })
            else:
                path = line

            playlist += '%s\n' % path

    if is_redirect and len(playlist) > 0:
        arequest = AsyncRequest(request=req)
        results = arequest.get(links, redirect=False, headers=header)
        for i in range(len(links)):
            playlist = playlist.replace(links[i].get('url'), results[i])

    url = PasteBin().dpaste(playlist, name='dongphim', expire=60)
    return url
Code example #11
    def get_stream(self, url):
        req = Request()
        r = req.get(url)
        str = ""
        links = []
        for line in r.splitlines():
            if len(line) > 0:
                if re.match('http', line):
                    links.append(line)
                str += '%s\n' % line

        arequest = AsyncRequest(request=req)
        results = arequest.head(links)
        for i in range(len(links)):
            str = str.replace(links[i], results[i].headers['Location '])

        url = PasteBin().dpaste(str, name='animiehay', expire=60)
        return url
Code example #12
    def get_stream(self, url):
        req = Request()
        r = req.get(url)
        str = ""
        links = []
        for line in r.splitlines():
            if len(line) > 0:
                if re.match('http', line):
                    links.append(line)
                str += '%s\n' % line

        arequest = AsyncRequest(request=req)
        results = arequest.head(links)
        for i in range(len(links)):
            try:
                str = str.replace(links[i], results[i].headers['Location '])
            except:
                pass

        url = PasteBin().dpaste(str, name='animiehay', expire=60)
        return url
Code example #13
    def get_hls_playlist_stream(self, url):
        req = Request()
        response = req.get(url)

        links = re.findall(r'(https?://(?!so-trym).*)\r', response)
        if links:
            arequest = AsyncRequest(request=req)
            results = arequest.head(links, headers={
                'origin': 'http://www.phimmoi.net',
                'referer': self.originURL
            }, redirect=False)

            for i in range(len(links)):
                response = response.replace(links[i], results[i].headers['location'])

        links = re.findall(r'(http://so-trym.*)\r', response)
        if links:
            for i in range(len(links)):
                url = '%s|referer=%s' % (links[i], self.originURL)
                response = response.replace(links[i], url)

        url = PasteBin().dpaste(response, name=url, expire=60)
        return url
Code example #14
    def get(self, response, page=1, domain=''):

        channel = {
            'page': page,
            'page_patten': None,
            'movies': []
        }

        soup = BeautifulSoup(response, "html.parser")
        # get total page
        last_page = soup.select_one('div.PageNav')
        helper.log("*********************** Get pages ")
        if last_page is not None:
            channel['page'] = int(last_page.get('data-last'))

        jobs = []

        for movie in soup.select('li.discussionListItem'):
            # if 'sticky' in movie.get('class'): continue
            tag = movie.select_one('div.listBlock.main a.PreviewTooltip')
            try:
                title = py2_encode(tag.text.strip())
                thumb = None

                movie = {
                    'id': tag.get('href'),
                    'label': title,
                    'title': title,
                    'intro': title,
                    'realtitle': title,
                    'thumb': thumb,
                    'type': None
                }

                if 'true' in helper.getSetting('hdvietnam.extra'):
                    jobs.append({
                        'url': '%s/%s' % (domain, movie['id']),
                        'parser': Parser.parse_post,
                        'args': movie
                    })
                else:
                    channel['movies'].append(movie)
            except Exception:
                helper.log(tag)

        if 'true' in helper.getSetting('hdvietnam.extra'):
            channel['movies'] = AsyncRequest(thread=10).get(jobs)
        return channel
Code example #15
File: movie.py Project: chiennv97/bimozie
    def get(self, response, skipEps=False):
        movie = {
            'group': {},
            'episode': [],
            'links': [],
        }
        soup = BeautifulSoup(response, "html.parser")
        # get all server list
        servers = soup.select("ul.ipsDataList > div#extraFields > li")

        # get subtitle link
        subtitle = None
        try:
            subtitle = soup.select_one(
                "ul.ipsDataList > div#extraFields > li a.ipsType_success").get(
                    'href')
        except Exception:
            pass

        server = servers[-1]
        items = server.select('> span.ipsDataItem_main a')

        links = []
        for link in items:
            if link and 'fshare' in link.get('href'):
                links.append(link.get('href'))

        if len(links) > 0:
            results = AsyncRequest().get(links)
            for idx, result in enumerate(results):
                try:
                    name, size = FShareVN.get_info(content=result)
                    movie['links'].append({
                        'link': links[idx],
                        'title': '[%s] %s' % (size, name),
                        'type': 'Unknown',
                        'subtitle': subtitle,
                        'resolve': False
                    })
                except Exception:
                    print('Dead link %s' % links[idx])
                    continue

        return movie
Code example #16
    def get(self, response, skipEps=False):
        movie = {
            'group': {},
            'episode': [],
            'links': [],
        }
        soup = BeautifulSoup(response, "html.parser")
        # get all server list
        servers = soup.select("ul.ipsDataList > div#extraFields > li")

        # get subtitle link
        subtitle = None
        try:
            subtitle = soup.select_one("ul.ipsDataList > div#extraFields > li a.ipsType_success").get('href')
        except Exception:
            pass

        server = servers[-1]
        items = server.select('span.ipsDataItem_main a')

        links = []
        for link in items:
            href = link.get('href')
            # only call extract_code on fshare links
            if link and 'fshare' in href:
                links.append(
                    'https://www.fshare.vn/api/v3/files/folder?linkcode=%s'
                    % FShareVN.extract_code(href))

        if len(links) > 0:
            results = AsyncRequest().get(links)
            for idx, result in enumerate(results):
                try:
                    link = items[idx].get('href')
                    name, size = FShareVN.get_asset_info(content=result)
                    movie['links'].append({
                        'link': link,
                        'title': '[%s] %s' % (size, name),
                        'type': 'Fshare',
                        'isFolder': FShareVN.is_folder(link),
                        'subtitle': subtitle,
                        'resolve': False
                    })
                except Exception:
                    print('Dead link %s' % items[idx].get('href'))
                    continue

        return movie
Code example #17
    def get_link(self, response, domain, request, originURL):
        movie = {
            'group': {},
            'episode': [],
            'links': [],
        }

        self.originURL = originURL
        response = re.search(r'"source":(\[.*?\])', response)
        if response:
            response = json.loads(response.group(1), encoding='utf-8')
            if len(response) > 0:
                jobs = []
                for file in response:
                    if 'HDX' not in file['namesv']:
                        url = CryptoAES().decrypt(file['link'], file['key'])
                        if 'stream' in file['typeplay']:
                            jobs.append({
                                'url': url,
                                'parser': Parser.extract_link
                            })
                        else:
                            movie['links'].append({
                                'link': url,
                                'title': 'Link %s' % file['namesv'],
                                'type': file['namesv'],
                                'resolve': False,
                                'originUrl': originURL
                            })

                AsyncRequest(request=request, retry=50, thread=1).get(
                    jobs,
                    headers={
                        # 'origin': 'https://xomphimhay.com',
                        'referer': originURL
                    },
                    args=(movie['links'], originURL))
        return movie
Code example #18
    def get_link(self, response, domain, originUrl):
        movie = {
            'group': {},
            'episode': [],
            'links': [],
        }
        # get all movie links
        soup = BeautifulSoup(response, "html.parser")
        servers = soup.select(
            'div.list-server > div.server-item > div.option > span')
        movie_id = re.search(r"MovieID\s?=\s?'(.*?)';", response).group(1)

        ep_id = soup.select_one('ul.list-episode > li > a.current')
        if ep_id:
            ep_id = ep_id.get('data-id')
        else:
            ep_id = re.search(r"EpisodeID\s?=\s?'(.*?)',", response).group(1)

        jobs = []
        links = []
        for server in servers:
            sv_id = server.get('data-index')
            url = "%s/ajax/player/" % domain
            params = {'id': movie_id, 'ep': ep_id, 'sv': sv_id}
            jobs.append({
                'url': url,
                'params': params,
                'parser': Parser.extract_link
            })

        AsyncRequest().post(jobs, args=links)

        for link in links:
            movie['links'].append({
                'link': link[0],
                'title': 'Link %s' % link[1],
                'type': link[1],
                'resolve': False,
                'originUrl': originUrl
            })

        return movie
Code example #19
    def get_link(self, data, domain):
        movie = {
            'group': {},
            'episode': [],
            'links': [],
        }

        # get all server list
        # data = postid|serverid|epid|nounce
        params = {
            'action': 'halim_get_listsv',
            'episode': data[2],
            'server': data[1],
            'postid': data[0],
            'nonce': data[3],
        }

        jobs = []
        url = "%s/wp-admin/admin-ajax.php" % domain
        response = Request().post(url, params)

        soup = BeautifulSoup(response, "html.parser")
        servers = soup.select("span")
        for server in servers:
            params = {
                'action': 'halim_ajax_player',
                # 'action': 'halim_play_listsv',
                'episode': data[2],
                'server': data[1],
                'postid': data[0],
                'nonce': data[3],
                # 'ep_link': server.get('data-url')
            }
            jobs.append({
                'url': url,
                'params': params,
                'parser': Parser.extract_link
            })
        AsyncRequest().post(jobs, args=movie['links'])

        return movie
Code example #20
    def get_link(self, response, originUrl, domain, request):
        movie = {
            'group': {},
            'episode': [],
            'links': [],
        }

        sources = re.search(r'"sourceLinks": (\[.*\]),', response)
        if sources:
            sources = sources.group(1)
            for source in json.loads(sources):
                for link in source['links']:
                    movie['links'].append({
                        'link': link['file'].replace('\\', ''),
                        'title': 'Link %s' % link['label'].encode('utf-8'),
                        'type': link['label'].encode('utf-8'),
                        'resolve': False
                    })

            return movie

        sources = re.search("var urlPlay = '(.*)';", response)
        if sources:
            sources = sources.group(1)
            response = Request().get(sources)
            sources = re.search("var sources = (.*);", response)
            if sources:
                sources = json.loads(sources.group(1))
                if isinstance(sources, dict):
                    if 'file' in sources:
                        movie['links'].append({
                            'link': sources['file'].replace('\\', ''),
                            'title': 'Link %s' % sources['type'].encode('utf-8'),
                            'type': sources['type'].encode('utf-8'),
                            'originUrl': originUrl,
                            'resolve': False
                        })
                else:
                    for source in sources:
                        movie['links'].append({
                            'link': source['file'].replace('\\', ''),
                            'title': 'Link %s' % source['type'].encode('utf-8'),
                            'type': source['type'].encode('utf-8'),
                            'originUrl': originUrl,
                            'resolve': False
                        })

                return movie

        sources = re.search("<iframe.*src=\"(.*)\"", response)
        if sources:
            source = sources.group(1)
            title = 'Movie3s' if 'movie3s.net' in source else 'Unknown'
            movie['links'].append({
                'link': source,
                'title': 'Link %s' % title,
                'type': 'file',
                'originUrl': originUrl,
                'resolve': False
            })
            return movie

        soup = BeautifulSoup(response, "html.parser")
        # get all server list
        servers = soup.select("span.btn-link-backup.episode-link")
        if len(servers) > 0:
            jobs = []
            links = []
            m_id = re.search(r'var\s?MovieID\s?=\s?(\d+);', response).group(1)
            ep_id = re.search(r'var\s?EpisodeID\s?=\s?(\d+);',
                              response).group(1)
            csrf = re.search(r'name="csrf-token"\s?content="(.*)">',
                             response).group(1)
            for server in servers:
                sv_id = server.get('data-index')
                url = "%s/api/player.html" % domain
                params = {'id': m_id, 'ep': ep_id, 'sv': sv_id}
                jobs.append({
                    'url': url,
                    'params': params,
                    'headers': {
                        'X-CSRF-TOKEN': csrf
                    },
                    'parser': Parser.extract_link
                })

            AsyncRequest(request=request).post(jobs, args=links)
            for link in links:
                title = 'Movie3s' if 'movie3s.net' in link else 'Unknown'
                movie['links'].append({
                    'link': link,
                    'title': 'Link %s' % title,
                    'type': 'file',
                    'originUrl': originUrl,
                    'resolve': False
                })

        return movie
Code example #21
def get_hydrax_phimmoi_stream(stream, n):
    global origin

    txt = "#EXTM3U\n#EXT-X-VERSION:4\n#EXT-X-PLAYLIST-TYPE:VOD\n#EXT-X-TARGETDURATION:" + stream[
        'duration'] + "\n#EXT-X-MEDIA-SEQUENCE:0\n"

    if 'hash' in stream:
        txt += "#EXT-X-HASH:%s\n" % stream['hash']
        txt += "#EXT-X-KEY:METHOD=AES-128,URI=\"%s\",IV=%s\n" % (
            stream['hash'], stream['iv'])

    links = []

    r = len(stream['range'])
    o = len(n)
    a = stream.get('expired')
    s = 0
    l = stream['multiRange']
    h = len(l)
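    # single-letter names carried over from the obfuscated player JS:
    # r = segment count, o = CDN node count (n is the node list),
    # a = expiry token, s = node cursor, l = byte-range groups, h = group count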

    if stream['type'] == 2:
        r = 0
        for t in range(h):
            u = stream['multiData'][t]['file']
            f = 0
            p = 0

            for d in range(len(l[t])):
                if s < o:
                    c = n[s]
                    s += 1
                else:
                    s = 1
                    c = n[0]

                txt += "#EXTINF:%s,\n" % stream['extinf'][r]
                txt += "#EXT-X-BYTERANGE:%s\n" % l[t][d]

                y = l[t][d]

                c = "http://" + c
                # c += stream['id'] and "/" + stream['id'] + "/" + stream['range'][t] or ""
                if '@' in l[t][d]:
                    g, y = l[t][d].split('@')
                    g, y = int(g), int(y)
                    # absolute byte window f..p for this segment
                    f = p + 1 if d else y
                    p = f + g - 1 if y else g - 1
                    y = '%s-%s' % (f, p)

                if a:
                    url = c + "/" + a + "/" + u
                else:
                    url = c + "/" + str(r) + "/" + str(u)
                # url += stream['id'] and "/" + y + ".js" or "/" + y + ".jpg"
                if url not in links:
                    links.append(url)

                txt += url + "\n"
                r += 1
            if h == t + 1:
                txt += "#EXT-X-ENDLIST"

    elif stream['type'] == 3:
        for t in range(h):
            u = stream['multiData'][t]['file']
            if s < o:
                c = n[s]
                s += 1
            else:
                s = 1
                c = n[0]

            txt += "#EXTINF:" + stream['extinf'][t] + ",\n"
            c = "http://" + c
            # e.id && (c = c + "/" + e.id)
            if stream['id']:
                c += "/" + stream['id']
            ext = "js" if stream['id'] else "jpg"
            if a:
                url = c + "/basic/" + a + "/" + u + "." + ext
            else:
                # str(r): r is an int and would break string concatenation
                url = c + "/basic/" + str(r) + "/" + u + "." + ext

            if url not in links:
                links.append(url)

            txt += url + "\n"
            r += 1
            if h == t + 1:
                txt += "#EXT-X-ENDLIST"

    arequest = AsyncRequest()

    results = arequest.get(links, headers={'origin': origin})

    media_urls = []
    for i in range(len(links)):
        try:
            media_url = json.loads(results[i])['url']
            txt = txt.replace(links[i], media_url)
            if media_url not in media_urls:
                media_urls.append(media_url)
        except Exception:
            print(links[i])

    if stream['type'] == 2:
        max_targetduration = 12
        play_list = "#EXTM3U\n#EXT-X-VERSION:4\n#EXT-X-PLAYLIST-TYPE:VOD\n#EXT-X-TARGETDURATION:12\n#EXT-X-MEDIA-SEQUENCE:0\n"
        if 'hash' in stream:
            path = helper.write_file('hydrax.m3u8',
                                     stream['hash'].encode(),
                                     binary=True)
            path = path.replace('\\', '/')
            # url = PasteBin().dpaste(stream['hash'], name='hydrax.key', expire=60)
            play_list += "#EXT-X-KEY:METHOD=AES-128,URI=\"file://%s\",IV=%s\n" % (
                path, stream['iv'])

        for link in media_urls:
            slashlink = re.escape(link)
            segments = re.findall(
                r"(#EXTINF:([0-9]*\.?[0-9]+),\n#EXT-X-BYTERANGE:([0-9]+)@([0-9]+)(?:(?!#EXTINF).)*"
                + slashlink + ")", txt, re.DOTALL)
            duration = 0
            lengthbyte = 0
            startbyte = 999999999
            for segment in segments:
                duration += float(segment[1])
                startbyte = min(startbyte, int(segment[3]))
                lengthbyte += int(segment[2])

            play_list += "#EXTINF:%s,\n" % duration
            play_list += "#EXT-X-BYTERANGE:%s@%s\n" % (lengthbyte, startbyte)
            play_list += "%s\n" % link
            if duration > max_targetduration:
                max_targetduration = duration

        play_list = play_list.replace(
            "TARGETDURATION:12",
            "TARGETDURATION:" + str(int(math.ceil(max_targetduration))))
        play_list += "#EXT-X-ENDLIST"
    elif stream['type'] == 3:
        play_list = txt

    url = PasteBin().dpaste(play_list, name=stream['id'], expire=60)
    return url
Code example #22
def get_hydrax_phimmoi_stream(stream, n):
    global origin

    txt = "#EXTM3U\n#EXT-X-VERSION:4\n#EXT-X-PLAYLIST-TYPE:VOD\n#EXT-X-TARGETDURATION:" + str(
        stream['duration']) + "\n#EXT-X-MEDIA-SEQUENCE:0\n"

    if 'hash' in stream:
        txt += "#EXT-X-HASH:%s\n" % stream['hash']
        txt += "#EXT-X-KEY:METHOD=AES-128,URI=\"%s\",IV=%s\n" % (
            stream['hash'], stream['iv'])
        # helper.message('Encrypt not supported', 'Hydrax')
        # return ""

    links = []
    hashlist = []

    r = s = 0
    a = stream.get('expired')

    if stream['type'] == 2:
        o = len(n)
        l = stream['multiRange']
        h = len(l)
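        # same obfuscated JS naming as the previous example: o = node count,
        # l = byte-range groups, h = group count; r and s advance per segment/node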

        for t in range(h):
            u = stream['multiData'][t]['file']
            f = 0
            p = 0

            for d in range(len(l[t])):
                if s < o:
                    c = n[s]
                    s += 1
                else:
                    s = 1
                    c = n[0]

                txt += "#EXTINF:%s,\n" % stream['extinf'][r]
                txt += "#EXT-X-BYTERANGE:%s\n" % l[t][d]

                y = l[t][d]

                c = "http://" + c
                # c += stream['id'] and "/" + stream['id'] + "/" + stream['range'][t] or ""
                if '@' in l[t][d]:
                    g, y = l[t][d].split('@')
                    g, y = int(g), int(y)
                    # absolute byte window f..p for this segment
                    f = p + 1 if d else y
                    p = f + g - 1 if y else g - 1
                    y = '%s-%s' % (f, p)

                if a:
                    url = c + "/" + a + "/" + u
                else:
                    url = c + "/" + str(r) + "/" + str(u)
                # url += stream['id'] and "/" + y + ".js" or "/" + y + ".jpg"
                if url not in links:
                    # find hash
                    match = re.search(r"immortal.hydrax.net/\d+/(.*?)$", url)
                    if match and match.group(1) not in hashlist:
                        links.append(url)
                        hashlist.append(match.group(1))
                    elif not match:
                        links.append(url)

                txt += url + "\n"
                r += 1
            if h == t + 1:
                txt += "#EXT-X-ENDLIST"

    elif stream['type'] == 3:
        d = stream['ranges']
        l = len(d)
        o = stream['expired']
        a = s = 0
        u = stream['datas']
        for t in range(l):
            f = u[t]['file']
            for p in range(len(d[t])):
                # round-robin across the CDN nodes in n (the original compared
                # against r, which is still 0 here, so only n[0] was ever used)
                if a < len(n):
                    c = n[a]
                    a += 1
                else:
                    a = 1
                    c = n[0]

                # the segment lines below must run for every range; in the
                # original they were mistakenly indented under the else branch
                y = d[t][p]
                c = "http://" + c

                txt += "#EXTINF:%s,\n" % stream['extinfs'][s]
                txt += "#EXT-X-BYTERANGE:%s\n" % y
                if o:
                    url = c + "/" + o + "/" + f + "/" + y
                else:
                    url = c + "/" + str(s) + "/" + f + "/" + y

                txt += "%s\n" % url
                s += 1
            if l == t + 1:
                txt += "#EXT-X-ENDLIST"

        # for t in range(l):
        #     u = stream['datas'][t]['file']
        #     if s < o:
        #         c = n[s]
        #         s += 1
        #     else:
        #         s = 1
        #         c = n[0]
        #
        #     txt += "#EXTINF:" + stream['extinfs'][t] + ",\n"
        #     c = "http://" + c
        #     # e.id && (c = c + "/" + e.id)
        #     c += stream['id'] and "/" + stream['id'] or ""
        #     url = a and c + "/basic/" + a + "/" + u + "." + (
        #             stream['id'] and "js" or "jpg") or c + "/basic/" + r + "/" + u + "." + (
        #                   stream['id'] and "js" or "jpg")
        #
        #     if url not in links:
        #         links.append(url)
        #
        #     txt += url + "\n"
        #     r += 1
        #     if h == t + 1:
        #         txt += "#EXT-X-ENDLIST"

    arequest = AsyncRequest()
    results = arequest.get(links, headers={'origin': origin})

    media_urls = []
    for i in range(len(links)):
        try:
            media_url = json.loads(results[i])['url']
            txt = txt.replace(links[i], media_url)
            if media_url not in media_urls:
                media_urls.append(media_url)
        except Exception:
            print(links[i])

    if stream['type'] == 2:
        max_targetduration = 12
        play_list = "#EXTM3U\n#EXT-X-VERSION:3\n#EXT-X-PLAYLIST-TYPE:VOD\n#EXT-X-TARGETDURATION:12\n#EXT-X-MEDIA-SEQUENCE:0\n"
        if 'hash' in stream:
            # path = helper.write_file('hydrax.m3u8', stream['hash'], binary=True)
            # path = path.replace('\\', '/')
            # path = "http://localhost/portal/hydrax.m3u8"
            path = "encrypted-file://" + stream['hash']
            # url = PasteBin().dpaste(stream['hash'], name='hydrax.key', expire=60)
            play_list += "#EXT-X-KEY:METHOD=AES-128,URI=\"%s\",IV=%s\n" % (
                path, stream['iv'])

        for index, link in enumerate(media_urls):
            if len(hashlist) > 0:
                slashlink = hashlist[index]
            else:
                slashlink = re.escape(link)

            segments = re.findall(
                r"(#EXTINF:([0-9]*\.?[0-9]+),\n#EXT-X-BYTERANGE:([0-9]+)@([0-9]+)(?:(?!#EXTINF).)*"
                + slashlink + ")", txt, re.DOTALL)
            duration = 0
            lengthbyte = 0
            startbyte = 999999999
            for segment in segments:
                duration += float(segment[1])
                startbyte = min(startbyte, int(segment[3]))
                lengthbyte += int(segment[2])

            play_list += "#EXTINF:%s,\n" % duration
            play_list += "#EXT-X-BYTERANGE:%s@%s\n" % (lengthbyte, startbyte)
            play_list += "%s\n" % link
            if duration > max_targetduration:
                max_targetduration = duration

        play_list = play_list.replace(
            "TARGETDURATION:12",
            "TARGETDURATION:" + str(int(math.ceil(max_targetduration))))
        play_list += "#EXT-X-ENDLIST"
    elif stream['type'] == 3:
        play_list = txt

    url = PasteBin().dpaste(play_list, name='hydrax', expire=60)
    return url
Code example #23
    def get_link(self, response, url, request):
        print("***********************Get Movie Link*****************************")
        movie = {
            'group': {},
            'episode': [],
            'links': [],
        }
        self.originURL = url
        url = self.get_token_url(response)
        response = Request().get(url)

        self.key = self.get_decrypt_key(response)
        if not self.key:
            return movie

        jsonresponse = re.search("_responseJson='(.*)';", response).group(1)
        jsonresponse = json.loads(jsonresponse.decode('utf-8'))

        # if jsonresponse['medias']:
        #     media = sorted(jsonresponse['medias'], key=lambda elem: elem['resolution'], reverse=True)
        #     for item in media:
        #         url = CryptoAES().decrypt(item['url'], bytes(self.key.encode('utf-8')))
        #         if not re.search('hls.phimmoi.net', url):
        #             movie['links'].append({
        #                 'link': url,
        #                 'title': 'Link %s' % item['resolution'],
        #                 'type': item['resolution'],
        #                 'resolve': False,
        #                 'originUrl': self.originURL
        #             })
        #         else:
        #             # hls.phimmoi.net
        #             movie['links'].append({
        #                 'link': url,
        #                 'title': 'Link hls',
        #                 'type': 'hls',
        #                 'resolve': False,
        #                 'originUrl': self.originURL
        #             })

        if jsonresponse.get('embedUrls'):
            for item in jsonresponse.get('embedUrls'):
                url = self.get_url(CryptoAES().decrypt(item, bytes(self.key.encode('utf-8'))))
                if not re.search('hydrax', url):
                    movie['links'].append({
                        'link': url,
                        'title': 'Link Unknown',
                        'type': 'mp4',
                        'resolve': False,
                        'originUrl': self.originURL
                    })
                else:
                    movie['links'].append({
                        'link': url,
                        'title': 'Link hydrax',
                        'type': 'hls',
                        'resolve': False,
                        'originUrl': self.originURL
                    })

        if jsonresponse['thirdParty']:
            jobs = []
            for item in jsonresponse['thirdParty']:
                if 'hydrax.html' not in item.get('embed'):
                    jobs.append({'url': item.get('embed'), 'headers': {
                        "Referer": self.originURL
                    }, 'parser': self.parse_thirdparty_link})

            AsyncRequest(request=request).get(jobs, args=movie['links'])

        return movie