Example #1
    def search(cls, query):
        # V3 not supported
        v1 = helpers.soupify(helpers.post("https://v1.nmtvjxdtx42qdwktdxjfoikjq.workers.dev/",
                                          data={"q2": query}, verify=False).json()['result']).select('p.name > a')
        v5 = helpers.soupify(helpers.post("https://animixplay.to/api/search/",
                                          data={"qfast": query}, verify=False).json()['result']).select('p.name > a')

        # v3 = helpers.soupify(helpers.post("https://v3.w0ltfgqz8y3ygjozgs4v.workers.dev/",
        #                                  data={"q3": query}, verify=False).json()['result'])

        # v3 and v4 return a 400 error if there are no results.
        # HTTPError doesn't seem to propagate through helpers, hence the broad except below.

        try:
            v4 = helpers.soupify(helpers.post("https://v4.w0ltfgqz8y3ygjozgs4v.workers.dev/",
                                              data={"q": query}, verify=False).json()['result']).select('p.name > a')
        except Exception:
            v4 = []

        # meta stores the version key from versions_dict
        versions_dict = {'v1': v1, 'v4': v4, 'v5': v5}
        logger.debug('Versions: {}'.format(versions_dict))
        data = []
        for version, anchors in versions_dict.items():
            for anchor in anchors:
                data.append(SearchResult(
                    title=anchor.text,
                    url='https://animixplay.com' + anchor.get('href'),
                    meta={'version': version},
                    meta_info={
                        'version_key_dubbed': '(Dub)',
                    }
                ))

        return data
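A small helper could centralize the "400 on empty results" handling that the try/except above works around. A minimal sketch using plain requests and BeautifulSoup (the helper name safe_post_soup and the direct requests usage are illustrative, not part of the original helpers module):

    import requests
    from bs4 import BeautifulSoup

    def safe_post_soup(url, data):
        """POST to a search endpoint and soupify the result, treating the
        HTTP 400 returned on empty result sets as 'no matches'."""
        try:
            resp = requests.post(url, data=data, verify=False)
            resp.raise_for_status()
        except requests.exceptions.HTTPError:
            return []
        return BeautifulSoup(resp.json()['result'], 'html.parser').select('p.name > a')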
Example #2
    def _scrape_episodes(self):
        url = self.url
        soup = helpers.soupify(helpers.get(url))
        # v1 and v3 are embedded video players
        # v2 and v4 are JSON POST requests
        if '/v2/' in self.url or '/v4/' in self.url:
            # Uses the id in the url and encodes it twice
            # NaN and N4CP9Eb6laO9N are permanent encoded variables found in
            # https://animixplay.com/assets/v4.min.js
            url_id = str.encode(self.url.split("/")[4])
            post_id = f'NaN{base64.b64encode(url_id).decode()}N4CP9Eb6laO9N'.encode()
            post_id = base64.b64encode(post_id).decode()
            data_id = 'id2' if '/v4/' in self.url else 'id'
            # In extremely rare cases the anime isn't loaded and must be generated by the server first
            try:
                data = (helpers.post('https://animixplay.com/raw/2ENCwGVubdvzrQ2eu4hBH',
                        data={data_id: post_id}).json())
            # 400 HTTPError here
            except Exception:
                if '/v4/' in self.url:
                    data = (helpers.post('https://animixplay.com/e4/5SkyXQULLrn9OhR',
                            data={'id': url.split('/')[-1]}).json())['epstream']
                elif '/v2/' in self.url:
                    data = (helpers.post('https://animixplay.com/e2/T23nBBj3NfRzTQx',
                            data={'id': url.split('/')[-1]}).json())['epstream']

            logger.debug(data)
            if '/v4/' in self.url:
                # Has a list of mp4 links.
                return data
            elif '/v2/' in self.url:
                # Has elaborate list for all metadata on episodes.
                episodes = []
                for i in data:
                    info_dict = i.get('src', None)
                    # Looks like mp4 is always first in the list
                    # Sometimes it returns None
                    if info_dict:
                        episodes.append(info_dict[0].get('file', ''))
                    else:
                        episodes.append('')
                return episodes
        else:
            try:
                ep_list = soup.find('div', {'id': 'epslistplace'}).get_text()
                logger.debug(ep_list)
                jdata = json.loads(ep_list)
                keyList = list(jdata.keys())
                del keyList[0]
                logger.debug(keyList)
                return [jdata[x] for x in keyList if '.' in jdata[x]]
            except json.decoder.JSONDecodeError:
                # Link generation
                data = (helpers.post('https://animixplay.com/e1/9DYiGVLD7ASqZ5p',
                        data={'id': url.split('/')[-1]}).json())['epstream']
                logger.debug('Data: {}'.format(data))
                return [data[i] for i in data if i != 'eptotal']
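The double encoding described in the comments above is easy to check in isolation. A standalone sketch of the same steps (the padding constants "NaN" and "N4CP9Eb6laO9N" are the ones quoted from v4.min.js):

    import base64

    def build_post_id(url_id: str) -> str:
        # base64-encode the raw id, wrap it in the two constants,
        # then base64-encode the wrapped string again.
        inner = base64.b64encode(url_id.encode()).decode()
        return base64.b64encode(f'NaN{inner}N4CP9Eb6laO9N'.encode()).decode()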
Example #3
    def search(cls, query):
        search_results = helpers.post(
            'https://ww5.dubbedanime.net/ajax/paginate',
            data={
                'query[search]': query,
                'what': 'query',
                'model': 'Anime',
                'size': 30,
                'letter': 'all',
            }).json()

        title_data = {'data': []}
        for result in search_results['results']:
            title_data['data'].append({
                'url': cls.url + result['url'],
                'title': result['title'],
            })

        search_results = [
            SearchResult(title=result["title"], url=result["url"])
            for result in title_data.get('data', [])
        ]
        return search_results
Example #4
    def _get_sources(self):
        ids = self.url.split(",")
        ep = ids[0]
        realId = int(ids[0]) + int(ids[1]) + 2
        _referer = ids[2]

        realUrl = helpers.post(
            "https://kissanimefree.net/wp-admin/admin-ajax.php",
            referer=f"https://kissanimefree.net/episode/{_referer}-episode-{realId}/",
            data={
                "action": "kiss_player_ajax",
                "server": "vidcdn",
                "filmId": realId
            }).text

        realUrl = realUrl if realUrl.startswith('http') else "https:" + realUrl

        txt = helpers.get(realUrl).text
        # gets src="//vidstreaming.io/loadserver.php?id=MTIyNjM4&title=Naruto"></iframe>
        vidstream_regex = r'src=[^\s]*(((vidstreaming\.io)|(gogo-stream\.com))[^"\']*)'
        surl = re.search(vidstream_regex, txt)
        if surl and surl.group(1):
            return [('vidstream', surl.group(1))]

        logger.debug('Failed vidstream text: {}'.format(txt))
        return ''
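The regex above can be verified against the sample src quoted in the comment; a quick self-contained check:

    import re

    vidstream_regex = r'src=[^\s]*(((vidstreaming\.io)|(gogo-stream\.com))[^"\']*)'
    sample = 'src="//vidstreaming.io/loadserver.php?id=MTIyNjM4&title=Naruto"></iframe>'
    match = re.search(vidstream_regex, sample)
    print(match.group(1))  # vidstreaming.io/loadserver.php?id=MTIyNjM4&title=Naruto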
Example #5
    def search(cls, query):
        sel = helpers.get("https://kissanime.ru", sel=True)
        cookies = sel.cookies
        # Note that the user agent must be the same as the one which generated the cookies.
        agent = sel.user_agent
        cookies = {c['name']: c['value'] for c in cookies}
        soup = helpers.soupify(helpers.post(
            "https://kissanime.ru/Search/Anime",
            headers={
                "User-Agent": agent,
                "Referer": "https://kissanime.ru/Search/Anime"
            },
            data={"keyword": query},
            cookies=cookies))

        # If only one anime found, kissanime redirects to anime page.
        # We don't want that
        if soup.title.text.strip().lower() != "find anime":
            return [SearchResult(
                title=soup.find('a', 'bigChar').text,
                url=cls.domain +
                    soup.find('a', 'bigChar').get('href'),
                poster='',
            )]

        searched = [s for i, s in enumerate(soup.find_all('td')) if not i % 2]

        ret = []
        for res in searched:
            res = SearchResult(
                title=res.text.strip(),
                url=cls.domain + res.find('a').get('href'),
                poster='',
            )
            logger.debug(res)
            ret.append(res)

        return ret
Example #6
    def _get_sources(self):
        ids = self.url.split(",")
        ep = ids[0]
        realId = int(ids[0]) + int(ids[1]) + 2
        _referer = ids[2]

        d = helpers.post(
            "https://kissanimefree.xyz/wp-admin/admin-ajax.php",
            referer=
            f"https://kissanimefree.xyz/episode/{_referer}-episode-{realId}/",
            data={
                "action": "kiss_player_ajax",
                "server": "vidcdn",
                "filmId": realId
            })
        realUrl = d.text[d.text.find('url=') + 4:]
        if not realUrl.startswith('http'):
            realUrl = "https:" + realUrl

        txt = helpers.get(realUrl).text
        vidstream_regex = r"(\"|)file(\"|):.*?('|\")([^^('|\")]*)"  # you could add the vidstream extractor and qualities here
        surl = re.search(vidstream_regex, txt).group(4)

        return [(
            'no_extractor',
            surl,
        )]
Example #7
    def _get_sources(self):
        resp = helpers.get(self.url)
        # Gets the ctk and id from the page; both are used in the post request below.
        ctk = re.search(r"ctk\s+=\s+'(.*)?';", resp.text).group(1)
        _id = re.search(r"episode_id\s*=\s*([^;]*)", resp.text).group(1)

        logger.info('ctk: {}'.format(ctk))
        logger.info('id: {}'.format(_id))

        # The post request returns an embed.
        resp = helpers.post(
            "https://anime8.ru/ajax/anime/load_episodes_v2?s=fserver",
            data={
                "episode_id": _id,
                "ctk": ctk
            })
        # Gets the real embed url. Json could be used on the post request, but this is probably more reliable.

        # Skips if no episode found.
        if not resp.json().get('status'):
            return ''

        embed = re.search(r"iframe\s*src.*?\"([^\"]*)",
                          resp.text).group(1).replace('\\', '')
        return [('streamx', embed)]
Example #8
    def _get_data(self):
        # Kwik servers don't allow direct link access; you need to be referred
        # from somewhere, so the url itself is used as the referer. The url
        # then has to be rebuilt. Hopefully kwik doesn't block this too.

        # Necessary
        self.url = self.url.replace(".cx/e/", ".cx/f/")
        self.headers.update({"referer": self.url})

        cookies = util.get_hcaptcha_cookies(self.url)

        if not cookies:
            resp = util.bypass_hcaptcha(self.url)
        else:
            resp = requests.get(self.url, cookies=cookies)

        title_re = re.compile(r'title>(.*)<')

        kwik_text = resp.text
        deobfuscated = None

        loops = 0
        while not deobfuscated and loops < 6:
            try:
                deobfuscated = helpers.soupify(
                    util.deobfuscate_packed_js(
                        re.search(r'(?s)<(script).*(var\s+_.*escape.*?)</\1>',
                                  kwik_text).group(2)))
            except (AttributeError, CalledProcessError) as e:
                if isinstance(e, AttributeError):
                    resp = util.bypass_hcaptcha(self.url)
                    kwik_text = resp.text

                if isinstance(e, CalledProcessError):
                    resp = requests.get(self.url, cookies=cookies)
            finally:
                cookies = resp.cookies
                title = title_re.search(kwik_text).group(1)
                loops += 1

        post_url = deobfuscated.form["action"]
        token = deobfuscated.input["value"]

        resp = helpers.post(post_url,
                            headers=self.headers,
                            params={"_token": token},
                            cookies=cookies,
                            allow_redirects=False)
        stream_url = resp.headers["Location"]

        logger.debug('Stream URL: %s' % stream_url)

        return {
            'stream_url': stream_url,
            'meta': {
                'title': title,
                'thumbnail': ''
            },
            'referer': None
        }
Example #9
    def _get_sources(self):
        ids = self.url.split(",")
        ep = ids[0]
        realId = int(ids[0]) + int(ids[1]) + 2
        _referer = ids[2]

        realUrl = helpers.post(
            "https://kissanimefree.xyz/wp-admin/admin-ajax.php",
            referer=f"https://kissanimefree.xyz/episode/{_referer}-episode-{realId}/",
            data={
                "action": "kiss_player_ajax",
                "server": "vidcdn",
                "filmId": realId
            }).text

        realUrl = realUrl if realUrl.startswith('http') else "https:" + realUrl

        txt = helpers.get(realUrl).text
        # Group 2 and/or 3 is the vidstreaming links without https://
        # Not used because I've yet to test if goto always leads to mp4
        # vidstream_regex = r"window\.location\s=\s(\"|').*?(vidstreaming\.io/[^(\"|')]*?)\"|(vidstreaming\.io/goto\.php[^(\"|')]*?)(\"|')"

        vidstream_regex = r"window\.location\s=\s(\"|').*?(vidstreaming\.io/[^(\"|')]*?)\""
        surl = re.search(vidstream_regex, txt)
        if surl and surl.group(2):
            return [(
                'vidstreaming',
                surl.group(2),
            )]
        return ''
Example #10
    def search(cls, query):
        soup = helpers.soupify(
            helpers.post('https://kissanime.ru/Search/Anime',
                         data=dict(keyword=query),
                         referer=cls._referer,
                         cf=True))

        # If only one anime found, kissanime redirects to anime page.
        # We don't want that
        if soup.title.text.strip().lower() != "find anime":
            return [
                SearchResult(
                    title=soup.find('a', 'bigChar').text,
                    url='https://kissanime.ru' +
                    soup.find('a', 'bigChar').get('href'),
                    poster='',
                )
            ]

        searched = [s for i, s in enumerate(soup.find_all('td')) if not i % 2]

        ret = []
        for res in searched:
            res = SearchResult(
                title=res.text.strip(),
                url='https://kissanime.ru' + res.find('a').get('href'),
                poster='',
            )
            logger.debug(res)
            ret.append(res)

        return ret
Example #11
    def _get_data(self):
        # TODO: Provide referer by source
        referer = 'https://anistream.xyz'

        req = helpers.get(self.url, referer=referer)
        source_regex = r'<source src="(.*?)"'
        source = re.search(source_regex, req.text)

        # Matches: token="eyJ0eXA"
        token_regex = r"token\s*=\s*['\"|']([^\"']*)"
        token = re.search(token_regex, req.text)

        if source:
            return {
                'stream_url': source.group(1)
            }

        elif token:
            token = token.group(1)
            trollvid_id = self.url.split('/')[-1]  # something like: 084df78d215a
            post = helpers.post(f'https://mp4.sh/v/{trollvid_id}',
                                data={'token': token},
                                referer=self.url,
                                ).json()

            # {'success':True} on success.
            if post.get('success') and post.get('data'):
                return {
                    'stream_url': post['data']
                }

        # In case neither method works.
        return {'stream_url': ''}
Example #12
    def _get_sources(self):
        servers = self.config['servers']
        url = ''
        for i in servers:
            params = {
                's': i,
                'episode_id': self.url.split('id=')[-1],
            }
            api = helpers.post(self._episode_list_url,
                               params=params,
                               referer=self.url).json()
            if api.get('status', False):
                iframe_regex = r'<iframe src="([^"]*?)"'
                url = re.search(iframe_regex, api['value']).group(1)
                if url.startswith('//'):
                    url = 'https:' + url
                if url.endswith('mp4upload.com/embed-.html') or url.endswith(
                        'yourupload.com/embed/'
                ):  # Sometimes returns empty link
                    url = ''
                    continue
                break

        extractor = 'streamx'  # default extractor
        extractor_urls = {  # dumb, but easily expandable, maps urls to extractors
            "mp4upload.com": "mp4upload",
            "yourupload.com": "yourupload"
        }
        for i in extractor_urls:
            if i in url:
                extractor = extractor_urls[i]

        return [(extractor, url)]
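The dict-based dispatch at the end could also be a single lookup; an equivalent sketch (the sample url is made up):

    extractor_urls = {
        "mp4upload.com": "mp4upload",
        "yourupload.com": "yourupload"
    }
    url = 'https://www.mp4upload.com/embed-abc123.html'  # hypothetical embed url
    extractor = next(
        (name for domain, name in extractor_urls.items() if domain in url),
        'streamx')  # falls back to the default extractor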
Example #13
    def _get_data(self):
        post_data = helpers.post("https://www.xstreamcdn.com/api/source/" + self.url.split("/")[-1]).json()
        data = post_data["data"]
        link = data[-1]["file"]

        return {
            'stream_url': link,
        }
Example #14
 def search(cls, query):
     soup = helpers.soupify(helpers.post('https://vostfree.com', data={'do': 'search', 'subaction': 'search', 'story': query}))
     return [
         SearchResult(
             title=re.sub(r'\s+?FRENCH(\s+)?$', '', x.text.strip()),
             url=x['href']
         )
         for x in soup.select('div.title > a')
     ]
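The suffix-stripping regex can be sanity-checked on its own (with the raw-string fix applied above):

    import re

    # Strips a trailing "FRENCH" marker plus surrounding whitespace from a title.
    print(re.sub(r'\s+?FRENCH(\s+)?$', '', 'One Piece FRENCH '))  # -> One Piece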
Example #15
    def search(cls, query):
        # V3 not supported
        v1 = helpers.soupify(
            helpers.post("https://animixplay.com/api/search/v1",
                         data={
                             "q2": query
                         },
                         verify=False).json()['result']).select('p.name > a')
        v2 = helpers.soupify(
            helpers.post("https://animixplay.com/api/search/",
                         data={
                             "qfast2": query
                         },
                         verify=False).json()['result']).select('p.name > a')
        # v3 = helpers.soupify(helpers.post("https://animixplay.com/api/search/v3",
        #                                   data={"q3": query}, verify=False).json()['result'])

        # v3 and v4 return a 400 error if there are no results.
        # HTTPError doesn't seem to propagate through helpers, hence the broad except below.
        try:
            v4 = helpers.soupify(
                helpers.post(
                    "https://animixplay.com/api/search/v4",
                    data={
                        "q": query
                    },
                    verify=False).json()['result']).select('p.name > a')
        except Exception:
            v4 = []

        # meta stores the version key from versions_dict
        versions_dict = {'v1': v1, 'v2': v2, 'v4': v4}
        logger.debug('Versions: {}'.format(versions_dict))
        data = []
        for version, anchors in versions_dict.items():
            for anchor in anchors:
                data.append(
                    SearchResult(title=anchor.text,
                                 url='https://animixplay.com' + anchor.get('href'),
                                 meta={'version': version}))

        return data
Example #16
    def _get_sources(self):
        soup = helpers.soupify(helpers.get(self.url))
        soup = helpers.soupify(helpers.get(soup.iframe.get("src")))
        id_ = re.findall(r"data: {id: [\"'](.*?)[\"']}", str(soup))[0]

        response = helpers.post('https://genoanime.com/player/genovids.php',
                                data={
                                    "id": id_
                                }).json()  # noqa

        return [("no_extractor", x['src']) for x in response['url']]
Example #17
    def search(cls, query):
        soup = helpers.soupify(
            helpers.post("https://genoanime.com/data/searchdata.php",
                         data={"anime": query}))

        search_results = [
            SearchResult(title=x.text,
                         url=x.get("href").replace("./",
                                                   "https://genoanime.com/"))
            for x in soup.select("h5 > a[href]")
        ]

        return search_results
Example #18
    def _get_data(self):
        url = self.url
        end = url[url.find('=') + 1:]
        beg = helpers.post(
            'https://ping.idocdn.com/',
            data={'slug': end},
            referer=url,
        ).json()['url']

        link = f'https://{beg}'
        return {'stream_url': link, 'referer': url}
Example #19
    def search(query):
        ani_query = """
            query ($id: Int, $page: Int, $search: String, $type: MediaType) {
                Page (page: $page, perPage: 10) {
                    media (id: $id, search: $search, type: $type) {
                        id
                        idMal
                        description(asHtml: false)
                        seasonYear
                        title {
                            english
                            romaji
                            native
                        }
                        coverImage {
                            extraLarge
                        }
                        bannerImage
                        averageScore
                        status
                        episodes
                        }
                    }
                }
            """
        url = 'https://graphql.anilist.co'

        # TODO: handle the case where there are no results;
        # it currently errors (e.g. anime -ll DEBUG dl "nev")
        results = helpers.post(url,
                               json={
                                   'query': ani_query,
                                   'variables': {
                                       'search': query,
                                       'page': 1,
                                       'type': 'ANIME'
                                   }
                               }).json()['data']['Page']['media']
        if not results:
            logger.error('No results found in anilist')
            raise NameError

        search_results = [
            AnimeInfo(url='https://anilist.co/anime/' + str(i['id']),
                      title=i['title']['romaji'],
                      jp_title=i['title']['native'],
                      episodes=int(i['episodes']),
                      metadata=i) for i in results if i['episodes'] is not None
        ]
        return search_results
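Since graphql.anilist.co is a public endpoint, the request above can be exercised standalone with plain requests; a minimal sketch, with the no-results case flagged in the TODO guarded defensively:

    import requests

    ANI_QUERY = '''
    query ($search: String, $type: MediaType) {
        Page(page: 1, perPage: 10) {
            media(search: $search, type: $type) {
                id
                title { romaji native }
                episodes
            }
        }
    }
    '''

    resp = requests.post('https://graphql.anilist.co',
                         json={'query': ANI_QUERY,
                               'variables': {'search': 'one piece', 'type': 'ANIME'}})
    media = ((resp.json().get('data') or {}).get('Page') or {}).get('media') or []
    if not media:
        print('No results found in anilist')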
Example #20
    def _get_data(self):
        url = self.url + '&q=' + self.quality
        logger.debug('Calling Rapid url: {}'.format(url))
        headers = self.headers
        headers['referer'] = url

        try:
            r = helpers.get(url, headers=headers)
            soup = helpers.soupify(r)
            stream_url = get_source(soup)
        except Exception as e:
            logger.debug('Plain GET failed; retrying with the confirmation POST')
            logger.debug(e)
            r = helpers.post(url,
                             data={
                                 'confirm.x': 12,
                                 'confirm.y': 12,
                                 'block': 1,
                             },
                             headers=headers)
        soup = helpers.soupify(r)

        # TODO: Make these a different function. Can be reused in other classes
        #       too
        title_re = re.compile(r'"og:title" content="(.*)"')
        image_re = re.compile(r'"og:image" content="(.*)"')

        try:
            stream_url = get_source(soup)
        except IndexError:
            stream_url = None

        try:
            title = str(title_re.findall(r.text)[0])
            thumbnail = str(image_re.findall(r.text)[0])
        except Exception as e:
            title = ''
            thumbnail = ''
            logger.debug(e)

        return {
            'stream_url': stream_url,
            'meta': {
                'title': title,
                'thumbnail': thumbnail,
            },
        }
Example #21
    def search(cls, query):
        data = {
            "action": "ajaxsearchlite_search",
            "aslp": query,
            "asid": 1,
            "options":
            "qtranslate_lang=0&set_intitle=None&customset%5B%5D=anime"
        }
        soup = helpers.soupify(
            helpers.post("https://4anime.to/wp-admin/admin-ajax.php",
                         data=data)).select('div.info > a')

        search_results = [
            SearchResult(title=i.text, url=i['href']) for i in soup
        ]
        return search_results
Example #22
 def bypass(self):
     host = "https://erai-raws.info"
     url = "https://erai-raws.info/anime-list/"
     u = base64.b64encode(url.encode('utf-8'))
     h = base64.b64encode(host.encode('utf-8'))
     bypass_link = helpers.post('https://ddgu.ddos-guard.net/ddgu/',
                                data={
                                    'u': u,
                                    'h': h,
                                    'p': ''
                                },
                                headers={
                                    'Referer': url
                                },
                                allow_redirects=False).headers["Location"]
     helpers.get(bypass_link, allow_redirects=False)
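For reference, u and h are just the base64 of the raw url and host strings; a quick check:

    import base64

    print(base64.b64encode('https://erai-raws.info/anime-list/'.encode('utf-8')))
    print(base64.b64encode('https://erai-raws.info'.encode('utf-8')))

The /ddgu/ endpoint answers with a redirect whose Location carries the cookie-granting link, which is why the code above posts with allow_redirects=False and reads headers["Location"] instead of following it.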
Example #23
    def _get_data(self):
        url = self.url
        url = url.replace('gcloud.live/v/','gcloud.live/api/source/')
        if '#' in url:
            url = url[:url.find('#')]
        url = url.rsplit('/', 1)[-1]  # keep only the trailing id segment
        data = helpers.post(f'https://gcloud.live/api/source/{url}').json()['data']

        if data == 'Video not found or has been removed':
            logger.warning('File not found (Most likely deleted)')
            return {'stream_url': ''}
        
        for a in data:
            if a['label'] == self.quality:
                return {'stream_url': a['file']}

        return {'stream_url': ''}
Example #24
 def search(cls, query):
     search_results = helpers.post(
         'https://ww5.dubbedanime.net/ajax/paginate',
         data={
             'query[search]': query,
             'what': 'query',
             'model': 'Anime',
             'size': 30,
             'letter': 'all',
         }).json()
     search_results = [
         SearchResult(title=search_results['results'][a]['title'],
                      url=cls.url + search_results['results'][a]['url'])
         for a in range(len(search_results['results']))
     ]
     return search_results
Example #25
    def _get_data(self):
        url = self.url
        end = url[url.find('=') + 1:]
        obfuscated_url = helpers.post('https://ping.idocdn.com/',
                                      data={'slug': end},
                                      referer=url,
                                      ).json()['url']

        decoded_url = base64.b64decode(obfuscated_url[-1] + obfuscated_url[:-1]).decode('utf-8')

        # HydraX uses www.url for high quality and url for low quality
        quality = '' if self.quality in ['360p', '480p'] else 'www.'

        return {
            'stream_url': f'https://{quality}{decoded_url}',
            'referer': url
        }
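The de-obfuscation step above (move the last character to the front, then base64-decode) as a standalone helper sketch:

    import base64

    def decode_hydrax(obfuscated: str) -> str:
        # The server rotates the base64 string by one character;
        # undo the rotation, then decode.
        return base64.b64decode(obfuscated[-1] + obfuscated[:-1]).decode('utf-8')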
Example #26
    def _get_data(self):
        # Surprisingly not debug-printed in anime.py
        logger.debug('Gcloud url: {}'.format(self.url))
        # gcloud uses the same video ID as other sites.
        id_regex = r'(gcloud\.live|fembed\.com|feurl\.com)/(v|api/source)/([^(?|#)]*)'  # Group 3 is the id
        gcloud_id = re.search(id_regex, self.url)
        if not gcloud_id:
            logger.error('Unable to get ID for url "{}"'.format(self.url))
            return {'stream_url': ''}

        gcloud_id = gcloud_id.group(3)
        data = helpers.post(f'https://gcloud.live/api/source/{gcloud_id}').json()['data']
        
        if data == 'Video not found or has been removed':
            logger.warning('File not found (Most likely deleted)')
            return {'stream_url': ''}
        
        for a in data:
            if a['label'] == self.quality:
                return {'stream_url': a['file']}

        return {'stream_url': ''}
Example #27
    def _get_data(self):
        # Kwik servers don't allow direct link access; you need to be referred
        # from somewhere, so the url itself is used as the referer. The url
        # then has to be rebuilt. Hopefully kwik doesn't block this too.

        # Necessary
        self.url = self.url.replace(".cx/e/", ".cx/f/")

        title_re = re.compile(r'title>(.*)<')

        resp = helpers.get(self.url, headers={"referer": self.url})
        kwik_text = resp.text
        cookies = resp.cookies

        title = title_re.search(kwik_text).group(1)
        deobfuscated = helpers.soupify(
            util.deobfuscate_packed_js(
                re.search(r'(?s)<(script).*(var\s+_.*escape.*?)</\1>',
                          kwik_text).group(2)))

        post_url = deobfuscated.form["action"]
        token = deobfuscated.input["value"]

        resp = helpers.post(post_url,
                            headers={"referer": self.url},
                            params={"_token": token},
                            cookies=cookies,
                            allow_redirects=False)
        stream_url = resp.headers["Location"]

        logger.debug('Stream URL: %s' % stream_url)
        return {
            'stream_url': stream_url,
            'meta': {
                'title': title,
                'thumbnail': ''
            },
            'referer': None
        }
Example #28
    def _get_data(self):
        url = self.url
        # Should probably be urlparse.
        end = url[url.find('=') + 1:]
        # Note that this url can change.
        obfuscated_url = helpers.post(
            'https://ping.iamcdn.net/',
            data={
                'slug': end
            },
            referer=f'https://play.hydracdn.network/watch?v={end}',
        ).json()['url']

        decoded_url = base64.b64decode(obfuscated_url[-1] +
                                       obfuscated_url[:-1]).decode('utf-8')

        # HydraX uses www.url for high quality and url for low quality
        quality = '' if self.quality in ['360p', '480p'] else 'www.'

        return {
            'stream_url': f'https://{quality}{decoded_url}',
            'referer': url
        }
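The "Should probably be urlparse" note above, sketched out (assuming the slug is the v query parameter, as in the referer built for the request):

    from urllib.parse import urlparse, parse_qs

    def extract_slug(url: str) -> str:
        # e.g. 'https://play.hydracdn.network/watch?v=abc123' -> 'abc123'
        return parse_qs(urlparse(url).query).get('v', [''])[0]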
Example #29
    def _get_data(self):

        # Needs a JavaScript deobfuscation API in Python, so someone smarter
        # than me can work on that; for now I will add the pattern I observed.

        # alternatively you can pattern match on `src` for stream_url part
        source_parts_re = re.compile(
            r'action=\"([^"]+)\".*value=\"([^"]+)\".*Click Here to Download',
            re.DOTALL)

        # Kwik servers don't have direct link access you need to be referred
        # from somewhere, I will just use the url itself.

        download_url = self.url.replace('kwik.cx/e/', 'kwik.cx/f/')

        kwik_text = helpers.get(download_url, referer=download_url).text
        post_url, token = source_parts_re.search(kwik_text).group(1, 2)

        stream_url = helpers.post(post_url,
                                  referer=download_url,
                                  data={
                                      '_token': token
                                  },
                                  allow_redirects=False).headers['Location']

        title = stream_url.rsplit('/', 1)[-1].rsplit('.', 1)[0]

        logger.debug('Stream URL: %s' % stream_url)
        return {
            'stream_url': stream_url,
            'meta': {
                'title': title,
                'thumbnail': ''
            },
            'referer': None
        }
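All three kwik variants end the same way: POST the token without following redirects and read the stream URL from the Location header. In isolation (the endpoint and token here are hypothetical):

    import requests

    resp = requests.post('https://example.com/d/abc123',  # hypothetical post_url
                         data={'_token': 'token-from-page'},
                         allow_redirects=False)
    stream_url = resp.headers.get('Location', '')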
Example #30
    def getTorrents(self, soup, cookies):
        # Clickable nodes, such as: Notifications, Episodes, Batch, etc
        # We are only interested in Episode/Batch
        nodes = soup.select("a.aa_ss")
        episode_nodes = [x for x in nodes if x.text == "Episodes"]
        load = "load_more_0"

        if not episode_nodes:
            logger.warning("Episodic torrents not found, using batch torrents...")
            batch_torrents = [x for x in nodes if x.text == "Batch"]

            if not batch_torrents:
                logger.warning(
                    "Neither episode torrents nor batch torrents were found.")

            load = "load_more_3"

        max_page_regex = r"{}_params.*?max_page.*?(\d+)"
        max_page = int(
            re.search(max_page_regex.format(load), str(soup)).group(1))
        max_page_special = int(
            re.search(max_page_regex.format("load_more_2"),
                      str(soup)).group(1))

        post_data = {"action": load}

        # Get data to post and map to query, e.g:
        """
        {
            'anime-list': 'one-piece', 
             ...
            'order': 'DESC'
        }
        """
        post_data["query"] = json.dumps(json.loads(
            re.search("posts.*?(\{.*?order.*?\})",
                      str(soup)).group(1).replace("\\", "")),
                                        separators=(",", ":"))

        episodes = []

        for page in range(max_page + max_page_special):
            post_data["page"] = page if page < max_page else page - max_page

            if page >= max_page:
                post_data["action"] = "load_more_2"

            resp = helpers.post(
                "https://erai-raws.info/wp-admin/admin-ajax.php",
                data=post_data,
                cookies=cookies)

            if resp:
                soup = helpers.soupify(resp)

                # List of tuples of (quality, magnet)
                eps = [(x[0].text, x[1]["href"]) for y in [
                    list(
                        zip(x.select("i.sp_p_q"),
                            x.select("a.load_more_links[href*=magnet]")))
                    for x in soup.select(
                        "article div:has(i.sp_p_q):has(a.load_more_links[href*=magnet])"
                    )
                ] for x in y]

                # Filter by quality
                filtered_eps = [x[1] for x in eps if self.quality in x[0]]

                if not filtered_eps:
                    logger.warning(
                        f"Quality {self.quality} not found. Trying {self.QUALITIES[not self.QUALITIES.index(self.quality)]}"
                    )
                    filtered_eps = [
                        x[1] for x in eps
                        if self.QUALITIES[not self.QUALITIES.index(self.quality)]
                        in x[0]
                    ]

                for ep in filtered_eps:
                    # Sometimes duplication happens
                    if ep not in episodes:
                        episodes.append(ep)

        return episodes
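The page/action bookkeeping in the loop above, traced with sample values (assuming the episodic branch, where load is "load_more_0"):

    max_page, max_page_special = 3, 2
    for page in range(max_page + max_page_special):
        action = 'load_more_0' if page < max_page else 'load_more_2'
        print(action, page if page < max_page else page - max_page)
    # load_more_0 pages 0..2 are fetched first, then load_more_2 pages 0..1.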