Example No. 1
    def _get_sources(self):
        params = {'id': self.url, 'server': '33', 'ts': self.ts}

        def get_stream_url(base_url, params, DD=None):
            # Compute the '_' token the API expects from the other params.
            params['_'] = int(generate_(params, DD=DD))
            data = util.get_json(base_url, params=params)

            return data['target']

        try:
            url = get_stream_url(self._base_url, params)
        except KeyError:
            try:
                del params['_']
                del params['ts']
                # I don't know if this is reliable or not.
                # For now it works.
                data = util.get_json('http://9anime.cloud/ajax/episode/info',
                                     params=params)
                url = data['target']
            except Exception as e:
                raise AnimeDLError(
                    '9anime probably changed their API again. Check the issues '
                    'here: https://github.com/vn-ki/anime-downloader/issues. '
                    'If it has not been reported yet, please open a new issue.'
                ) from e

        return [
            ('rapidvideo', url),
        ]
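Every snippet here goes through util.get_json, whose implementation is not shown in these excerpts. A minimal sketch, assuming it is a thin wrapper over requests (the project's real helper may add headers, retries, or Cloudflare handling on top of this):

    import requests

    def get_json(url, params=None):
        # Assumed behaviour: issue a GET with query params and
        # decode the JSON body.
        response = requests.get(url, params=params)
        response.raise_for_status()
        return response.json()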
Example No. 2
    def get_data(self):
        # Extract the anime id from the page. String slicing is a shoddy
        # approach, but my regular expression skills have been neglected
        # to the point of disappointment (a regex alternative is sketched
        # after this example).
        resp = helpers.get(self.url, cf=True).text
        first_search = '$.getJSON(\'/api?m=release&id='
        last_search = '&l=\' + limit + \'&sort=\' + sort + \'&page=\' + page'

        anime_id = (resp[resp.find(first_search) +
                         len(first_search):resp.find(last_search)])

        self.params = {
            'm': 'release',
            'id': anime_id,
            'sort': 'episode_asc',
            'page': 1
        }

        resp = util.get_json(self.api_url, params=self.params)

        self._scrape_metadata(resp['data'])

        self._episode_urls = self._scrape_episodes(resp)
        self._len = len(self._episode_urls)

        return self._episode_urls
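A regex alternative to the string slicing near the top of get_data, as a sketch; it assumes the page embeds the $.getJSON call exactly as searched for, that the id is numeric, and that resp still holds the page text fetched at the top of the method:

    import re

    match = re.search(r"\$\.getJSON\('/api\?m=release&id=(\d+)", resp)
    anime_id = match.group(1) if match else None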
Example No. 3
    def _get_source(self, episode_id, server):
        # We will extract the episodes data through the animepahe api
        # which returns the available qualities and the episode sources.
        params = {'id': episode_id, 'm': 'embed', 'p': server}

        episode = util.get_json('https://animepahe.com/api', params=params)
        sources = episode['data'][episode_id]

        if self.quality in sources:
            return (server, sources[self.quality]['url'])
        # The requested quality is not available from this server.
        return None
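Returning None here pushes the decision to the caller. A sketch of a fallback the caller could apply, with a hypothetical preference order; the quality keys like '720p' follow the shape of the animepahe sources dict above:

    def pick_source(sources, preferred):
        # Hypothetical fallback order; try the preferred quality
        # first, then walk a fixed list.
        for quality in (preferred, '1080p', '720p', '360p'):
            if quality in sources:
                return sources[quality]['url']
        return None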
Example No. 4
    def _get_sources(self):
        episode_id = self.url.rsplit('/', 1)[-1]

        # We will extract the episodes data through the animepahe api
        # which returns the available qualities and the episode sources.
        # We rely on mp4upload for animepahe as it is the most used provider.
        params = {'id': episode_id, 'm': 'embed', 'p': 'mp4upload'}

        episode = util.get_json('https://animepahe.com/api', params=params)
        sources = episode['data'][episode_id]

        if self.quality in sources:
            return [('mp4upload', sources[self.quality]['url'])]
        raise NotFoundError
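For reference, the rsplit call above simply takes the last path segment of the episode URL (illustrative URL):

    >>> 'https://animepahe.com/play/some-anime/12345'.rsplit('/', 1)[-1]
    '12345'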
Example No. 5
    @classmethod
    def search(cls, query):
        params = {'l': 8, 'm': 'search', 'q': query}

        search_results = util.get_json(
            cls.api_url,
            params=params,
        )

        results = []

        for search_result in search_results['data']:
            search_result_info = SearchResult(
                title=search_result['title'],
                url=cls.base_anime_url + search_result['slug'],
                poster=search_result['image'])

            logger.debug(search_result_info)
            results.append(search_result_info)

        return results
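A hypothetical call site, assuming this classmethod lives on a provider class named AnimePahe and that SearchResult exposes its constructor arguments as attributes:

    results = AnimePahe.search('one punch man')
    for result in results:
        print(result.title, result.url)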
Example No. 6
    def _scrape_episodes(self, ani_json):
        episodes = self._collect_episodes(ani_json['data'])

        if not episodes:
            raise NotFoundError(
                'No episodes found in url "{}"'.format(self.url), self.url)
        else:
            # animepahe's API only returns the first page, so make a
            # follow-up request for every remaining page.
            start_page = ani_json['current_page'] + 1
            end_page = ani_json['last_page'] + 1

            for i in range(start_page, end_page):
                self.params['page'] = i
                resp = util.get_json(self.api_url, params=self.params)

                episodes = self._collect_episodes(resp['data'], episodes)

        return episodes
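_collect_episodes is not shown in these excerpts. A minimal reconstruction consistent with how it is called above, taking a data list and an optional accumulator; the 'id' key is an assumption:

    def _collect_episodes(self, data, episodes=None):
        # Hypothetical sketch: seed from the accumulator passed in
        # for later pages, then append one entry per episode.
        episodes = episodes or []
        for episode in data:
            episodes.append(episode['id'])
        return episodes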
Example No. 7
    @classmethod
    def search(cls, query):
        r = util.get_json('https://masterani.me/api/anime/filter?', {
            'search': query,
            'order': 'relevance_desc'
        })

        search_result = r['data']

        ret = []

        logging.debug('Search results')

        for item in search_result:
            s = SearchResult(
                title=item['title'],
                url='https://masterani.me/anime/info/{}'.format(item['slug']),
                poster='https://cdn.masterani.me/{}{}'.format(
                    item['poster']['path'], item['poster']['file']))
            logging.debug(s)
            ret.append(s)

        return ret
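The poster URL is stitched together from two fields of the API's poster object; with illustrative values:

    item = {'poster': {'path': 'poster/', 'file': 'example.jpg'}}
    poster = 'https://cdn.masterani.me/{}{}'.format(
        item['poster']['path'], item['poster']['file'])
    # poster == 'https://cdn.masterani.me/poster/example.jpg'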
Example No. 8
    @classmethod
    def search(cls, query):
        resp = util.get_json(
            cls._search_api_url,
            params={
                'keyword': query,
                'id': -1,
                'link_web': 'https://www1.gogoanime.sh/'
            }
        )

        search_results = []

        soup = BeautifulSoup(resp['content'], 'html.parser')
        for element in soup('a', class_='ss-title'):
            search_result = SearchResult(
                title=element.text,
                url=element.attrs['href'],
                poster=''
            )
            logging.debug(search_result)
            search_results.append(search_result)
        return search_results
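The loop relies on soup('a', class_='ss-title'), which is BeautifulSoup shorthand for find_all. A self-contained demo with illustrative markup (the real 'content' payload may differ):

    from bs4 import BeautifulSoup

    html = ('<a class="ss-title" '
            'href="https://www1.gogoanime.sh/category/naruto">Naruto</a>')
    soup = BeautifulSoup(html, 'html.parser')
    for element in soup('a', class_='ss-title'):
        print(element.text, element.attrs['href'])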