Example no. 1
0
    def search(self):
        """Search animehaven.org for the media's name.

        Posts to the site's AJAX search endpoint, follows each result
        block to collect its Sub/Dub version links, and returns them as a
        SearchResult.
        """
        html = requests.post('http://animehaven.org/wp-admin/admin-ajax.php',
                             data={
                                 'action': 'search_ajax',
                                 'keyword': self.media.metadata['name']
                             }).text

        # The endpoint returns escaped HTML; strip the escape sequences
        # before handing it to the parser.
        html = html.replace('\\n', '')
        html = html.replace('\\t', '')
        html = html.replace('\\', '')

        soup = BeautifulSoup(html, 'html.parser')

        results = []

        for result in soup.find_all('div', {'class': 'sa_post'}):
            title_block = result.find('h6')
            link = title_block.find('a')  # The first one is the good one
            title, href = link.get('title'), link.get('href')

            self.logger.debug('Found block {} ({})'.format(title, href))

            versions_soup = self._get(href)

            # Each 'ah_button' link is one watchable version of the show;
            # classify it as Sub or Dub from the link text.
            versions = [
                ('Sub' if 'sub' in x.text.lower() else 'Dub', x.get('href'))
                for x in versions_soup.find_all('a', {'class': 'ah_button'})]

            for version, url in versions:
                self.logger.debug('-> Found version {}'.format(url))
                results.append(('{} ({})'.format(title, version), url))

        return SearchResult.from_tuples(self.media, results)
Example no. 2
0
    def search(self):
        """Query the Algolia index backing the site, page by page.

        Keeps fetching until the page count reported by the API is
        reached, then wraps the (title, slug) pairs in a SearchResult.
        """
        results = []
        page, pages_count = 0, 1

        while page < pages_count:
            encoded = urllib.parse.urlencode({
                'query': self.media.metadata['name'],
                'hitsPerPage': 10,
                'page': page
            })

            response = requests.post(
                'https://sjzdlecc2z-dsn.algolia.net/1/indexes/Anime/query?x-'
                'algolia-agent=Algolia%20for%20vanilla%20JavaScript%203.14.6&x'
                '-algolia-application-id=SJZDLECC2Z&x-algolia-api-key='
                'caa4878bcf6f8aea380e73b5840ff22b',
                json={'params': encoded}).json()

            # The API signals a rejected query with an embedded status code.
            if response.get('status') == 403:
                return []

            page += 1
            pages_count = int(response['nbPages'])

            results.extend((hit['title'], hit['slug'])
                           for hit in response['hits'])

        return SearchResult.from_tuples(self.media, results)
Example no. 3
0
    def search(self):
        # The query must be lowercased: the remote search method just
        # returns an empty list every time otherwise.
        query = self.media.metadata['name'].lower()
        soup = self._get('http://anilinkz.tv/search', params={'q': query})

        links = soup.select('ul#seariessearchlist a[title]')
        return SearchResult.from_links(self.media, links)
Example no. 4
0
    def search(self):
        """Search kissanime.to through its search-suggestion endpoint."""
        self._disable_cloudflare()

        payload = {'type': 'Anime', 'keyword': self.media.metadata['name']}
        soup = self._post('http://kissanime.to/Search/SearchSuggest',
                          data=payload)

        return SearchResult.from_links(self.media, soup.find_all('a'))
Example no. 5
0
    def search(self):
        """Search the site via its JSON suggestion endpoint."""
        url = self._get_url() + 'ajax/suggest_search'
        payload = {'keyword': self.media.metadata['name']}

        # The endpoint wraps the rendered result HTML in a JSON envelope.
        html = requests.post(url, data=payload).json()['content']
        soup = BeautifulSoup(html, 'html.parser')

        links = soup.find_all('a', {'class': 'ss-title'})
        return SearchResult.from_links(self.media, links)
Example no. 6
0
    def search(self):
        # The query string is built by hand on purpose: the server does
        # not handle percent-encoded requests.
        query = 'type=Anime&keyword={}'.format(self.media.metadata['name'])
        soup = self._post('http://chiaanime.co/SearchSuggest/index.php',
                          params=query)

        return SearchResult.from_links(self.media, soup.find_all('a'))
Example no. 7
0
    def search(self):
        """Search the mobile chia-anime catalogue.

        Returns a SearchResult built from (title, absolute URL) pairs.
        """
        # NOTE(review): a GET carrying a `data` payload is unusual —
        # presumably `_get` forwards it as the request body; confirm
        # `params` was not intended here.
        soup = self._get('http://m.chia-anime.tv/catlist.php',
                         data={'tags': self.media.metadata['name']})

        # Result links are site-relative, so prefix them with the host.
        results = [
            (link.text, 'http://m.chia-anime.tv' + link.get('href'))
            for link in soup.select('div.title > a')
        ]

        return SearchResult.from_tuples(self.media, results)
Example no. 8
0
    def search(self):
        """Search masterani.me through its JSON search API.

        Each result maps the title to an (id, slug) pair, which together
        identify the anime on the site.
        """
        data = self.session.get(
            'http://www.masterani.me/api/anime-search', params={
                'keyword': self.media.metadata['name']
            }
        ).json()

        results = [(item['title'], (item['id'], item['slug']))
                   for item in data]

        return SearchResult.from_tuples(self.media, results)
Example no. 9
0
    def search(self):
        """Search moetube.net via its search API (first page only).

        Each result pairs the title (held in the '#stitle' div inside the
        link) with the link's href.
        """
        soup = self._get('http://moetube.net/searchapi.php', params={
            'page': 1,
            'keyword': self.media.metadata['name']
        })

        results = [
            (link.find('div', {'id': 'stitle'}).text, link.get('href'))
            for link in soup.select('.series a')
        ]

        return SearchResult.from_tuples(self.media, results)
Example no. 10
0
    def search(self):
        """Search icefilms.info with its search form.

        The 'x'/'y' parameters mirror the coordinates the site's submit
        button would send; the server expects them to be present.
        """
        soup = self._get('http://www.icefilms.info/search.php',
                         params={
                             'q': self.media.metadata['name'],
                             'x': 0,
                             'y': 0
                         })

        results = [(result.text, result.get('href'))
                   for result in soup.select('.title a')]

        return SearchResult.from_tuples(self.media, results)
Example no. 11
0
    def search(self):
        """Search gogoanime.io through its autocomplete endpoint.

        Each autocomplete entry holds one link; its stripped text is the
        title and its href the show's page.
        """
        self._disable_cloudflare()

        soup = self._post('http://gogoanime.io/site/loadSearch', data={
            'data': self.media.metadata['name'],
            'id': '-1'
        })

        links = (item.find('a')
                 for item in soup.select('#header_search_autocomplete_body > div'))
        results = [(link.text.strip(), link.get('href')) for link in links]

        return SearchResult.from_tuples(self.media, results)
Example no. 12
0
    def search(self):
        """Search rawranime.tv via its AJAX quicksearch.

        Every hit is listed twice — once per language version — since the
        site serves both subbed and dubbed streams under the same id.
        """
        # The response comes back with escaped markup; drop the
        # backslashes before parsing.
        html = requests.get('http://rawranime.tv/index.php',
                            params={
                                'ajax': 'anime',
                                'do': 'search',
                                's': self.media.metadata['name']
                            }).text.replace('\\', '')

        soup = BeautifulSoup(html, 'html.parser')

        results = []

        for link in soup.find_all('a'):
            title = link.find('div', {'class': 'quicksearch-title'}).text
            # Renamed from `id` to avoid shadowing the builtin.
            anime_id = link.get('href')  # With a leading slash

            results += [(title + ' (Sub)', ('Subbed', anime_id)),
                        (title + ' (Dub)', ('Dubbed', anime_id))]

        return SearchResult.from_tuples(self.media, results)
Example no. 13
0
    def search(self):
        """Search animechiby.com and expand each post into its sources.

        The site organizes content so that one post can contain several
        medias; every spoiler section of every matching post becomes one
        result carrying its list of (label, url) sources.
        """
        soup = self._get('http://www.animechiby.com', params={
            's': self.media.metadata['name']
        })

        # Those are only 'top-level' links. The site organize contents in such
        # a way that the users can post an item, and in this item, multiple
        # medias can be found. We wanna fetch all those links.
        links = soup.select('.post h2 > a')

        # Well, the website uses window.open instead of href, for no apparent
        # reason.
        # NOTE(review): the [\'|\'] character classes also match a literal
        # '|'; presumably only the quote was intended — confirm before
        # tightening the pattern.
        onclick_regex = r'window\.open\([\'|\'](.+)[\'|\']\);return false;'

        results = []

        for link in links:
            link_soup = self._get(link.get('href'))

            # We get all the available sub-links for each link
            for section in link_soup.select('.su-spoiler'):
                # The section title
                sub_title = section.select('.su-spoiler-title')[0].text

                # Every input button carries its target url in its onclick
                # handler; extract (label, url) pairs from them.
                available_sources = [(
                    x.get('value'),
                    re.search(onclick_regex, x.get('onclick')).group(1)
                ) for x in section.find_all('input')]

                self.logger.debug('Section {} has {} sources'.format(
                    sub_title, len(available_sources)
                ))

                # We determine the section title, that is the top-level title
                # concatenated with the section title.
                block_title = '-> {} ({})'.format(link.text, sub_title)

                results.append((block_title, available_sources))

        return SearchResult.from_tuples(self.media, results)
Example no. 14
0
    def search(self):
        """Search beta.animefrost.tv with the site-wide search form."""
        query = {'s': self.media.metadata['name']}
        soup = self._get('http://beta.animefrost.tv', params=query)

        links = soup.select('.wrap a')
        return SearchResult.from_links(self.media, links)
Example no. 15
0
    def search(self):
        # Jinja-templated scaffold for a provider's search() method: the
        # {% if %} block optionally emits the Cloudflare bypass call, and
        # `self._` plus the empty URL suffix are placeholders to fill in
        # per site (presumably `_get`/`_post` — confirm against generator).
        {% if disable_cloudflare %}self._disable_cloudflare()

        {% endif %}soup = self._(self._get_url() + '')

        return SearchResult.from_links(self.media, soup.find_all('a'))