Example #1
    def search(cls, query):
        soup = helpers.soupify(
            helpers.post('https://kissanime.ru/Search/Anime',
                         data=dict(keyword=query),
                         referer=cls._referer,
                         cf=True))

        # If only one anime is found, KissAnime redirects straight to its page.
        # We don't want that, so build the single result directly.
        if soup.title.text.strip().lower() != "find anime":
            return [
                SearchResult(
                    title=soup.find('a', 'bigChar').text,
                    url='https://kissanime.ru' +
                    soup.find('a', 'bigChar').get('href'),
                    poster='',
                )
            ]

        searched = [s for i, s in enumerate(soup.find_all('td')) if not i % 2]

        ret = []
        for res in searched:
            res = SearchResult(
                title=res.text.strip(),
                url='https://kissanime.ru' + res.find('a').get('href'),
                poster='',
            )
            logger.debug(res)
            ret.append(res)

        return ret
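
A shared assumption across these snippets: each returns a list of SearchResult objects and leans on the project's helpers module. A minimal stand-in sketch (field names inferred from the snippets above and below, not the project's actual definition) for exercising the parsing logic offline:

    from dataclasses import dataclass, field

    @dataclass
    class SearchResult:
        # Hypothetical stand-in; the real project class may differ.
        title: str
        url: str
        poster: str = ''
        meta: dict = field(default_factory=dict)
        meta_info: dict = field(default_factory=dict)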
Example #2
    def search(cls, query):
        sel = helpers.get("https://kissanime.ru", sel=True)
        cookies = sel.cookies
        agent = sel.user_agent  # must be the same UA that generated the cookies
        cookies = {c['name']: c['value'] for c in cookies}
        soup = helpers.soupify(helpers.post(
            "https://kissanime.ru/Search/Anime",
            headers={
                "User-Agent": agent,
                "Referer": "https://kissanime.ru/Search/Anime"
            },
            data={"keyword": query},
            cookies=cookies))

        # If only one anime is found, KissAnime redirects straight to its page.
        # We don't want that, so build the single result directly.
        if soup.title.text.strip().lower() != "find anime":
            return [SearchResult(
                title=soup.find('a', 'bigChar').text,
                url=cls.domain +
                    soup.find('a', 'bigChar').get('href'),
                poster='',
            )]

        searched = [s for i, s in enumerate(soup.find_all('td')) if not i % 2]

        ret = []
        for res in searched:
            res = SearchResult(
                title=res.text.strip(),
                url=cls.domain + res.find('a').get('href'),
                poster='',
            )
            logger.debug(res)
            ret.append(res)

        return ret
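
The user-agent note in Example #2 reflects how Cloudflare clearance cookies behave: the cookie is only honoured together with the exact user agent that solved the challenge. A rough sketch of carrying both from Selenium into a plain requests session (hypothetical helper, not part of the project):

    import requests

    def session_from_selenium(driver):
        # Copy Selenium's cookies and UA into a requests.Session;
        # cf_clearance is tied to the UA that earned it.
        s = requests.Session()
        s.headers['User-Agent'] = driver.execute_script('return navigator.userAgent')
        for c in driver.get_cookies():
            s.cookies.set(c['name'], c['value'])
        return s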
Example #3
    def search(cls, query):
        res = scraper.post(
            'http://kissanime.ru/Search/Anime',
            data={
                'type': 'Anime',
                'keyword': query,
            },
            headers=desktop_headers,
        )

        soup = BeautifulSoup(res.text, 'html.parser')

        if soup.title.text.strip().lower() != "find anime":
            return [
                SearchResult(
                    title=soup.find('a', 'bigChar').text,
                    url='https://kissanime.ru' +
                    soup.find('a', 'bigChar').get('href'),
                    poster='',
                )
            ]

        searched = [s for i, s in enumerate(soup.find_all('td')) if not i % 2]

        ret = []
        for res in searched:
            res = SearchResult(
                title=res.text.strip(),
                url='https://kissanime.ru' + res.find('a').get('href'),
                poster='',
            )
            logging.debug(res)
            ret.append(res)

        return ret
Example #4
    def search(cls, query):
        r = session.get('https://www4.9anime.is/search?',
                        params={'keyword': query},
                        headers=desktop_headers)

        logging.debug(r.url)

        soup = BeautifulSoup(r.text, 'html.parser')

        search_results = soup.find('div', {
            'class': 'film-list'
        }).find_all('div', {'class': 'item'})

        ret = []

        logging.debug('Search results')

        for item in search_results:
            s = SearchResult(title=item.find('a', {
                'class': 'name'
            }).contents[0],
                             url=item.find('a')['href'],
                             poster=item.find('img')['src'])
            meta = dict()
            m = item.find('div', {'class': 'status'})
            for div in m.find_all('div'):
                meta[div.attrs['class'][0]] = div.text.strip()
            s.meta = meta
            logging.debug(s)
            ret.append(s)

        return ret
Example #5
    def search(cls, query):
        r = requests.get('https://www4.9anime.is/search?',
                         params={'keyword': query})

        logging.debug(r.url)

        soup = BeautifulSoup(r.text, 'html.parser')

        # 9anime has search result in
        # <div class="item">
        #   <div class="inner">
        #    <a href="https://www4.9anime.is/watch/dragon-ball-super.7jly"
        #       class="poster tooltipstered" data-tip="ajax/film/tooltip/7jly?5827f020">
        #       <img src="http://static.akacdn.ru/static/images/2018/03/43012fe439631a2cecfcf248841e15f7.jpg"
        #            alt="Dragon Ball Super">
        #       <div class="status">
        #           <span class="bar">
        #           </span>
        #           <div class="ep"> Ep 131/131 </div>
        #       </div>
        #     </a>
        #    <a href="https://www4.9anime.is/watch/dragon-ball-super.7jly"
        #      data-jtitle="Dragon Ball Super"
        #      class="name">
        #           Dragon Ball Super
        #    </a>
        #   </div>
        # </div>

        search_results = soup.find('div', {
            'class': 'film-list'
        }).find_all('div', {'class': 'item'})

        ret = []

        logging.debug('Search results')

        for item in search_results:
            s = SearchResult(title=item.find('a', {
                'class': 'name'
            }).contents[0],
                             url=item.find('a')['href'],
                             poster=item.find('img')['src'])
            meta = dict()
            m = item.find('div', {'class': 'status'})
            for div in m.find_all('div'):
                meta[div.attrs['class'][0]] = div.text.strip()
            s.meta = meta
            logging.debug(s)
            ret.append(s)

        return ret
Example #6
 def search(query):
     soup = helpers.soupify(helpers.get('https://myanimelist.net/anime.php', params={'q': query}))
     search_results = soup.select("a.hoverinfo_trigger.fw-b.fl-l")
     return [SearchResult(
         url=i.get('href'),
         title=i.select('strong')[0].text
     ) for i in search_results]
Example #7
    def search(cls, query):
        filters = {"No filter": 0, "No remakes": 1, "Trusted only": 2}
        categories = {
            "Anime Music Video": "1_1",
            "English-translated": "1_2",
            "Non-English-translated": "1_3"
        }

        rex = r'(magnet:)+[^"]*'
        self = cls()

        parameters = {
            "f": filters[self.config["filter"]],
            "c": categories[self.config["category"]],
            "q": query,
            "s": "size",
            "o": "desc"
        }
        search_results = helpers.soupify(
            helpers.get("https://nyaa.si/", params=parameters))

        search_results = [
            SearchResult(
                title=i.select("a:not(.comments)")[1].get("title"),
                url=i.find_all('a', {'href': re.compile(rex)})[0].get('href'),
                meta={
                    'peers':
                    i.find_all('td', class_='text-center')[3].text + ' peers',
                    'size': i.find_all('td', class_='text-center')[1].text
                }) for i in search_results.select("tr.default, tr.success")
        ]

        return search_results
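
For reference: assuming a config of filter "Trusted only" and category "English-translated", the snippet above requests https://nyaa.si/?f=2&c=1_2&q=<query>&s=size&o=desc (results sorted by size, descending) and then pulls the magnet link out of each default/success result row.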
Example #8
    def search(cls, query):
        """
        # Use the code below for a live AJAX search instead.
        # It shows at most 10 search results.

        search_results = helpers.get('https://vidstreaming.io/ajax-search.html', 
            params = {'keyword': query},
            headers = {
                'X-Requested-With':'XMLHttpRequest',
            }
        ).json()
        search_results = helpers.soupify(search_results['content']).select('li > a')
        return [
            SearchResult(
                title=i.text,
                url=f"https://vidstreaming.io{i.get('href')}")
            for i in search_results
        ]
        """
        # Only uses page 1 of the results, i.e. at most 30 entries;
        # very few shows are affected by this.
        search_results = helpers.soupify(
            helpers.get('https://vidstreaming.io/search.html',
                        params={'keyword': query})
        ).select('ul.listing > li.video-block > a')

        # The regex below cuts the trailing "Episode xxx" out of each title.

        return [
            SearchResult(title=re.sub(r"(E|e)pisode\s*[0-9]*", '',
                                      i.select('div.name')[0].text.strip()),
                         url=f"https://vidstreaming.io{i.get('href')}",
                         meta_info={'version_key_dubbed': '(Dub)'})
            for i in search_results
        ]
Example #9
 def search(self, query):
     headers = {
         'user-agent':
         'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/605.1.15 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/605.1.15',
         'x-access-token': '0df14814b9e590a1f26d3071a4ed7974'
     }
     # soup = helpers.soupify(helpers.get('https://twist.moe/', allow_redirects=True, headers=headers))
     req = helpers.get('https://twist.moe/api/anime', headers=headers)
     if 'being redirected' in req.text:
         logger.debug('Trying to extract cookie')
         cookie = get_cookie(req)
         logger.debug('Got cookie: ' + cookie)
         headers['cookie'] = cookie
         # XXX: Can't use helpers.get here because that one is cached. Investigate
         req = helpers.get('https://twist.moe/api/anime', headers=headers)
     all_anime = req.json()
     animes = []
     for anime in all_anime:
         animes.append(
             SearchResult(
                 title=anime['title'],
                 url='https://twist.moe/a/' + anime['slug']['slug'] + '/',
                 poster=f"https://media.kitsu.io/anime/poster_images/{anime['hb_id']}/large.jpg"
             ))
     animes = [ani[0] for ani in process.extract(query, animes)]
     return animes
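
The last two lines are a fuzzy-ranking idiom (also used in Examples #11 and #14): fuzzywuzzy's process.extract returns (choice, score) pairs, best match first and capped at limit=5 by default, so the comprehension keeps only the closest titles. A small illustration with plain strings (the snippets pass SearchResult objects, which presumably stringify to something comparable):

    from fuzzywuzzy import process

    titles = ['Dragon Ball Super', 'Dragon Ball Z', 'One Piece']
    # Each entry is (choice, score), highest score first.
    ranked = process.extract('dragon ball', titles, limit=10)
    best_first = [choice for choice, score in ranked]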
Example #10
 def search(cls, query):
     soup = helpers.soupify(helpers.get(f"{cls.DOMAIN}browse", params={'q': query}))
     results = [
         SearchResult(title=v.h3.text, url=cls.DOMAIN + v.a['href'], poster=v.img['src'])
         for v in soup.select('ul.ListAnimes > li')
     ]
     return results
Example #11
 def search(self, query):
     headers = {
         'user-agent':
         'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.46 Safari/537.36'
     }
     first_time = helpers.soupify(
         helpers.get('https://twist.moe',
                     allow_redirects=True,
                     headers=headers))
     js = first_time.select_one('script').text
     js = "location = {'reload': ()=>true};document = {}; \n" + js + f"console.log(document.cookie)"
     cookie = eval_in_node(js).strip()
     with requests_cache.disabled():
         headers['cookie'] = cookie
         r = requests.get('https://twist.moe/', headers=headers)
         soup = helpers.soupify(r)
     all_anime = soup.select_one('nav.series').select('li')
     animes = []
     for anime in all_anime:
         animes.append(
             SearchResult(
                 title=anime.find('span').contents[0].strip(),
                 url='https://twist.moe' + anime.find('a')['href'],
             ))
     animes = [ani[0] for ani in process.extract(query, animes)]
     return animes
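
eval_in_node is a project helper not shown here; it evidently runs the assembled JavaScript under Node.js and returns its stdout, i.e. the computed cookie. A plausible sketch (hypothetical implementation, assuming node is on PATH):

    import subprocess

    def eval_in_node(js: str) -> str:
        # Execute the JS snippet in Node and return whatever it prints.
        return subprocess.check_output(['node', '-e', js], text=True)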
Example #12
    def search(cls, query):

        # Get cookies for the site if we haven't already
        if not cls.cookies or not cls.agent:
            cls._get_cookies(cls.url)

        # Only uses the first page of search results, but it's sufficient.
        search_results = helpers.soupify(
            helpers.get(cls.search_url,
                        params={'keyword': query},
                        headers={
                            "User-Agent": cls.agent,
                        },
                        cookies=cls.cookies)).select('a.name')
        search_results = [
            SearchResult(title=i.text,
                         url=cls.url + i.get('href')
                         if i.get("href").startswith("/") else i.get("href"),
                         meta_info={
                             'version_key_dubbed': '(Dub)',
                             'version_key_subbed': ''
                         }) for i in search_results
        ]

        return search_results
Example #13
    def search(cls, query):
        search_results = helpers.post(
            'https://ww5.dubbedanime.net/ajax/paginate',
            data={
                'query[search]': query,
                'what': 'query',
                'model': 'Anime',
                'size': 30,
                'letter': 'all',
            }).json()

        search_results = [
            SearchResult(title=result['title'], url=cls.url + result['url'])
            for result in search_results['results']
        ]
        return search_results
Example #14
 def search(self, query):
     headers = {
         'user-agent':
         'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.46 Safari/537.36',
         'x-access-token': '1rj2vRtegS8Y60B3w3qNZm5T2Q0TN2NR'
     }
     soup = helpers.soupify(
         helpers.get('https://twist.moe/',
                     allow_redirects=True,
                     headers=headers))
     if 'being redirected' in soup.text:
         logger.debug('Trying to extract cookie')
         cookie = get_cookie(soup)
         logger.debug('Got cookie: ' + cookie)
         headers['cookie'] = cookie
     # XXX: Can't use helpers.get here because that one is cached. Investigate
     # The API is fetched outside the if so all_anime is always defined.
     r = helpers.get('https://twist.moe/api/anime', headers=headers)
     all_anime = r.json()
     animes = []
     for anime in all_anime:
         animes.append(
             SearchResult(
                 title=anime['title'],
                 url='https://twist.moe/a/' + anime['slug']['slug'] + '/',
             ))
     animes = [ani[0] for ani in process.extract(query, animes)]
     return animes
Example #15
    def search(cls, query):
        # V3 not supported
        v1 = helpers.soupify(helpers.post("https://v1.nmtvjxdtx42qdwktdxjfoikjq.workers.dev/",
                                          data={"q2": query}, verify=False).json()['result']).select('p.name > a')
        v5 = helpers.soupify(helpers.post("https://animixplay.to/api/search/",
                                          data={"qfast": query}, verify=False).json()['result']).select('p.name > a')

        # v3 = helpers.soupify(helpers.post("https://v3.w0ltfgqz8y3ygjozgs4v.workers.dev/",
        #                                  data={"q3": query}, verify=False).json()['result'])

        # v3 and v4 return a 400 error when there are no results.
        # HTTPError doesn't seem to play well with helpers, hence the broad except below.

        try:
            v4 = helpers.soupify(helpers.post("https://v4.w0ltfgqz8y3ygjozgs4v.workers.dev/",
                                              data={"q": query}, verify=False).json()['result']).select('p.name > a')
        except Exception:
            v4 = []

        # meta['version'] will be the key's name from versions_dict
        versions_dict = {'v1': v1, 'v4': v4, 'v5': v5}
        logger.debug('Versions: {}'.format(versions_dict))
        data = []
        for i in versions_dict:
            for j in versions_dict[i]:
                data.append(SearchResult(
                    title=j.text,
                    url='https://animixplay.com' + j.get('href'),
                    meta={'version': i},
                    meta_info={
                        'version_key_dubbed': '(Dub)',
                    }
                ))

        return data
Example #16
 def search(cls, query):
     cls.token = get_token()
     params = {
         'search': query,
         'token': cls.token
     }
     results = helpers.get('https://ani.api-web.site/advanced', params=params).json()['data']  # noqa
     if 'nav' in results:
         results = results['nav']['currentPage']['items']
         search_results = [
             SearchResult(
                 title=i['name'],
                 url='https://shiro.is/anime/' + i['slug'],
                 poster='https://ani-cdn.api-web.site/' + i['image'],
                 meta={'year': i['year']},
                 meta_info={
                     'version_key_dubbed': '(Sub)' if i['language'] == 'subbed' else '(Dub)'  # noqa
                 }
             )
             for i in results
         ]
         search_results = sorted(search_results, key=lambda x: int(x.meta['year']))
         return search_results
     else:
         return []
Example #17
 def search(cls, query):
     search_results = helpers.soupify(helpers.get(cls.url, params={'q': query})).select('div.card-body > div > a')
     return [
         SearchResult(
             title=i.get('title') if i.get('title') else i.select('img')[0].get('alt'),
             url=i.get('href'))
         for i in search_results
     ]
Example #18
 def search(cls, query):
     # Only uses the first page of search results, but it's sufficient.
     search_results = helpers.soupify(
         helpers.get(cls.url, params={'keyword': query})).select('a.name')
     return [
         SearchResult(title=i.text, url=i.get('href'))
         for i in search_results
     ]
Example #19
 def search(self, query):
     soup = helpers.soupify(
         helpers.get('https://anistream.xyz/search', params={'term': query}))
     results = soup.select_one('.card-body').select('a')
     results = [
         SearchResult(title=v.text, url=v.attrs['href']) for v in results
     ]
     return results
Example #20
 def search(cls, query):
     search_results = helpers.soupify(
         helpers.get(cls.url, params={'s': query})
     ).select('h3.post-title > a')
     return [
         SearchResult(title=i.text, url=i.get('href'))
         for i in search_results
     ]
Example #21
 def search(cls, query):
     search_results = helpers.soupify(
         helpers.get(cls.url, params={'q': query})).select('article > a')
     search_results = [
         SearchResult(title=a.select('h3')[0].text,
                      url='https://animedaisuki.moe' + a.get('href'))
         for a in search_results
     ]
     return search_results
Example #22
 def search(cls, query):
     soup = helpers.soupify(helpers.post('https://vostfree.com', data={'do': 'search', 'subaction': 'search', 'story': query}))
     return [
         SearchResult(
             title=re.sub(r'\s+?FRENCH(\s+)?$', '', x.text.strip()),
             url=x['href']
         )
         for x in soup.select('div.title > a')
     ]
Example #23
 def search(cls, query):
     search_results = helpers.soupify(
         helpers.get(f'{cls.url}/search', params={'url': 'search', 'q': query})
     ).select('div.main-con > a')
     search_results = [
         SearchResult(
             title=a.get('title'),
             url=cls.url + a.get('href'))
         for a in search_results
     ]
     return search_results
Example #24
 def search(cls, query):
     search_results = helpers.soupify(
         helpers.get(cls.url, params={'s': query})
     ).select('h5.title-av-search-res > a')
     return [
         SearchResult(title=a.text, url=a.get('href'))
         for a in search_results
     ]
Example #25
 def search(cls, query):
     # Be aware of CSS selectors changing.
     search_results = helpers.soupify(
         helpers.get(cls.url, params={'q': query})).select('div.card > a')
     return [
         SearchResult(title=i.get('title')
                      if i.get('title') else i.select('img')[0].get('alt'),
                      url=i.get('href')) for i in search_results
     ]
Example #26
 def search(cls, query):
     search_results = helpers.soupify(
         helpers.get(cls.url,
                     params={'s': query})).select('div.item-head > h3 > a')
     search_results = [
         SearchResult(title=i.text, url=i.get('href'))
         for i in search_results
     ]
     return search_results
Example #27
 def search(cls, query):
     search_results = helpers.soupify(
         helpers.get(cls.url, params={'keyword': query})
     ).select('div.ml-item > a')
     return [
         SearchResult(title=i.get('title'),
                      url=i.get('href') + '/watching.html')
         for i in search_results
     ]
Example #28
 def search(cls, query):
     soup = helpers.soupify(
         helpers.get(f"{cls.DOMAIN}/search/", params={'q': query}))
     results = [
         SearchResult(title=t.text,
                      url=cls.DOMAIN + t.attrs['href'],
                      poster=t.img.attrs.get('data-src', None))
         for t in soup.select('.preview > a')
     ]
     return results
Example #29
    def search(cls, query):
        search_results = helpers.soupify(
            helpers.get(cls.url + '/search.html',
                        params={'keyword': query})).select('a.videoHname')

        search_results = [
            SearchResult(title=a.get('title'), url=cls.url + a.get('href'))
            for a in search_results
        ]
        return search_results
Example #30
 def search(cls, query):
     r = helpers.get("https://animefrenzy.net/search", params={"q": query})
     soup = helpers.soupify(r)
     titleName = soup.select("div.conm > a.cona")
     search_results = [
         SearchResult(title=a.text,
                      url='https://animefrenzy.net/' + a.get('href'))
         for a in titleName
     ]
     return search_results