Example #1
def command(ctx, anime_url, episode_range, url, player, skip_download, quality,
            force_download, download_dir, file_format, provider,
            external_downloader, chunk_size, disable_ssl, fallback_qualities,
            choice):
    """ Download the anime using the url or search for it.
    """
    util.print_info(__version__)
    # TODO: Replace by factory
    cls = get_anime_class(anime_url)

    disable_ssl = (cls and cls.__name__ == 'Masterani') or disable_ssl
    session.get_session().verify = not disable_ssl

    if not cls:
        anime_url = util.search(anime_url, provider, choice)
        cls = get_anime_class(anime_url)

    anime = cls(anime_url,
                quality=quality,
                fallback_qualities=fallback_qualities)
    logger.info('Found anime: {}'.format(anime.title))

    animes = util.parse_ep_str(anime, episode_range)

    # TODO:
    # Two types of plugins:
    #   - Anime plugin: Pass the whole anime
    #   - Ep plugin: Pass each episode
    if url or player:
        skip_download = True

    if download_dir and not skip_download:
        logger.info('Downloading to {}'.format(os.path.abspath(download_dir)))

    for episode in animes:
        if url:
            util.print_episodeurl(episode)

        if player:
            util.play_episode(episode, player=player)

        if not skip_download:
            if external_downloader:
                logging.info('Downloading episode {} of {}'.format(
                    episode.ep_no, anime.title))
                util.external_download(external_downloader,
                                       episode,
                                       file_format,
                                       path=download_dir)
                continue
            if chunk_size is not None:
                chunk_size *= 1e6
                chunk_size = int(chunk_size)
            with requests_cache.disabled():
                episode.download(force=force_download,
                                 path=download_dir,
                                 format=file_format,
                                 range_size=chunk_size)
            print()
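
A note on the SSL toggle above: session.get_session() returns a requests-style session, and setting its verify attribute to False makes requests skip certificate validation. A minimal standalone sketch with plain requests (the flag value is hypothetical):

import requests

disable_ssl = True  # hypothetical flag value
sess = requests.Session()
sess.verify = not disable_ssl  # False -> skip TLS certificate checks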
Example #2
    def setup_func(url: str,
                   cf: bool = False,
                   sel: bool = False,
                   referer: str = None,
                   cache: bool = True,
                   headers=None,
                   **kwargs):
        '''
        {0} performs a {0} request

        Parameters
        ----------
        url : str
            url is the url of the request to be performed
        cf : bool
            cf if True performs the request through cfscrape.
            For cloudflare protected sites.
        sel : bool
            sel if True performs the request through selescrape (selenium),
            falling back to cfscrape if selenium is not installed.
        referer : str
            a url sent as referer in request headers
        cache : bool
            cache if True uses the cached requests session
        '''
        selescrape = None
        if cf:
            sess = cf_session
        elif sel:
            try:
                from selenium import webdriver
                from anime_downloader.sites.helpers import selescrape
                sess = selescrape
            except ImportError:
                sess = cf_session
                logger.warning(
                    "This provider may not work correctly because it requires selenium.\n"
                    "To install it, run: 'pip install selenium'."
                )
        else:
            sess = session.get_session(cache=cache)

        # Copy the defaults so per-request headers don't leak into the shared dict.
        req_headers = default_headers.copy()
        if headers:
            req_headers.update(headers)
        if referer:
            req_headers['referer'] = referer

        logger.debug('-----')
        logger.debug('{} {}'.format(func.__name__.upper(), url))
        logger.debug(kwargs)
        logger.debug(req_headers)
        logger.debug('-----')

        res = sess.request(func.__name__.upper(),
                           url,
                           headers=req_headers,
                           **kwargs)

        if sess is not selescrape:  # TODO fix this for selescrape too
            res.raise_for_status()
            logger.debug(res.url)
            # logger.debug(res.text)
            if logger.getEffectiveLevel() == logging.DEBUG:
                _log_response_body(res)
        return res
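
setup_func is the inner function of a decorator: the HTTP verb comes from the decorated function's name via func.__name__.upper(). A minimal sketch of that pattern, assuming plain requests and none of the project's session plumbing:

import requests

def request_method(func):
    # The wrapper issues an HTTP request whose verb is the decorated
    # function's name, mirroring func.__name__.upper() above.
    def setup_func(url, **kwargs):
        res = requests.request(func.__name__.upper(), url, **kwargs)
        res.raise_for_status()
        return res
    return setup_func

@request_method
def get(url, **kwargs):
    pass  # body unused; only the name matters

# get('https://example.com') now performs a GET through the wrapper.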
Example #3
def get_json(url, params=None):
    logger.debug('API call URL: {} with params {!r}'.format(url, params))
    res = session.get_session().get(url, headers=desktop_headers, params=params)
    logger.debug('URL: {}'.format(res.url))
    data = res.json()
    logger.debug('Returned data: {}'.format(data))

    return data
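
A call to this helper might look like the following; the endpoint and parameters are hypothetical:

data = get_json('https://api.example.com/anime', params={'q': 'one piece'})
print(data)  # whatever JSON the endpoint returned, already parsed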
Example #4
    def check_if_exists(self):
        # Added Referer header as kwik needs it.
        r = session.get_session().get(self.url,
                                      headers={'referer': self.referer},
                                      stream=True)

        self._total_size = int(r.headers['Content-length'])
        if os.path.exists(self.path):
            if abs(os.stat(self.path).st_size - self._total_size) < 10 \
               and not self.options['force']:
                logger.warning('File already downloaded. Skipping download.')
                return
            else:
                os.remove(self.path)
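
The size check above compares the remote Content-Length against the local file, treating a difference under 10 bytes as "already downloaded". A standalone sketch of the same idea, assuming plain requests:

import os
import requests

def already_downloaded(path, url, tolerance=10):
    # Content-Length of the remote file (response headers are case-insensitive).
    total = int(requests.get(url, stream=True).headers['Content-Length'])
    return os.path.exists(path) and abs(os.stat(path).st_size - total) < tolerance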
Example #5
    def _get_data(self):
        url = self.url
        res = session.get_session().get(url)
        content_re = re.compile(r"= atob\('(.*?)'\)")
        source_re = re.compile(r'source src="(.*?)"')

        enc_cont = content_re.findall(res.text)[0]
        content = base64.b64decode(enc_cont).decode('utf-8')

        stream_url = source_re.findall(content)[0]

        return {
            'stream_url': stream_url,
            'meta': {
                'title': '',
                'thumbnail': '',
            }
        }
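
The page embeds its real markup behind JavaScript's atob(); the Python side reverses it with base64.b64decode. A self-contained sketch on a hypothetical page body:

import base64
import re

html = "<script>var x = atob('aHR0cHM6Ly9leGFtcGxlLmNvbS92aWRlby5tcDQ=')</script>"
enc = re.search(r"atob\('(.*?)'\)", html).group(1)
print(base64.b64decode(enc).decode('utf-8'))  # https://example.com/video.mp4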
Example #6
    def check_if_exists(self):
        # Added Referer header as kwik needs it.
        headers = {
            'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) Gecko/20100101 Firefox/56.0",
        }
        if self.source.referer:
            headers['referer'] = self.source.referer
        r = session.get_session().get(
            self.source.stream_url, headers=headers, stream=True)

        self._total_size = int(r.headers['Content-length'])
        logger.debug('total size: ' + str(self._total_size))
        if os.path.exists(self.path):
            if abs(os.stat(self.path).st_size - self._total_size) < 10 \
               and not self.force:
                logger.warning('File already downloaded. Skipping download.')
                return
            else:
                os.remove(self.path)
Example #7
    def get_data(self):
        self._episode_urls = []
        r = session.get_session().get(self.url, headers=desktop_headers)
        soup = BeautifulSoup(r.text, 'html.parser')

        try:
            self._scrape_metadata(soup)
        except Exception as e:
            logging.debug('Metadata scraping error: {}'.format(e))

        self._episode_urls = self._scarpe_episodes(soup)
        self._len = len(self._episode_urls)

        logging.debug('EPISODE IDS: length: {}, ids: {}'.format(
            self._len, self._episode_urls))

        self._episode_urls = [(no + 1, id)
                              for no, id in enumerate(self._episode_urls)]

        return self._episode_urls
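
The final list comprehension just numbers the scraped URLs from 1. A sketch of the soup-to-numbered-episodes flow on hypothetical markup:

from bs4 import BeautifulSoup

html = '<ul><li><a href="/ep/1">1</a></li><li><a href="/ep/2">2</a></li></ul>'
soup = BeautifulSoup(html, 'html.parser')
urls = [a['href'] for a in soup.select('li a')]
episode_urls = list(enumerate(urls, start=1))  # [(1, '/ep/1'), (2, '/ep/2')]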
Example #8
    def __init__(self, source, path, force, range_size=None):
        logging.info(path)

        self.url = source.stream_url
        self.referer = source.referer
        self.path = path
        self.range_size = range_size

        util.make_dir(path.rsplit('/', 1)[0])

        self.chunksize = 16384

        # Added Referer header as kwik needs it.
        r = session.get_session().get(self.url, headers={'referer': self.referer}, stream=True)

        self.total_size = int(r.headers['Content-length'])
        if os.path.exists(path):
            if abs(os.stat(path).st_size - self.total_size) < 10 and not force:
                logging.warning('File already downloaded. Skipping download.')
                return
            else:
                os.remove(path)
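
The stream=True request above only fetches the headers up front; the body would later be consumed in chunks of self.chunksize bytes. A sketch of that consumption step (the output path is hypothetical):

with open('/tmp/episode.mp4', 'wb') as f:
    for chunk in r.iter_content(chunk_size=16384):  # matches self.chunksize
        f.write(chunk)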
Example #9
from hashlib import md5
from bs4 import BeautifulSoup
import warnings

from anime_downloader import session
from anime_downloader.sites.anime import BaseAnime, BaseEpisode, SearchResult


# Don't warn if not using fuzzywuzzy[speedup]
with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    from fuzzywuzzy import process

BLOCK_SIZE = 16
KEY = b"k8B$B@0L8D$tDYHGmRg98sQ7!%GOEGOX27T"
session = session.get_session()


class TwistMoeEpisode(BaseEpisode):
    QUALITIES = ['360p', '480p', '720p', '1080p']

    def _get_sources(self):
        return [('no_extractor', self.url)]


class TwistMoe(BaseAnime):
    sitename = 'twist.moe'
    QUALITIES = ['360p', '480p', '720p', '1080p']
    _episodeClass = TwistMoeEpisode
    _api_url = "https://twist.moe/api/anime/{}/sources"
Example #10
def command(ctx, anime_url, episode_range, url, player, skip_download, quality,
            force_download, download_dir, file_format, provider,
            external_downloader, chunk_size, disable_ssl, fallback_qualities, choice, skip_fillers, speed_limit):
    """ Download the anime using the url or search for it.
    """
    query = anime_url[:]

    util.print_info(__version__)
    # TODO: Replace by factory
    cls = get_anime_class(anime_url)

    disable_ssl = (cls and cls.__name__ == 'Masterani') or disable_ssl
    session.get_session().verify = not disable_ssl

    if not cls:
        anime_url, _ = util.search(anime_url, provider, choice)
        cls = get_anime_class(anime_url)

    anime = cls(anime_url, quality=quality,
                fallback_qualities=fallback_qualities)
    logger.info('Found anime: {}'.format(anime.title))

    animes = util.parse_ep_str(anime, episode_range)
    if not animes:
        # Issue #508.
        raise exceptions.NotFoundError('No episodes found within index.')

    # TODO:
    # Two types of plugins:
    #   - Anime plugin: Pass the whole anime
    #   - Ep plugin: Pass each episode
    if url or player:
        skip_download = True

    if download_dir and not skip_download:
        logger.info('Downloading to {}'.format(os.path.abspath(download_dir)))
    if skip_fillers:
        fillers = util.get_filler_episodes(query)
    if speed_limit:
        logger.info("Speed is being limited to {}".format(speed_limit))
    for episode in animes:
        if skip_fillers and fillers:
            if episode.ep_no in fillers:
                logger.info("Skipping episode {} because it is a filler.".format(episode.ep_no))
                continue

        if url:
            util.print_episodeurl(episode)

        if player:
            util.play_episode(episode, player=player, title=f'{anime.title} - Episode {episode.ep_no}')

        if not skip_download:
            if external_downloader:
                logging.info('Downloading episode {} of {}'.format(
                    episode.ep_no, anime.title)
                )
                util.external_download(external_downloader, episode,
                                       file_format, speed_limit, path=download_dir)
                continue
            if chunk_size is not None:
                chunk_size *= 1e6
                chunk_size = int(chunk_size)
            with requests_cache.disabled():
                episode.download(force=force_download,
                                 path=download_dir,
                                 format=file_format,
                                 range_size=chunk_size)
            print()
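
The chunk-size flag is given in megabytes and converted to bytes before download. In isolation:

chunk_size = 10                     # hypothetical value, in MB
chunk_size = int(chunk_size * 1e6)  # 10000000 bytes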
Example #11
def command(ctx, anime_url, episode_range, player, force_download, provider,
            skip_fillers, ratio, url, choice, download_metadata):
    """
    dl with fallback providers\n
    Will use another provider even if the chosen one fails.\n
    """

    # Borrows some config from the original dl command.
    # This can all be flags, but ezdl is made to be easy.
    fallback_qualities = Config['dl']['fallback_qualities']
    download_dir = Config['dl']['download_dir']
    quality = Config['dl']['quality']
    url = Config['dl']['url'] if not url else url
    skip_download = Config['dl']['skip_download']
    chunk_size = Config['dl']['chunk_size']
    speed_limit = Config['dl']['speed_limit']

    external_downloader = Config['dl']['external_downloader']
    file_format = Config['ezdl']['file_format']
    fallback_providers = Config['ezdl']['fallback_providers']

    query = anime_url[:]
    util.print_info(__version__)

    fallback_providers.insert(0, provider)
    # Eliminates duplicates while keeping order
    providers = sorted(set(fallback_providers), key=fallback_providers.index)

    info = animeinfo.search_anilist(query, choice)

    logger.info('Selected "{}" '.format(info.title))
    episode_count = info.episodes - 1
    # Interprets the episode range for use in a for loop.
    # 1:3 -> for _episode in range(1, 4):
    episode_range = util.parse_episode_range(episode_count, episode_range)
    episode_range_split = episode_range.split(':')
    # Issue #508.
    if episode_range_split[0] > episode_range_split[-1]:
        raise exceptions.NotFoundError('No episodes found within index.')

    # Stores the choices for each provider, to prevent re-prompting the search.
    # The current setup runs episode-wise, so without this a 12-episode series would give 12+ prompts.
    choice_dict = {}

    # Doesn't work on nyaa since it only returns one episode.
    for episode_range in range(int(episode_range_split[0]),
                               int(episode_range_split[-1]) + 1):
        # Exits if all providers are skipped.
        if [choice_dict[i] for i in choice_dict] == [0] * len(providers):
            logger.info('All providers skipped, exiting')
            exit()

        for provider in providers:
            if not get_anime_class(provider):
                logger.info('"{}" is an invalid provider'.format(provider))
                continue

            logger.debug('Current provider: {}'.format(provider))
            # TODO: Replace by factory
            cls = get_anime_class(anime_url)

            # To make the downloads use the correct name if URLs are used.
            real_provider = cls.sitename if cls else provider
            # This will allow for animeinfo metadata in filename and one filename for multiple providers.
            rep_dict = {
                'animeinfo_anime_title': util.slugify(info.title),
                'provider': util.slugify(real_provider),
                'anime_title': '{anime_title}',
                'ep_no': '{ep_no}'
            }
            fixed_file_format = file_format.format(**rep_dict)
            # Keeping this as I don't know the impact of removing it.
            # It's False by default in normal dl.
            disable_ssl = False
            session.get_session().verify = not disable_ssl

            # This is just to make choices in providers persistent between searches.
            choice_provider = choice_dict.get(provider)

            # Fall back to the raw query; replaced below when a search is needed.
            _anime_url = anime_url
            if not cls:
                _anime_url, choice_provider = util.search(anime_url,
                                                          provider,
                                                          val=choice_provider,
                                                          season_info=info,
                                                          ratio=ratio)
                choice_dict[provider] = choice_provider
                if choice_provider == 0 or not _anime_url:
                    logger.info('Skipped')
                    continue

                cls = get_anime_class(_anime_url)

            try:
                anime = cls(_anime_url,
                            quality=quality,
                            fallback_qualities=fallback_qualities)
            # I have yet to investigate all errors this can output.
            # "No sources found" raises an error which exits the script.
            except Exception:
                continue

            logger.debug('Found anime: {}'.format(anime.title))

            try:
                animes = util.parse_ep_str(anime, str(episode_range))
            except RuntimeError:
                logger.error(
                    'No episode found with index {}'.format(episode_range))
                continue
            except Exception:
                logger.error('Unknown provider error')
                continue

            # TODO:
            # Two types of plugins:
            #   - Anime plugin: Pass the whole anime
            #   - Ep plugin: Pass each episode
            if url or player:
                skip_download = True

            if download_dir and not skip_download:
                logger.info('Downloading to {}'.format(
                    os.path.abspath(download_dir)))
            if skip_fillers:
                fillers = util.get_filler_episodes(query)
            for episode in animes:
                if skip_fillers and fillers:
                    if episode.ep_no in fillers:
                        logger.info(
                            "Skipping episode {} because it is a filler.".
                            format(episode.ep_no))
                        continue

                if download_metadata:
                    util.download_metadata(fixed_file_format, info.metadata,
                                           episode)

                if url:
                    util.print_episodeurl(episode)

                if player:
                    util.play_episode(
                        episode,
                        player=player,
                        title=f'{anime.title} - Episode {episode.ep_no}')

                if not skip_download:
                    if external_downloader:
                        logging.info('Downloading episode {} of {}'.format(
                            episode.ep_no, anime.title))
                        util.external_download(external_downloader,
                                               episode,
                                               fixed_file_format,
                                               path=download_dir,
                                               speed_limit=speed_limit)
                        continue
                    if chunk_size is not None:
                        # Convert MB to bytes; int() last so range_size stays an int.
                        chunk_size *= 1e6
                        chunk_size = int(chunk_size)
                    with requests_cache.disabled():
                        episode.download(force=force_download,
                                         path=download_dir,
                                         format=fixed_file_format,
                                         range_size=chunk_size)
                    print()

            # If it all succeeded, proceed to the next episode instead of trying more providers.
            break
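
The provider list is de-duplicated while preserving the user's order with the sorted/index trick used above:

fallback_providers = ['animepahe', 'gogoanime', 'animepahe']  # hypothetical names
providers = sorted(set(fallback_providers), key=fallback_providers.index)
# ['animepahe', 'gogoanime']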
Example #12
import cfscrape
from bs4 import BeautifulSoup
import logging

from anime_downloader.sites.anime import BaseAnime
from anime_downloader.const import get_random_header
from anime_downloader.session import get_session

scraper = get_session(cfscrape.create_scraper())


class BaseAnimeCF(BaseAnime):
    def get_data(self):
        headers = get_random_header()
        if hasattr(self, '_referer'):
            headers['referer'] = self._referer

        # Use the headers built above so the referer is actually sent.
        r = scraper.get(self.url, headers=headers)
        soup = BeautifulSoup(r.text, 'html.parser')

        self._scrape_metadata(soup)

        self._episode_urls = self._scarpe_episodes(soup)
        self._len = len(self._episode_urls)

        logging.debug('EPISODE IDS: length: {}, ids: {}'.format(
            self._len, self._episode_urls))

        self._episode_urls = [(no + 1, id)
                              for no, id in enumerate(self._episode_urls)]
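
cfscrape.create_scraper() returns a requests.Session subclass that solves Cloudflare's JavaScript challenge before issuing the real request, which is why it can be wrapped by get_session() like an ordinary session. A minimal sketch (the URL is hypothetical):

import cfscrape

scraper = cfscrape.create_scraper(delay=10)  # delay ~ challenge wait time
page = scraper.get('https://example.com').text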
Example #13
def command(ctx, anime_url, episode_range, url, player, skip_download, quality,
            force_download, download_dir, file_format, provider,
            external_downloader, chunk_size, disable_ssl, fallback_qualities,
            choice, skip_fillers, speed_limit, sub, dub):
    """ Download the anime using the url or search for it.
    """
    """if episode_range:
        regexed_range = re.compile("^:?(\d+)?:?(\d+)?$").search(episode_range)
        # Prevent such cases as: :5: and :1:1
        if not regexed_range or (len(regexed_range.groups()) >= episode_range.count(":") and episode_range.count(":") != 1):
            raise click.UsageError(
                "Invalid value for '--episode' / '-e': {} is not a valid range".format(episode_range))
"""
    if sub and dub:
        raise click.UsageError(
            "--dub/-d and --sub/-s flags cannot be used together")

    query = anime_url[:]

    util.print_info(__version__)
    # TODO: Replace by factory
    cls = get_anime_class(anime_url)

    disable_ssl = (cls and cls.__name__ == 'Masterani') or disable_ssl
    session.get_session().verify = not disable_ssl

    if not cls:
        anime_url, _ = util.search(anime_url, provider, choice)
        cls = get_anime_class(anime_url)

    # --sub forces the subbed version, --dub the dubbed one;
    # neither flag leaves the provider's default (None).
    subbed = None

    if sub or dub:
        subbed = bool(sub)

    anime = cls(anime_url,
                quality=quality,
                fallback_qualities=fallback_qualities,
                subbed=subbed)
    logger.info('Found anime: {}'.format(anime.title))

    animes = util.parse_ep_str(anime, episode_range)
    if not animes:
        # Issue #508.
        raise exceptions.NotFoundError('No episodes found within index.')

    # TODO:
    # Two types of plugins:
    #   - Anime plugin: Pass the whole anime
    #   - Ep plugin: Pass each episode
    if url or player:
        skip_download = True

    if download_dir and not skip_download:
        logger.info('Downloading to {}'.format(os.path.abspath(download_dir)))
    if skip_fillers:
        fillers = util.get_filler_episodes(query)
    if speed_limit:
        logger.info("Speed is being limited to {}".format(speed_limit))
    for episode in animes:
        if skip_fillers and fillers:
            if episode.ep_no in fillers:
                logger.info(
                    "Skipping episode {} because it is a filler.".format(
                        episode.ep_no))
                continue

        if url:
            util.print_episodeurl(episode)

        if player:
            util.play_episode(episode,
                              player=player,
                              title=f'{anime.title} - Episode {episode.ep_no}')

        if not skip_download:
            if external_downloader:
                logging.info('Downloading episode {} of {}'.format(
                    episode.ep_no, anime.title))
                util.external_download(external_downloader,
                                       episode,
                                       file_format,
                                       speed_limit,
                                       path=download_dir)
                continue
            if chunk_size is not None:
                chunk_size *= 1e6
                chunk_size = int(chunk_size)
            with requests_cache.disabled():
                episode.download(force=force_download,
                                 path=download_dir,
                                 format=file_format,
                                 range_size=chunk_size)
            print()
Example #14
def dl(ctx, anime_url, episode_range, url, player, skip_download, quality,
       force_download, log_level, download_dir, file_format, provider,
       external_downloader, chunk_size, disable_ssl, fallback_qualities):
    """ Download the anime using the url or search for it.
    """

    util.setup_logger(log_level)
    util.print_info(__version__)

    cls = get_anime_class(anime_url)

    disable_ssl = (cls and cls.__name__ == 'Masterani') or disable_ssl
    session.get_session().verify = not disable_ssl

    if not cls:
        anime_url = util.search(anime_url, provider)
        cls = get_anime_class(anime_url)

    try:
        anime = cls(anime_url,
                    quality=quality,
                    fallback_qualities=fallback_qualities)
    except Exception as e:
        if log_level != 'DEBUG':
            echo(click.style(str(e), fg='red'))
        else:
            raise
        return

    logging.info('Found anime: {}'.format(anime.title))

    anime = util.parse_ep_str(anime, episode_range)

    if url or player:
        skip_download = True

    if download_dir and not skip_download:
        logging.info('Downloading to {}'.format(os.path.abspath(download_dir)))

    for episode in anime:
        if url:
            util.print_episodeurl(episode)

        if player:
            util.play_episode(episode, player=player)

        if not skip_download:
            if external_downloader:
                logging.info('Downloading episode {} of {}'.format(
                    episode.ep_no, anime.title))
                util.external_download(external_downloader,
                                       episode,
                                       file_format,
                                       path=download_dir)
                continue
            if chunk_size is not None:
                chunk_size *= 1e6
                chunk_size = int(chunk_size)
            episode.download(force=force_download,
                             path=download_dir,
                             format=file_format,
                             range_size=chunk_size)
            print()
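
util.setup_logger presumably maps the --log-level string onto the standard logging module; a minimal sketch of such a helper (assumed, not the project's actual implementation):

import logging

def setup_logger(level_name):
    # Hypothetical helper: 'DEBUG' -> logging.DEBUG, unknown names -> INFO.
    logging.basicConfig(level=getattr(logging, level_name.upper(), logging.INFO))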
Example #15
import cfscrape
from bs4 import BeautifulSoup
import re
import logging

from anime_downloader.sites.anime import BaseEpisode, SearchResult
from anime_downloader.sites.baseanimecf import BaseAnimeCF
from anime_downloader.sites.exceptions import NotFoundError
from anime_downloader.const import get_random_header
from anime_downloader.session import get_session

scraper = get_session(cfscrape.create_scraper(delay=10))


class KissanimeEpisode(BaseEpisode):
    QUALITIES = ['360p', '480p', '720p', '1080p']
    _base_url = 'http://kissanime.ru'
    VERIFY_HUMAN = True

    def _get_sources(self):
        episode_url = self.url + '&s=rapidvideo'
        logging.debug('Calling url: {}'.format(episode_url))

        ret = scraper.get(episode_url)
        data = self._scrape_episode(ret)

        return data

    def _scrape_episode(self, response):
        rapid_re = re.compile(r'iframe.*src="https://(.*?)"')
        rapid_url = rapid_re.findall(response.text)[0]
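
The iframe regex above pulls the rapidvideo host and path out of the embed markup. Run against a hypothetical snippet:

import re

html = '<iframe width="100%" src="https://www.rapidvideo.com/e/ABC123"></iframe>'
rapid_re = re.compile(r'iframe.*src="https://(.*?)"')
print(rapid_re.findall(html)[0])  # www.rapidvideo.com/e/ABC123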
Example #16
from anime_downloader import session
from anime_downloader.sites.kissanime import KissAnime
from anime_downloader.sites.anime import BaseEpisode, SearchResult
from anime_downloader.sites.exceptions import NotFoundError
from anime_downloader.const import desktop_headers, get_random_header

from bs4 import BeautifulSoup
import cfscrape
import logging

scraper = session.get_session(cfscrape.create_scraper())
session = session.get_session()


class KisscartoonEpisode(BaseEpisode):
    _base_url = ''
    VERIFY_HUMAN = False
    _episode_list_url = 'https://kisscartoon.ac/ajax/anime/load_episodes'
    QUALITIES = ['720p']

    def _get_sources(self):
        params = {
            'v': '1.1',
            'episode_id': self.url.split('id=')[-1],
        }
        # Copy so the module-level desktop_headers aren't mutated.
        headers = desktop_headers.copy()
        headers['referer'] = self.url
        res = session.get(self._episode_list_url,
                          params=params,
                          headers=headers)
        url = res.json()['value']