Example #1
class EdXBrowser(object):
    def __init__(self, config):
        self._br = mechanize.Browser()
        self._cj = mechanize.LWPCookieJar()
        csrftoken = makeCsrf()
        self._cj.set_cookie(csrfCookie(csrftoken))
        self._br.set_handle_robots(False)
        self._br.set_cookiejar(self._cj)
        self._br.addheaders.append(('X-CSRFToken',csrftoken))
        self._br.addheaders.append(('Referer',base_url))
        self._logged_in = False
        self._fd = YoutubeDL(config.YDL_PARAMS)
        self._fd.add_info_extractor(YoutubeIE())
        self._config = config

    def login(self):
        try:
            login_resp = self._br.open(base_url + login_url, urlencode({'email':self._config.EMAIL, 'password':self._config.PASSWORD}))
            login_state = json.loads(login_resp.read())
            self._logged_in = login_state.get('success')
            if not self._logged_in:
                print(login_state.get('value'))
            return self._logged_in
        except mechanize.HTTPError:
            sys.exit("Can't sign in")
Example #2
class EdXBrowser(object):
    def __init__(self, config):
        self._br = mechanize.Browser()
        self._cj = mechanize.LWPCookieJar()
        csrftoken = makeCsrf()
        self._cj.set_cookie(csrfCookie(csrftoken))
        self._br.set_handle_robots(False)
        self._br.set_cookiejar(self._cj)
        self._br.addheaders.append(('X-CSRFToken', csrftoken))
        self._br.addheaders.append(('Referer', base_url))
        self._logged_in = False
        self._fd = YoutubeDL(config.YDL_PARAMS)
        self._fd.add_info_extractor(YoutubeIE())
        self._config = config

    def login(self):
        try:
            login_resp = self._br.open(
                base_url + login_url,
                urlencode({
                    'email': self._config.EMAIL,
                    'password': self._config.PASSWORD
                }))
            login_state = json.loads(login_resp.read())
            self._logged_in = login_state.get('success')
            if not self._logged_in:
                print(login_state.get('value'))
            return self._logged_in
        except mechanize.HTTPError:
            sys.exit("Can't sign in")
Example #3
 def get_info(self):
     ydl = YoutubeDL({"quiet": True, "prefer_insecure": True})
     ydl.add_default_info_extractors()
     try:
         info = ydl.extract_info(self.url, download=False)
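     # DownloadError lives in youtube_dl.utils; extraction failures are swallowed and None is returned instead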
     except DownloadError:
         info = None
     return info
Example #4
 def get_info(self):
     ydl = YoutubeDL({"quiet": True, "prefer_insecure": True})
     ydl.add_default_info_extractors()
     try:
         info = ydl.extract_info(self.url, download=False)
     except DownloadError:
         info = None
     return info
Example #5
def download(id):
    with app.app_context():
        d = Download.find(id)
        opts = {
            'outtmpl': '/downloads/%(title)s-%(id)s.%(ext)s',
            'progress_hooks': [d.set_details]
        }
        y = YoutubeDL(params=opts)
        y.download([d.url])
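Note: each entry in progress_hooks is a callable that youtube-dl invokes with a status dict; d.set_details above is project-specific. A minimal stand-alone hook (hypothetical name) might look like this:

def print_progress(status):
    # youtube-dl passes a dict whose 'status' key is one of
    # 'downloading', 'error' or 'finished'
    if status.get('status') == 'finished':
        print('Finished downloading %s' % status.get('filename'))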
Example #6
 def __init__(self, config):
     self._br = mechanize.Browser()
     self._cj = mechanize.LWPCookieJar()
     csrftoken = makeCsrf()
     self._cj.set_cookie(csrfCookie(csrftoken))
     self._br.set_handle_robots(False)
     self._br.set_cookiejar(self._cj)
     self._br.addheaders.append(('X-CSRFToken', csrftoken))
     self._br.addheaders.append(('Referer', base_url))
     self._logged_in = False
     self._fd = YoutubeDL(config.YDL_PARAMS)
     self._fd.add_info_extractor(YoutubeIE())
     self._config = config
Example #7
 def extractYoutubeDL(self, url, video):
     links = {}
     try:
         from youtube_dl.YoutubeDL import YoutubeDL, ExtractorError
     except ImportError:
         util.error("no youtube-dl installed!")
         return links
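     # with auto_init=False, YoutubeDL registers no default extractors or postprocessors;
     # only the YouTube IE added on the next line is available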
     ydl = YoutubeDL({'youtube_include_dash_manifest':False}, auto_init=False)
     ydl.add_info_extractor(ydl.get_info_extractor("Youtube"))
     try:
         res = ydl.extract_info(url, False, ie_key="Youtube")
     except ExtractorError as e:
         util.error(str(e))
         return links
     for e in res['formats']:
         links[int(e['format_id'])] = e['url']
     return links
Example #8
def ydl_download(self, download_id):
    with app.app_context():
        d = Download.query.filter_by(id=download_id).first()
        opts = {
            'noplaylist': not d.playlist,
            'outtmpl': d.outtmpl,
            'progress_hooks': [d.progress_hook],
            'format': d.df.ydl_format,
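            # with both options set, youtube-dl sleeps a random 60-300 seconds before each download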
            'sleep_interval': 60,
            'max_sleep_interval': 300,
        }
        y = YoutubeDL(params=opts)
        try:
            y.download([d.url])
        except DownloadError:
            d.status = DownloadStatus.ERROR
            d.save()
Example #9
 def __init__(self, url: str, width: int, height: int, cols: int, rows: int, n_frames: int):
     self.set_downloader(YoutubeDL({'source_address': '0.0.0.0', 'logger': logger}))
     self.url = url
     self.width = width
     self.height = height
     self.cols = cols
     self.rows = rows
     self.n_frames = n_frames
     self.mime_type = None
     self._image = None  # type: Optional[bytes]
Example #10
class TwitchVideoUrlResolver(object):
    _ytdl = YoutubeDL(params={"nocheckcertificate": True, "noplaylist": False})

    KEY_FORMAT_ID = u"format_id"
    KEY_URL = u"url"
    KEY_HEIGHT = u"height"
    KEY_FORMATS = u"formats"

    def __init__(self, baseurl, callbacks=None, run_async=True):
        # 'async' became a reserved word in Python 3.7, so the flag is renamed here;
        # a mutable default argument is also avoided
        self._canceled = False
        self._callbacks = callbacks if callbacks is not None else []
        self._baseurl = baseurl
        self._async = run_async
Example #11
 def getVideoUrlDirect(self, vid):
     # blacklist webm, since most players cannot read it
     yd = YoutubeIE(downloader=YoutubeDL(params={"quiet": True}))
     info = yd._real_extract('https://www.youtube.com/watch?v=%s' % vid)
     _max = -1
     url = ''
     result = {}
     for fmt in info['formats']:
         if fmt.get('height', 0) > _max and fmt.get('ext', '') != 'webm':
             _max = fmt.get('height', 0)
             result = fmt
             url = fmt['url']
     return url, result.get('ext', ''), info.get('duration', 0)
Example #12
 def __init__(self, config):
     self._br = mechanize.Browser()
     self._cj = mechanize.LWPCookieJar()
     csrftoken = makeCsrf()
     self._cj.set_cookie(csrfCookie(csrftoken))
     self._br.set_handle_robots(False)
     self._br.set_cookiejar(self._cj)
     self._br.addheaders.append(('X-CSRFToken',csrftoken))
     self._br.addheaders.append(('Referer',base_url))
     self._logged_in = False
     self._fd = YoutubeDL(config.YDL_PARAMS)
     self._fd.add_info_extractor(YoutubeIE())
     self._config = config
Example #13
 def get_media_url(self, host, media_id):
     opts = {
         'quiet': True,
         'no_color': True,
     }
     with YoutubeDL(opts) as ydl:
         try:
             # host is full URL in our case
             info = ydl.extract_info(host, download=False)
          except DownloadError:
              raise ResolverError
          if 'url' not in info:
             raise ResolverError
         return info['url']
Example #14
def download(id, path):
    postprocessors = [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
        'preferredquality': '0',
        'nopostoverwrites': False
    }]
    without_suffix = os.path.splitext(path)[0]
    ydl_opts = {
        'ratelimit': 1048576,
        'postprocessors': postprocessors,
        'outtmpl': without_suffix + '.%(ext)s',
        'format': 'bestaudio/best',
        'quiet': True
    }
    with YoutubeDL(ydl_opts) as ydl:
        ydl.download([YOUTUBE_WATCH + id])
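For reference, the same FFmpegExtractAudio pattern works as a self-contained sketch; it assumes youtube-dl and ffmpeg are installed and uses youtube-dl's own test video:

from youtube_dl import YoutubeDL

opts = {
    'format': 'bestaudio/best',
    'outtmpl': '%(id)s.%(ext)s',
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
    }],
}
with YoutubeDL(opts) as ydl:
    # downloads the best audio stream and converts it to mp3 via ffmpeg
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])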
Example #15
async def youtube_extraction(client, ctx, queue, bot_info, counter):
    if not queue: return False

    music_url = queue[0]["url"]

    with YoutubeDL(YDL_OPTIONS) as ydl:
        try:
            print(" [!] Extracting music info in " + ctx.guild.name)
            info = ydl.extract_info(music_url, download=False)

        except HTTPError as e:
            if e.code == 429:  # Video request limit exceeded, notify the bot owners
                print(
                    " [!!] Error in \'play\' function\n      * Ydl limit exceeded"
                )
                await embedded_message(
                    ctx, "**Something broke** :cry:",
                    "Bot will probably be out for a while\n" +
                    "Contact the devs asap!")
            await call_next_song(client, ctx, queue, bot_info, counter)
            return False

        except DownloadError as e:
            print(" [!!] Error in \'play\' function\n      * {}".format(e))
            await embedded_message(
                ctx, "**Error in extraction**", "`" + str(queue[0]["title"]) +
                "`\n" + "_was removed from the queue_\n" +
                "_for being age restricted_\n")
            queue.pop(0)
            await call_next_song(client, ctx, queue, bot_info, counter)
            return False

        except Exception:
            queue.pop(0)
            print(" [!!] Error in \'play\' function\n      * Unknown error")
            await call_next_song(client, ctx, queue, bot_info, counter)
            await embedded_message(ctx, "**Error in extraction**  :cry:",
                                   "Sorry, I am unable to play this song")
            return False

    return info
Example #16
 def extractYoutubeDL(self, url, video):
     links = {}
     try:
         from youtube_dl.YoutubeDL import YoutubeDL, ExtractorError
     except ImportError:
         util.error("no youtube-dl installed!")
         return links
     ydl = YoutubeDL({'youtube_include_dash_manifest': False},
                     auto_init=False)
     ydl.add_info_extractor(ydl.get_info_extractor("Youtube"))
     try:
         res = ydl.extract_info(url, False, ie_key="Youtube")
     except ExtractorError as e:
         util.error(str(e))
         return links
     for e in res['formats']:
         links[int(e['format_id'])] = e['url']
     return links
Example #17
from youtube_dl.YoutubeDL import YoutubeDL

downloader = YoutubeDL(
    dict(
        #quiet=True,
        noplaylist=True,
        skip_download=True,
        format_limit=18,  # 360p
        outtmpl='%(title)s'))
downloader.add_default_info_extractors()


def get_video_meta(page_url):
    meta = downloader.extract_info(page_url, download=False)
    try:
        return [e for e in meta['entries'] if e.get('url')][0]
    except KeyError:
        return meta


def get_video_url(page_url):
    return get_video_meta(page_url)['url']
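A usage sketch (any page URL that youtube-dl's extractors can handle should work here):

if __name__ == '__main__':
    # prints a direct media URL resolved from the page
    print(get_video_url('https://www.youtube.com/watch?v=BaW_jenozKc'))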
Example #18
    def run(self):
        estado: dict = self.state.get_state()
        canciones = estado.get(self.state.CANCIONES)
        formato = estado.get(self.state.TIPO_FORMATO)
        localizacion = estado.get(self.state.DIRECTORIO_GUARDAR)
        opciones: dict = {}
        for i, cancion in enumerate(canciones):
            self.padre.label_subtitulo.setText(
                f"Descargando url <{cancion}> - {i+1} | {len(canciones)}")
            try:
                if formato == 'mp3':
                    if name == 'nt':
                        if not Path(
                                abspath(
                                    join(self.DIRECTORIO_PRINCIPAL,
                                         'ffmpeg-4.0.2-win64-static'))).exists(
                                         ):
                            with ZipFile(
                                    abspath(
                                        join(self.DIRECTORIO_PRINCIPAL,
                                             'ffmpeg-4.0.2-win64-static.zip'))
                            ) as myzip:
                                myzip.extractall(
                                    path=self.DIRECTORIO_PRINCIPAL)
                        opciones = {
                            'format':
                            'bestaudio/best',
                            'progress_hooks': [self.proceso],
                            'logger':
                            Log(),
                            'outtmpl':
                            join(localizacion, "%(title)s-%(id)s.%(ext)s"),
                            'ffmpeg_location':
                            abspath(
                                join(self.DIRECTORIO_PRINCIPAL,
                                     "ffmpeg-4.0.2-win64-static", "bin",
                                     "ffmpeg.exe")),
                            'postprocessors': [{
                                'key': 'FFmpegExtractAudio',
                                'preferredcodec': 'mp3',
                                'preferredquality': '800',
                            }]
                        }
                    else:
                        opciones = {
                            'format':
                            'bestaudio/best',
                            'outtmpl':
                            join(localizacion, "%(title)s-%(id)s.%(ext)s"),
                            'progress_hooks': [self.proceso],
                            'logger':
                            Log(),
                            'postprocessors': [{
                                'key': 'FFmpegExtractAudio',
                                'preferredcodec': 'mp3',
                                'preferredquality': '800',
                            }]
                        }
                elif formato == 'mp4a':
                    opciones = {
                        'outtmpl': join(localizacion,
                                        "%(title)s-%(id)s.%(ext)s"),
                        'format': 'm4a',
                        'logger': Log(),
                        'progress_hooks': [self.proceso],
                    }
                elif formato == 'mp4':
                    opciones = {
                        'outtmpl': join(localizacion,
                                        "%(title)s-%(id)s.%(ext)s"),
                        'logger': Log(),
                        'progress_hooks': [self.proceso],
                    }

                with YoutubeDL(opciones) as ydl:
                    ydl.download([cancion])
            except (youtube_dl.utils.DownloadError,
                    youtube_dl.utils.ExtractorError) as error:
                self.is_error.emit(True, str(error), len(canciones))

        self.is_error.emit(False, None, len(canciones))
Example #19
from youtube_dl.YoutubeDL import YoutubeDL

downloader = YoutubeDL(dict(
    #quiet=True,
    noplaylist=True,
    skip_download=True,
    format_limit=18, # 360p
    outtmpl='%(title)s'
))
downloader.add_default_info_extractors()

def get_video_meta(page_url):
    meta = downloader.extract_info(page_url, download=False)
    try:
        return [e for e in meta['entries'] if e.get('url')][0]
    except KeyError:
        return meta

def get_video_url(page_url):
    return get_video_meta(page_url)['url']
Example #20
def _real_main(myurl, argv=None):
    # Compatibility fixes for Windows
    if sys.platform == 'win32':
        # https://github.com/rg3/youtube-dl/issues/820
        codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)

    workaround_optparse_bug9161()

    setproctitle('youtube-dl')
    parser, opts, args = parseOpts(argv)

    # Set user agent
    if opts.user_agent is not None:
        std_headers['User-Agent'] = opts.user_agent

    # Set referer
    if opts.referer is not None:
        std_headers['Referer'] = opts.referer

    # Custom HTTP headers
    if opts.headers is not None:
        for h in opts.headers:
            if ':' not in h:
                parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
            key, value = h.split(':', 1)
            if opts.verbose:
                write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
            std_headers[key] = value

    # Dump user agent
    if opts.dump_user_agent:
        write_string(std_headers['User-Agent'] + '\n', out=sys.stdout)
        sys.exit(0)

    # Batch file verification
    batch_urls = []
    if opts.batchfile is not None:
        try:
            if opts.batchfile == '-':
                batchfd = sys.stdin
            else:
                batchfd = io.open(
                    expand_path(opts.batchfile),
                    'r', encoding='utf-8', errors='ignore')
            batch_urls = read_batch_urls(batchfd)
            if opts.verbose:
                write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
        except IOError:
            sys.exit('ERROR: batch file could not be read')
    all_urls = batch_urls + [url.strip() for url in args]  # batch_urls are already stripped in read_batch_urls
    _enc = preferredencoding()
    all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]

    if opts.list_extractors:
        for ie in list_extractors(opts.age_limit):
            write_string(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else '') + '\n', out=sys.stdout)
            matchedUrls = [url for url in all_urls if ie.suitable(url)]
            for mu in matchedUrls:
                write_string('  ' + mu + '\n', out=sys.stdout)
        sys.exit(0)
    if opts.list_extractor_descriptions:
        for ie in list_extractors(opts.age_limit):
            if not ie._WORKING:
                continue
            desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
            if desc is False:
                continue
            if hasattr(ie, 'SEARCH_KEY'):
                _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow')
                _COUNTS = ('', '5', '10', 'all')
                desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
            write_string(desc + '\n', out=sys.stdout)
        sys.exit(0)
    if opts.ap_list_mso:
        table = [[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()]
        write_string('Supported TV Providers:\n' + render_table(['mso', 'mso name'], table) + '\n', out=sys.stdout)
        sys.exit(0)

    if opts.usenetrc and (opts.username is not None or opts.password is not None):
        parser.error('using .netrc conflicts with giving username/password')
    if opts.password is not None and opts.username is None:
        parser.error('account username missing\n')
    if opts.ap_password is not None and opts.ap_username is None:
        parser.error('TV Provider account username missing\n')
    if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
        parser.error('using output template conflicts with using title, video ID or auto number')
    if opts.autonumber_size is not None:
        if opts.autonumber_size <= 0:
            parser.error('auto number size must be positive')
    if opts.autonumber_start is not None:
        if opts.autonumber_start < 0:
            parser.error('auto number start must be positive or 0')
    if opts.usetitle and opts.useid:
        parser.error('using title conflicts with using video ID')
    if opts.username is not None and opts.password is None:
        opts.password = compat_getpass('Type account password and press [Return]: ')
    if opts.ap_username is not None and opts.ap_password is None:
        opts.ap_password = compat_getpass('Type TV provider account password and press [Return]: ')
    if opts.ratelimit is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
        if numeric_limit is None:
            parser.error('invalid rate limit specified')
        opts.ratelimit = numeric_limit
    if opts.min_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
        if numeric_limit is None:
            parser.error('invalid min_filesize specified')
        opts.min_filesize = numeric_limit
    if opts.max_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
        if numeric_limit is None:
            parser.error('invalid max_filesize specified')
        opts.max_filesize = numeric_limit
    if opts.sleep_interval is not None:
        if opts.sleep_interval < 0:
            parser.error('sleep interval must be positive or 0')
    if opts.max_sleep_interval is not None:
        if opts.max_sleep_interval < 0:
            parser.error('max sleep interval must be positive or 0')
        if opts.max_sleep_interval < opts.sleep_interval:
            parser.error('max sleep interval must be greater than or equal to min sleep interval')
    else:
        opts.max_sleep_interval = opts.sleep_interval
    if opts.ap_mso and opts.ap_mso not in MSO_INFO:
        parser.error('Unsupported TV Provider, use --ap-list-mso to get a list of supported TV Providers')

    def parse_retries(retries):
        if retries in ('inf', 'infinite'):
            parsed_retries = float('inf')
        else:
            try:
                parsed_retries = int(retries)
            except (TypeError, ValueError):
                parser.error('invalid retry count specified')
        return parsed_retries
    if opts.retries is not None:
        opts.retries = parse_retries(opts.retries)
    if opts.fragment_retries is not None:
        opts.fragment_retries = parse_retries(opts.fragment_retries)
    if opts.buffersize is not None:
        numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
        if numeric_buffersize is None:
            parser.error('invalid buffer size specified')
        opts.buffersize = numeric_buffersize
    if opts.http_chunk_size is not None:
        numeric_chunksize = FileDownloader.parse_bytes(opts.http_chunk_size)
        if not numeric_chunksize:
            parser.error('invalid http chunk size specified')
        opts.http_chunk_size = numeric_chunksize
    if opts.playliststart <= 0:
        raise ValueError('Playlist start must be positive')
    if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
        raise ValueError('Playlist end must be greater than playlist start')
    if opts.extractaudio:
        if opts.audioformat not in ['best', 'aac', 'flac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
            parser.error('invalid audio format specified')
    if opts.audioquality:
        opts.audioquality = opts.audioquality.strip('k').strip('K')
        if not opts.audioquality.isdigit():
            parser.error('invalid audio quality specified')
    if opts.recodevideo is not None:
        if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
            parser.error('invalid video recode format specified')
    if opts.convertsubtitles is not None:
        if opts.convertsubtitles not in ['srt', 'vtt', 'ass', 'lrc']:
            parser.error('invalid subtitle format specified')

    if opts.date is not None:
        date = DateRange.day(opts.date)
    else:
        date = DateRange(opts.dateafter, opts.datebefore)

    # Do not download videos when there are audio-only formats
    if opts.extractaudio and not opts.keepvideo and opts.format is None:
        opts.format = 'bestaudio/best'

    # --all-sub automatically sets --write-sub if --write-auto-sub is not given
    # this was the old behaviour if only --all-sub was given.
    if opts.allsubtitles and not opts.writeautomaticsub:
        opts.writesubtitles = True

    outtmpl = ((opts.outtmpl is not None and opts.outtmpl) or
               (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') or
               (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') or
               (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') or
               (opts.usetitle and '%(title)s-%(id)s.%(ext)s') or
               (opts.useid and '%(id)s.%(ext)s') or
               (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') or
               DEFAULT_OUTTMPL)
    if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
        parser.error('Cannot download a video and extract audio into the same'
                     ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
                     ' template'.format(outtmpl))

    any_getting = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
    any_printing = opts.print_json
    download_archive_fn = expand_path(opts.download_archive) if opts.download_archive is not None else opts.download_archive

    # PostProcessors
    postprocessors = []
    if opts.metafromtitle:
        postprocessors.append({
            'key': 'MetadataFromTitle',
            'titleformat': opts.metafromtitle
        })
    if opts.extractaudio:
        postprocessors.append({
            'key': 'FFmpegExtractAudio',
            'preferredcodec': opts.audioformat,
            'preferredquality': opts.audioquality,
            'nopostoverwrites': opts.nopostoverwrites,
        })
    if opts.recodevideo:
        postprocessors.append({
            'key': 'FFmpegVideoConvertor',
            'preferedformat': opts.recodevideo,
        })

    if opts.addmetadata:
        postprocessors.append({'key': 'FFmpegMetadata'})
    if opts.convertsubtitles:
        postprocessors.append({
            'key': 'FFmpegSubtitlesConvertor',
            'format': opts.convertsubtitles,
        })
    if opts.embedsubtitles:
        postprocessors.append({
            'key': 'FFmpegEmbedSubtitle',
        })
    if opts.embedthumbnail:
        already_have_thumbnail = opts.writethumbnail or opts.write_all_thumbnails
        postprocessors.append({
            'key': 'EmbedThumbnail',
            'already_have_thumbnail': already_have_thumbnail
        })
        if not already_have_thumbnail:
            opts.writethumbnail = True
    if opts.xattrs:
        postprocessors.append({'key': 'XAttrMetadata'})
    if opts.exec_cmd:
        postprocessors.append({
            'key': 'ExecAfterDownload',
            'exec_cmd': opts.exec_cmd,
        })
    external_downloader_args = None
    if opts.external_downloader_args:
        external_downloader_args = compat_shlex_split(opts.external_downloader_args)
    postprocessor_args = None
    if opts.postprocessor_args:
        postprocessor_args = compat_shlex_split(opts.postprocessor_args)
    match_filter = (
        None if opts.match_filter is None
        else match_filter_func(opts.match_filter))
    ydl_opts = {
        'usenetrc': opts.usenetrc,
        'username': opts.username,
        'password': opts.password,
        'twofactor': opts.twofactor,
        'videopassword': opts.videopassword,
        'ap_mso': opts.ap_mso,
        'ap_username': opts.ap_username,
        'ap_password': opts.ap_password,
        'quiet': (opts.quiet or any_getting or any_printing),
        'no_warnings': opts.no_warnings,
        'forceurl': opts.geturl,
        'forcetitle': opts.gettitle,
        'forceid': opts.getid,
        'forcethumbnail': opts.getthumbnail,
        'forcedescription': opts.getdescription,
        'forceduration': opts.getduration,
        'forcefilename': opts.getfilename,
        'forceformat': opts.getformat,
        'forcejson': opts.dumpjson or opts.print_json,
        'dump_single_json': opts.dump_single_json,
        'simulate': opts.simulate or any_getting,
        'skip_download': opts.skip_download,
        'format': opts.format,
        'listformats': opts.listformats,
        'outtmpl': outtmpl,
        'autonumber_size': opts.autonumber_size,
        'autonumber_start': opts.autonumber_start,
        'restrictfilenames': opts.restrictfilenames,
        'ignoreerrors': opts.ignoreerrors,
        'force_generic_extractor': opts.force_generic_extractor,
        'ratelimit': opts.ratelimit,
        'nooverwrites': opts.nooverwrites,
        'retries': opts.retries,
        'fragment_retries': opts.fragment_retries,
        'skip_unavailable_fragments': opts.skip_unavailable_fragments,
        'keep_fragments': opts.keep_fragments,
        'buffersize': opts.buffersize,
        'noresizebuffer': opts.noresizebuffer,
        'http_chunk_size': opts.http_chunk_size,
        'continuedl': opts.continue_dl,
        'noprogress': opts.noprogress,
        'progress_with_newline': opts.progress_with_newline,
        'playliststart': opts.playliststart,
        'playlistend': opts.playlistend,
        'playlistreverse': opts.playlist_reverse,
        'playlistrandom': opts.playlist_random,
        'noplaylist': opts.noplaylist,
        'logtostderr': opts.outtmpl == '-',
        'consoletitle': opts.consoletitle,
        'nopart': opts.nopart,
        'updatetime': opts.updatetime,
        'writedescription': opts.writedescription,
        'writeannotations': opts.writeannotations,
        'writeinfojson': opts.writeinfojson,
        'writethumbnail': opts.writethumbnail,
        'write_all_thumbnails': opts.write_all_thumbnails,
        'writesubtitles': opts.writesubtitles,
        'writeautomaticsub': opts.writeautomaticsub,
        'allsubtitles': opts.allsubtitles,
        'listsubtitles': opts.listsubtitles,
        'subtitlesformat': opts.subtitlesformat,
        'subtitleslangs': opts.subtitleslangs,
        'matchtitle': decodeOption(opts.matchtitle),
        'rejecttitle': decodeOption(opts.rejecttitle),
        'max_downloads': opts.max_downloads,
        'prefer_free_formats': opts.prefer_free_formats,
        'verbose': opts.verbose,
        'dump_intermediate_pages': opts.dump_intermediate_pages,
        'write_pages': opts.write_pages,
        'test': opts.test,
        'keepvideo': opts.keepvideo,
        'min_filesize': opts.min_filesize,
        'max_filesize': opts.max_filesize,
        'min_views': opts.min_views,
        'max_views': opts.max_views,
        'daterange': date,
        'cachedir': opts.cachedir,
        'youtube_print_sig_code': opts.youtube_print_sig_code,
        'age_limit': opts.age_limit,
        'download_archive': download_archive_fn,
        'cookiefile': opts.cookiefile,
        'nocheckcertificate': opts.no_check_certificate,
        'prefer_insecure': opts.prefer_insecure,
        'proxy': opts.proxy,
        'socket_timeout': opts.socket_timeout,
        'bidi_workaround': opts.bidi_workaround,
        'debug_printtraffic': opts.debug_printtraffic,
        'prefer_ffmpeg': opts.prefer_ffmpeg,
        'include_ads': opts.include_ads,
        'default_search': opts.default_search,
        'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
        'encoding': opts.encoding,
        'extract_flat': opts.extract_flat,
        'mark_watched': opts.mark_watched,
        'merge_output_format': opts.merge_output_format,
        'postprocessors': postprocessors,
        'fixup': opts.fixup,
        'source_address': opts.source_address,
        'call_home': opts.call_home,
        'sleep_interval': opts.sleep_interval,
        'max_sleep_interval': opts.max_sleep_interval,
        'list_thumbnails': opts.list_thumbnails,
        'playlist_items': opts.playlist_items,
        'xattr_set_filesize': opts.xattr_set_filesize,
        'match_filter': match_filter,
        'no_color': opts.no_color,
        'ffmpeg_location': opts.ffmpeg_location,
        'hls_prefer_native': opts.hls_prefer_native,
        'hls_use_mpegts': opts.hls_use_mpegts,
        'external_downloader_args': external_downloader_args,
        'postprocessor_args': postprocessor_args,
        'cn_verification_proxy': opts.cn_verification_proxy,
        'geo_verification_proxy': opts.geo_verification_proxy,
        'config_location': opts.config_location,
        'geo_bypass': opts.geo_bypass,
        'geo_bypass_country': opts.geo_bypass_country,
        'geo_bypass_ip_block': opts.geo_bypass_ip_block,
        'autonumber': opts.autonumber if opts.autonumber is True else None,
        'usetitle': opts.usetitle if opts.usetitle is True else None,
    }
    url = " "
    with YoutubeDL(ydl_opts) as ydl:
        url = ydl.extract_info(myurl)
    return url
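A hypothetical invocation; --skip-download is a standard youtube-dl flag, so only metadata is fetched even though extract_info is called with its default download behaviour:

if __name__ == '__main__':
    # parseOpts() consumes argv, then the URL is handed straight to extract_info
    result = _real_main('https://www.youtube.com/watch?v=BaW_jenozKc', argv=['--skip-download'])
    print(result.get('title'))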
Example #21
def get_watch_page():
    video_id = request.args['v']
    if len(video_id) < 11:
        flask.abort(404)
        flask.abort(
            flask.Response('Incomplete video id (too short): ' + video_id))

    lc = request.args.get('lc', '')
    if settings.route_tor:
        proxy = 'socks5://127.0.0.1:9150/'
    else:
        proxy = ''
    yt_dl_downloader = YoutubeDL(params={
        'youtube_include_dash_manifest': False,
        'proxy': proxy
    })
    tasks = (gevent.spawn(comments.video_comments,
                          video_id,
                          int(settings.default_comment_sorting),
                          lc=lc),
             gevent.spawn(extract_info,
                          yt_dl_downloader,
                          "https://www.youtube.com/watch?v=" + video_id,
                          download=False))
    gevent.joinall(tasks)
    comments_info, info = tasks[0].value, tasks[1].value

    if isinstance(info, str):  # youtube error
        return flask.render_template('error.html', error_message=info)

    video_info = {
        "duration": util.seconds_to_timestamp(info["duration"]),
        "id": info['id'],
        "title": info['title'],
        "author": info['uploader'],
    }

    upload_year = info["upload_date"][0:4]
    upload_month = info["upload_date"][4:6]
    upload_day = info["upload_date"][6:8]
    upload_date = upload_month + "/" + upload_day + "/" + upload_year

    if settings.related_videos_mode:
        related_videos = get_related_items(info)
    else:
        related_videos = []

    if settings.gather_googlevideo_domains:
        with open(os.path.join(settings.data_dir, 'googlevideo-domains.txt'),
                  'a+',
                  encoding='utf-8') as f:
            url = info['formats'][0]['url']
            subdomain = url[0:url.find(".googlevideo.com")]
            f.write(subdomain + "\n")

    download_formats = []

    for format in info['formats']:
        download_formats.append({
            'url':
            format['url'],
            'ext':
            format['ext'],
            'resolution':
            yt_dl_downloader.format_resolution(format),
            'note':
            yt_dl_downloader._format_note(format),
        })

    return flask.render_template(
        'watch.html',
        header_playlist_names=local_playlist.get_playlist_names(),
        uploader_channel_url='/' + info['uploader_url'],
        upload_date=upload_date,
        views=(lambda x: '{:,}'.format(x)
               if x is not None else "")(info.get("view_count", None)),
        likes=(lambda x: '{:,}'.format(x)
               if x is not None else "")(info.get("like_count", None)),
        dislikes=(lambda x: '{:,}'.format(x)
                  if x is not None else "")(info.get("dislike_count", None)),
        download_formats=download_formats,
        video_info=json.dumps(video_info),
        video_sources=get_video_sources(info),
        subtitle_sources=get_subtitle_sources(info),
        related=related_videos,
        music_list=info['music_list'],
        music_attributes=get_ordered_music_list_attributes(info['music_list']),
        comments_info=comments_info,
        title=info['title'],
        uploader=info['uploader'],
        description=info['description'],
        unlisted=info['unlisted'],
    )
Example #22
def main():
    ns = parse_args()

    ydl = YoutubeDL({
        'quiet': True,
        'outtmpl': '%(title)s.%(ext)s',
        'simulate': ns.simulate,
    })

    ydl.add_info_extractor(YoutubeSearchIE())
    ydl.add_info_extractor(YoutubeIE())

    ydl.add_post_processor(FFmpegExtractAudioPP())

    artist = Artist(ns.artista)

    if not artist.found:
        print('ERROR: %s does not exist' % artist.name)
        return 1

    print('Fetching information for %s...' % artist.name)
    artist.parse()

    if not ns.simulate and not os.path.exists(artist.name):
        os.mkdir(artist.name)

    if ns.disco is None:
        albums = artist.albums
    else:
        album = artist.get_album(ns.disco)

        if album is None or not album.found:
            print('ERROR: %s has no album called %s' % (artist.name, ns.disco))
            return 1

        albums = [album]

    for album in albums:
        if not album.found:
            print('Skipping %s' % album.name)
            continue

        fpath = os.path.join(artist.name, '%s - %s' % (album.year, album.name))

        if not ns.simulate and not os.path.exists(fpath):
            os.mkdir(fpath)

        if ns.disco is None:
            print('%s:' % album.name)
        else:
            print('Fetching track list...')

        album.parse()

        for song in album.songs:
            fname = FILE_FORMAT % (album.songs.index(song) + 1, song)

            print(' %s' % fname)

            ydl.params['outtmpl'] = os.path.join(fpath, fname + '.%(ext)s')
            ydl.download(['ytsearch:%s %s' % (artist.name, song)])

    return 0
Example #23
def get_watch_page(env, start_response):
    video_id = env['parameters']['v'][0]
    if len(video_id) < 11:
        start_response('404 Not Found', [
            ('Content-type', 'text/plain'),
        ])
        return b'Incomplete video id (too short): ' + video_id.encode('ascii')

    start_response('200 OK', [
        ('Content-type', 'text/html'),
    ])

    lc = util.default_multi_get(env['parameters'], 'lc', 0, default='')
    if settings.route_tor:
        proxy = 'socks5://127.0.0.1:9150/'
    else:
        proxy = ''
    downloader = YoutubeDL(params={
        'youtube_include_dash_manifest': False,
        'proxy': proxy
    })
    tasks = (gevent.spawn(comments.video_comments,
                          video_id,
                          int(settings.default_comment_sorting),
                          lc=lc),
             gevent.spawn(extract_info,
                          downloader,
                          "https://www.youtube.com/watch?v=" + video_id,
                          download=False))
    gevent.joinall(tasks)
    comments_html, info = tasks[0].value, tasks[1].value

    #comments_html = comments.comments_html(video_id(url))
    #info = YoutubeDL().extract_info(url, download=False)

    #chosen_format = choose_format(info)

    if isinstance(info, str):  # youtube error
        return html_common.yt_basic_template.substitute(
            page_title="Error",
            style="",
            header=html_common.get_header(),
            page=html.escape(info),
        ).encode('utf-8')

    sorted_formats = sort_formats(info)

    video_info = {
        "duration": util.seconds_to_timestamp(info["duration"]),
        "id": info['id'],
        "title": info['title'],
        "author": info['uploader'],
    }

    upload_year = info["upload_date"][0:4]
    upload_month = info["upload_date"][4:6]
    upload_day = info["upload_date"][6:8]
    upload_date = upload_month + "/" + upload_day + "/" + upload_year

    if settings.enable_related_videos:
        related_videos_html = get_related_items_html(info)
    else:
        related_videos_html = ''

    music_list = info['music_list']
    if len(music_list) == 0:
        music_list_html = ''
    else:
        # get the set of attributes which are used by at least one track
        # so there isn't an empty, extraneous album column which no tracks use, for example
        used_attributes = set()
        for track in music_list:
            used_attributes = used_attributes | track.keys()

        # now put them in the right order
        ordered_attributes = []
        for attribute in ('Artist', 'Title', 'Album'):
            if attribute.lower() in used_attributes:
                ordered_attributes.append(attribute)

        music_list_html = '''<hr>
<table>
    <caption>Music</caption>
    <tr>
'''
        # table headings
        for attribute in ordered_attributes:
            music_list_html += "<th>" + attribute + "</th>\n"
        music_list_html += '''</tr>\n'''

        for track in music_list:
            music_list_html += '''<tr>\n'''
            for attribute in ordered_attributes:
                try:
                    value = track[attribute.lower()]
                except KeyError:
                    music_list_html += '''<td></td>'''
                else:
                    music_list_html += '''<td>''' + html.escape(
                        value) + '''</td>'''
            music_list_html += '''</tr>\n'''
        music_list_html += '''</table>\n'''
    if settings.gather_googlevideo_domains:
        with open(os.path.join(settings.data_dir, 'googlevideo-domains.txt'),
                  'a+',
                  encoding='utf-8') as f:
            url = info['formats'][0]['url']
            subdomain = url[0:url.find(".googlevideo.com")]
            f.write(subdomain + "\n")

    download_options = ''
    for format in info['formats']:
        download_options += download_link_template.substitute(
            url=html.escape(format['url']),
            ext=html.escape(format['ext']),
            resolution=html.escape(downloader.format_resolution(format)),
            note=html.escape(downloader._format_note(format)),
        )

    page = yt_watch_template.substitute(
        video_title=html.escape(info["title"]),
        page_title=html.escape(info["title"]),
        header=html_common.get_header(),
        uploader=html.escape(info["uploader"]),
        uploader_channel_url='/' + info["uploader_url"],
        upload_date=upload_date,
        views=(lambda x: '{:,}'.format(x)
               if x is not None else "")(info.get("view_count", None)),
        likes=(lambda x: '{:,}'.format(x)
               if x is not None else "")(info.get("like_count", None)),
        dislikes=(lambda x: '{:,}'.format(x)
                  if x is not None else "")(info.get("dislike_count", None)),
        download_options=download_options,
        video_info=html.escape(json.dumps(video_info)),
        description=html.escape(info["description"]),
        video_sources=formats_html(sorted_formats) + subtitles_html(info),
        related=related_videos_html,
        comments=comments_html,
        music_list=music_list_html,
        is_unlisted='<span class="is-unlisted">Unlisted</span>'
        if info['unlisted'] else '',
    )
    return page.encode('utf-8')
Example #24
 def __init__(self, video_url: str):
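     # set_downloader comes from youtube-dl's InfoExtractor base class, so this class presumably
     # extends an extractor; the YoutubeDL instance passed in handles the HTTP traffic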
     self.set_downloader(YoutubeDL({'source_address': '0.0.0.0', 'logger': logger}))
     self._input_url = video_url
     self._validate()
     self._thumbframes = self.download_thumbframe_info()