def decode(page_url): print ''' -------------------------- ---- Start New Export ---- -------------------------- CrunchyRoll Downloader Toolkit DX v0.98 Crunchyroll hasn't changed anything. If you don't have a premium account, go and sign up for one now. It's well worthit, and supports the animators. ---------- Booting up... ''' if page_url == '': page_url = raw_input('Please enter Crunchyroll video URL:\n') lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config( ) #player_revision = altfuncs.playerrev(page_url) html = altfuncs.gethtml(page_url) #h = HTMLParser.HTMLParser() title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '') if len(os.path.join('export', title + '.ass')) > 255: title = re.findall('^(.+?) \- ', title)[0] ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ### rep = { ' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '"': "''", 'a*G': 'a G', '*': '#', u'\u2026': '...' } rep = dict((re.escape(k), v) for k, v in rep.iteritems()) pattern = re.compile("|".join(rep.keys())) title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title)) ### End stolen code ### media_id = page_url[-6:] xmlconfig = BeautifulSoup( altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml') try: if '4' in xmlconfig.find_all('code')[0]: print xmlconfig.find_all('msg')[0].text sys.exit() except IndexError: pass xmllist = altfuncs.getxml('RpcApiSubtitle_GetListing', media_id) xmllist = unidecode(xmllist).replace('><', '>\n<') if '<media_id>None</media_id>' in xmllist: print 'The video has hardcoded subtitles.' hardcoded = True sub_id = False else: try: sub_id2 = re.findall("id=([0-9]+)", xmllist) sub_id3 = re.findall("title='(\[.+\]) ", xmllist) sub_id4 = re.findall("title='(\[.+\]) ", xmllist) hardcoded = False except IndexError: print "The video's subtitles cannot be found, or are region-locked." 
hardcoded = True sub_id = False sub_id3 = [word.replace('[English (US)]', 'eng') for word in sub_id3] sub_id3 = [word.replace('[Deutsch]', 'deu') for word in sub_id3] sub_id3 = [word.replace('[Portugues (Brasil)]', 'por') for word in sub_id3] sub_id3 = [word.replace('[Francais (France)]', 'fre') for word in sub_id3] sub_id3 = [word.replace('[Espanol (Espana)]', 'spa') for word in sub_id3] sub_id3 = [word.replace('[Espanol]', 'spa') for word in sub_id3] sub_id3 = [word.replace('[Italiano]', 'ita') for word in sub_id3] sub_id3 = [word.replace('[l`rby@]', 'ara') for word in sub_id3] #sub_id4 = [word.replace('[l`rby@]',u'[العربية]') for word in sub_id4] sub_id4 = [word.replace('[l`rby@]', u'[Arabic]') for word in sub_id4] #else: # try: # sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang1)), xmllist)[0] # hardcoded = False # lang = lang1 # except IndexError: # try: # sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang2)), xmllist)[0] # print 'Language not found, reverting to ' + lang2 + '.' # hardcoded = False # lang = lang2 # except IndexError: # try: # sub_id = re.findall("id=([0-9]+)' title='\[English", xmllist)[0] # default back to English # print 'Backup language not found, reverting to English.' # hardcoded = False # lang = 'English' # except IndexError: # print "The video's subtitles cannot be found, or are region-locked." # hardcoded = True # sub_id = False if not hardcoded: for i in sub_id2: #xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', sub_id) xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', i) formattedsubs = CrunchyDec().returnsubs(xmlsub) if formattedsubs is None: continue #subfile = open(eptitle + '.ass', 'wb') subfile = open( os.path.join( 'export', title + '[' + sub_id3.pop(0) + ']' + sub_id4.pop(0) + '.ass'), 'wb') subfile.write(formattedsubs.encode('utf-8-sig')) subfile.close() #shutil.move(title + '.ass', os.path.join(os.getcwd(), 'export', '')) print 'Subtitles for ' + title + ' have been downloaded'
def ultimate(page_url, seasonnum, epnum):
    """Download one Crunchyroll episode (RTMP .flv), fetch its subtitles and
    mux everything into ./export/<title>[<quality>].mkv.

    Windows build: shells out to `title` (console caption) and to
    video-engine\\mkvmerge.exe with backslash paths.

    page_url  -- episode URL, bare numeric media id, or series URL
                 ('' prompts interactively).
    seasonnum -- season number used with epnum to resolve a series URL.
    epnum     -- episode number used to resolve a series URL.

    Communicates with the rest of the script through the `global` names
    below; sub_id2/sub_id5/sub_id6 and `lang` are read here but set
    elsewhere -- presumably by subtitles(); TODO confirm.

    NOTE(review): the source was collapsed onto single lines; indentation
    was reconstructed -- verify against version control.
    """
    global url1, url2, filen, player_revision, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2
    print '''
--------------------------
---- Start New Export ----
--------------------------
CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worthit, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')
    # Accept a bare numeric media id; otherwise make sure there is a scheme.
    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if not page_url.startswith('http://') and not page_url.startswith('https://'):
            page_url = 'http://' + page_url
    # URLs ending in a 6-digit media id are already episode pages; anything
    # else is resolved through altfuncs.vidurl().
    try:
        int(page_url[-6:])
    except ValueError:
        if bool(seasonnum) and bool(epnum):
            page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
        elif bool(epnum):
            page_url = altfuncs.vidurl(page_url, 1, epnum)
        else:
            page_url = altfuncs.vidurl(page_url, False, False)
    # Show progress in the console window caption (Windows `title` command).
    subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)
    # ----------
    #lang1, lang2 = altfuncs.config()
    #lang1, lang2, forcesub = altfuncs.config()
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
    player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)
    h = HTMLParser.HTMLParser()
    # Episode title from <title>; shorten it when the output path would
    # exceed the 255-character Windows path limit.
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.getcwd()+'\\export\\'+title+'.flv') > 255:
        title = re.findall('^(.+?) \- ', title)[0]
    # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
    # replace('?', '.').replace('"', "''").replace('|', '-').replace('"',"''").strip()
    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    # Replace characters that are illegal in filenames.
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '"': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}
    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))
    ### End stolen code ###
    subprocess.call('title ' + title.replace('&', '^&'), shell=True)
    # ----------
    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')
    # A 4xx <code> element means the API rejected the request; show the
    # server's message and abort.
    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass
    vid_id = xmlconfig.find('media_id').string
    # ----------
    # No <host> element means no streaming access for this account: fall
    # back to the free 2-minute preview stream.
    try:
        host = xmlconfig.find('host').string
    except AttributeError:
        print 'Downloading 2 minute preview.'
        media_id = xmlconfig.find('media_id').string
        xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id), 'xml')
        host = xmlconfig.find('host').string
    # Split the RTMP host into connection URL (url1) and playpath prefix
    # (url2) for the downloader.
    if re.search('fplive\.net', host):
        url1 = re.findall('.+/c[0-9]+', host).pop()
        url2 = re.findall('c[0-9]+\?.+', host).pop()
    else:
        url1 = re.findall('.+/ondemand/', host).pop()
        url2 = re.findall('ondemand/.+', host).pop()
    filen = xmlconfig.find('file').string
    # ----------
    if 'subs' in sys.argv:
        # Subtitles-only mode: skip the video download and the mux.
        subtitles(title)
        subs_only = True
        hardcoded = True  # bleh
    else:
        page_url2 = page_url
        video()
        #heightp = subprocess.Popen('"video-engine\MediaInfo.exe" --inform=Video;%Height% ".\export\\' + title + '.flv"' ,shell=True , stdout=subprocess.PIPE).stdout.read()
        # Map Crunchyroll's quality code to a display label.
        heightp = {'71' : 'android', '60' : '360p', '61' : '480p', '62' : '720p', '80' : '1080p', '0' : 'highest'}[xmlconfig.find('video_encode_quality').string]
        subtitles(title)
        subtitlefilecode = ''
        #shutil.move(title + '.flv', os.path.join(os.getcwd(), 'export', ''))
        print 'Starting mkv merge'
        if hardcoded:
            # Hardsubbed source: mux audio+video only.
            # NOTE(review): heightp values already end in 'p' ('360p', ...),
            # so "heightp.strip() + 'p'" yields e.g. '720pp' -- looks like a
            # bug, left as-is here (the soft-sub branch below omits the
            # extra 'p').
            subprocess.call('"video-engine\mkvmerge.exe" -o ".\export\\' + title + '[' + heightp.strip() +'p].mkv" --language 1:jpn -a 1 -d 0 ' + '".\export\\' + title + '.flv"' +' --title "' + title +'"')
        else:
            # Map the preferred subtitle language (`lang`, set elsewhere --
            # presumably by subtitles(); TODO confirm) to an ISO-ish code.
            sublang = {u'Español (Espana)': 'spa_spa', u'Français (France)': 'fre', u'Português (Brasil)': 'por', u'English': 'eng', u'Español': 'spa', u'Türkçe': 'tur', u'Italiano': 'ita', u'العربية': 'ara', u'Deutsch': 'deu'}[lang]
            # defaulttrack = False
            #print lang.encode('utf-8')
            #print sub_id5
            #print sub_id6
            # Build one --language/--default-track/--forced-track/--track-name
            # group per downloaded subtitle file.  sub_id5 holds the language
            # codes and sub_id6 the display names (populated elsewhere).
            for i in sub_id2:
                defaultsub = ''
                sublangc = sub_id5.pop(0)
                sublangn = sub_id6.pop(0)
                # print forcesub
                # The preferred language becomes the default track; forcesub
                # additionally marks it as forced.
                if not forcesub:
                    if sublangc == sublang:
                        defaultsub = ' --default-track 0:yes --forced-track 0:no'
                    else:
                        defaultsub = ' --default-track 0:no --forced-track 0:no'
                else:
                    if sublangc == sublang:
                        defaultsub = ' --default-track 0:yes --forced-track 0:yes'
                    else:
                        defaultsub = ' --default-track 0:no --forced-track 0:no'
                if not onlymainsub:
                    subtitlefilecode = subtitlefilecode + ' --language 0:' + sublangc.replace('spa_spa','spa') + defaultsub + ' --track-name 0:"' + sublangn + '" -s 0 ".\export\\'+title+'['+sublangc+']'+sublangn+'.ass"'
                else:
                    # onlymainsub: attach only the preferred language.
                    if sublangc == sublang:
                        subtitlefilecode = subtitlefilecode + ' --language 0:' + sublangc.replace('spa_spa','spa') + defaultsub + ' --track-name 0:"' + sublangn + '" -s 0 ".\export\\'+title+'['+sublangc+']'+sublangn+'.ass"'
            # subprocess.call('"video-engine\mkvmerge.exe" -o ".\export\\' + title + '.mkv" --language 1:jpn -a 1 -d 0 ' +
            # '".\export\\' + title + '.flv" --language 0:' + sublang + ' -s 0 ".\export\\'+title+'.ass"')
            # print '"video-engine\mkvmerge.exe" -o ".\export\\' + title + '.mkv" --language 0:jpn --language 1:jpn -a 1 -d 0 ' + '".\export\\' + title + '.flv"' + subtitlefilecode +' --title "' + title +'"'
            mkvcmd = '"video-engine\mkvmerge.exe" -o ".\export\\' + title + '[' + heightp.strip() +'].mkv" --language 0:jpn --language 1:jpn -a 1 -d 0 ' + '".\export\\' + title + '.flv"' + subtitlefilecode + ' --title "' + title +'"'
            # print mkvcmd
            #print subtitlefilecode
            subprocess.call(mkvcmd)
        print 'Merge process complete'
        subs_only = False
    print
    print '----------'
    print
    print 'Starting Final Cleanup'
    # Remove the intermediate .flv and the per-language .ass files that were
    # merged into the mkv.
    if not subs_only:
        os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.flv')
    if not hardcoded or not subs_only:
        #os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.ass')
        for root, dirs, files in os.walk('export'):
            for file in filter(lambda x: re.match(title +'\[.+\]'+ '.ass', x), files):
                os.remove(os.path.join(root, file))
    print 'Cleanup Complete'
def ultimate(page_url, seasonnum, epnum):
    """Download one Crunchyroll episode and mux it with its subtitles into
    ./export/<title>[<height>p].mkv.

    This revision builds the mkvmerge invocation as an argument list (no
    shell quoting), supports running video-engine/mkvmerge.exe under wine
    on non-Windows hosts, and adds an HLS (.ts) fallback when no RTMP
    <host> is available.

    page_url  -- episode URL, bare numeric media id, or series URL
                 ('' prompts interactively).
    seasonnum -- season number used with epnum to resolve a series URL.
    epnum     -- episode number used to resolve a series URL.

    sub_id2/sub_id5/sub_id6 and `lang` are read here but produced
    elsewhere -- presumably by subtitles(); TODO confirm.

    NOTE(review): the source was collapsed onto single lines; indentation
    was reconstructed -- verify against version control.
    """
    global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2
    #global player_revision
    print '''
--------------------------
---- Start New Export ----
--------------------------
CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worthit, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')
    # Accept a bare numeric media id; otherwise make sure there is a scheme.
    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if not page_url.startswith('http://') and not page_url.startswith('https://'):
            page_url = 'http://' + page_url
    # URLs ending in a 6-digit media id are already episode pages; anything
    # else is resolved through altfuncs.vidurl().
    try:
        int(page_url[-6:])
    except ValueError:
        if bool(seasonnum) and bool(epnum):
            page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
        elif bool(epnum):
            page_url = altfuncs.vidurl(page_url, 1, epnum)
        else:
            page_url = altfuncs.vidurl(page_url, False, False)
    #subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)
    # ----------
    #lang1, lang2 = altfuncs.config()
    #lang1, lang2, forcesub = altfuncs.config()
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)
    #h = HTMLParser.HTMLParser()
    # Episode title from <title>; shorten it when the output path would
    # exceed the 255-character Windows path limit.
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.path.join('export', title + '.flv')) > 255:
        title = re.findall('^(.+?) \- ', title)[0]
    # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
    # replace('?', '.').replace('"', "''").replace('|', '-').replace('"',"''").strip()
    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    # Replace characters that are illegal in filenames.
    # NOTE(review): the '"' key appears twice (identical value) -- harmless
    # but redundant.
    rep = {
        ' / ': ' - ',
        '/': ' - ',
        ':': '-',
        '?': '.',
        '"': "''",
        '|': '-',
        '"': "''",
        'a*G': 'a G',
        '*': '#',
        u'\u2026': '...'
    }
    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))
    ### End stolen code ###
    #subprocess.call('title ' + title.replace('&', '^&'), shell=True)
    # ----------
    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(
        altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')
    # A 4xx <code> element means the API rejected the request; show the
    # server's message and abort.
    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass
    vid_id = xmlconfig.find('media_id').string
    # ----------
    # Pull the RTMP host and the stream file name; either may be missing.
    host = xmlconfig.find('host')
    if host:
        host = host.string
    filen = xmlconfig.find('file')
    if filen:
        filen = filen.string
    if not host and not filen:
        # Neither stream descriptor present: no access, fall back to the
        # free 2-minute preview stream.
        print 'Downloading 2 minute preview.'
        media_id = xmlconfig.find('media_id').string
        xmlconfig = BeautifulSoup(
            altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id), 'xml')
        host = xmlconfig.find('host').string
    # ----------
    if 'subs' in sys.argv:
        # Subtitles-only mode: skip the video download and the mux.
        subtitles(title)
        subs_only = True
        hardcoded = True  # bleh
    else:
        page_url2 = page_url
        if host:
            # RTMP delivery: split host into connection URL (url1) and
            # playpath prefix (url2) for video().
            if re.search('fplive\.net', host):
                url1 = re.findall('.+/c[0-9]+', host).pop()
                url2 = re.findall('c[0-9]+\?.+', host).pop()
            else:
                url1 = re.findall('.+/ondemand/', host).pop()
                url2 = re.findall('ondemand/.+', host).pop()
            video()
            video_input = os.path.join("export", title + '.flv')
        else:
            # HLS delivery: download the transport stream directly.
            video_input = os.path.join("export", title + '.ts')
            video_hls(filen, video_input)
        heightp = '360p' if xmlconfig.height.string == '368' else '{0}p'.format(
            xmlconfig.height.string)  # This is less likely to fail
        subtitles(title)
        print 'Starting mkv merge'
        mkvmerge = os.path.join("video-engine", "mkvmerge.exe")
        filename_output = os.path.join("export", title + '[' + heightp.strip() + '].mkv')
        subtitle_input = []
        # Prefer the bundled mkvmerge.exe (under wine off-Windows);
        # otherwise rely on a system-wide `mkvmerge`.
        if os.path.isfile(mkvmerge):
            with_wine = os.name != 'nt'
        else:
            mkvmerge = "mkvmerge"
            with_wine = False
        cmd = [mkvmerge, "-o", filename_output,
               '--language', '0:jpn', '--language', '1:jpn',
               '-a', '1', '-d', '0',
               video_input, '--title', title]
        if with_wine:
            cmd.insert(0, 'wine')
        if not hardcoded:
            # Map the preferred subtitle language (`lang`, set elsewhere --
            # presumably by subtitles(); TODO confirm) to an ISO-ish code.
            sublang = {u'Español (Espana)': 'spa_spa', u'Français (France)': 'fre', u'Português (Brasil)': 'por', u'English': 'eng', u'Español': 'spa', u'Türkçe': 'tur', u'Italiano': 'ita', u'العربية': 'ara', u'Deutsch': 'deu'}[lang]
            # One option group per downloaded subtitle file; sub_id5 holds
            # language codes, sub_id6 display names.
            for i in sub_id2:
                sublangc = sub_id5.pop(0)
                sublangn = sub_id6.pop(0)
                if onlymainsub and sublangc != sublang:
                    continue
                filename_subtitle = os.path.join(
                    "export", title + '[' + sublangc + ']' + sublangn + '.ass')
                if not os.path.isfile(filename_subtitle):
                    continue
                cmd.extend(['--language', '0:' + sublangc.replace('spa_spa', 'spa')])
                # The preferred language becomes the default track.
                if sublangc == sublang:
                    cmd.extend(['--default-track', '0:yes'])
                else:
                    cmd.extend(['--default-track', '0:no'])
                if forcesub:
                    cmd.extend(['--forced-track', '0:yes'])
                else:
                    cmd.extend(['--forced-track', '0:no'])
                cmd.extend(['--track-name', '0:' + sublangn])
                cmd.extend(['-s', '0'])
                cmd.append(filename_subtitle)
                subtitle_input.append(filename_subtitle)
        subprocess.call(cmd)
        print 'Merge process complete'
        subs_only = False
    print
    print '----------'
    print
    print 'Starting Final Cleanup'
    # Remove the intermediate video file and the merged .ass files.
    if not subs_only:
        os.remove(video_input)
    if not hardcoded or not subs_only:
        #os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.ass')
        for f in subtitle_input:
            os.remove(f)
    print 'Cleanup Complete'
def decode(page_url):
    """Download every soft-subtitle track for one Crunchyroll video as .ass files.

    page_url -- episode URL; prompted for interactively when passed ''.
    Output files land in ./export/ as '<title>[<langcode>]<langname>.ass'.
    Depends on module-level names: altfuncs, CrunchyDec, BeautifulSoup,
    unidecode, re, os, sys.

    NOTE(review): the source was collapsed onto single lines; indentation
    was reconstructed -- verify against version control.
    """
    print '''
--------------------------
---- Start New Export ----
--------------------------
CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worthit, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)
    #h = HTMLParser.HTMLParser()
    # Episode title from <title>; shorten it when the output path would
    # exceed the 255-character Windows path limit.
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.path.join('export', title+'.ass')) > 255:
        title = re.findall('^(.+?) \- ', title)[0]
    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    # Replace characters that are illegal in filenames.
    # NOTE(review): the '"' key appears twice (identical value) -- harmless
    # but redundant.
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '"': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}
    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))
    ### End stolen code ###
    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')
    # A 4xx <code> element means the API rejected the request; show the
    # server's message and abort.
    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass
    xmllist = altfuncs.getxml('RpcApiSubtitle_GetListing', media_id)
    xmllist = unidecode(xmllist).replace('><', '>\n<')
    if '<media_id>None</media_id>' in xmllist:
        print 'The video has hardcoded subtitles.'
        hardcoded = True
        sub_id = False
    else:
        try:
            sub_id2 = re.findall("id=([0-9]+)", xmllist)           # numeric track ids
            sub_id3 = re.findall("title='(\[.+\]) ", xmllist)      # '[Language]' tags
            sub_id4 = re.findall("title='(\[.+\]) ", xmllist)      # second copy
            hardcoded = False
        except IndexError:
            # NOTE(review): re.findall returns a list and never raises
            # IndexError, so this handler looks unreachable; if it ever
            # fired, sub_id2/3/4 below would be undefined.  Confirm intent.
            print "The video's subtitles cannot be found, or are region-locked."
            hardcoded = True
            sub_id = False
        # Translate the human-readable tags into the ISO-639-2 codes used
        # in the output filenames.
        sub_id3 = [word.replace('[English (US)]','eng') for word in sub_id3]
        sub_id3 = [word.replace('[Deutsch]','deu') for word in sub_id3]
        sub_id3 = [word.replace('[Portugues (Brasil)]','por') for word in sub_id3]
        sub_id3 = [word.replace('[Francais (France)]','fre') for word in sub_id3]
        sub_id3 = [word.replace('[Espanol (Espana)]','spa') for word in sub_id3]
        sub_id3 = [word.replace('[Espanol]','spa') for word in sub_id3]
        sub_id3 = [word.replace('[Italiano]','ita') for word in sub_id3]
        sub_id3 = [word.replace('[l`rby@]','ara') for word in sub_id3]
        #sub_id4 = [word.replace('[l`rby@]',u'[العربية]') for word in sub_id4]
        sub_id4 = [word.replace('[l`rby@]',u'[Arabic]') for word in sub_id4]
    # (A large commented-out legacy language-selection fallback block --
    # picking a single sub_id for lang1/lang2/English -- followed here in
    # the original; omitted from this listing, see VCS history.)
    if not hardcoded:
        # Fetch and decode each subtitle track, writing it as UTF-8-sig.
        for i in sub_id2:
            #xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', sub_id)
            xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', i)
            formattedsubs = CrunchyDec().returnsubs(xmlsub)
            # NOTE(review): skipping a None track does not pop sub_id3/4,
            # so the name lists can drift out of sync with sub_id2.
            if formattedsubs is None:
                continue
            #subfile = open(eptitle + '.ass', 'wb')
            subfile = open(os.path.join('export', title+'['+sub_id3.pop(0)+']'+sub_id4.pop(0)+'.ass'), 'wb')
            subfile.write(formattedsubs.encode('utf-8-sig'))
            subfile.close()
        #shutil.move(title + '.ass', os.path.join(os.getcwd(), 'export', ''))
        print 'Subtitles for '+title+' have been downloaded'
# NOTE(review): orphaned fragment -- the text below begins mid-`try` block
# (its `def ultimate(...)`/`try:` header is not present in this file view)
# and appears to be a chunk of yet another revision of ultimate(), using the
# older two-value altfuncs.config() and an active player_revision lookup.
# Preserved verbatim; recover the full definition from version control.
int(page_url[-6:]) except ValueError: if bool(seasonnum) and bool(epnum): page_url = altfuncs.vidurl(page_url, seasonnum, epnum) elif bool(epnum): page_url = altfuncs.vidurl(page_url, 1, epnum) else: page_url = altfuncs.vidurl(page_url, False, False) subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True) # ---------- lang1, lang2 = altfuncs.config() player_revision = altfuncs.playerrev(page_url) html = altfuncs.gethtml(page_url) h = HTMLParser.HTMLParser() title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '') if len(os.getcwd()+'\\export\\'+title+'.flv') > 255: title = re.findall('^(.+?) \- ', title)[0] # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-'). # replace('?', '.').replace('"', "''").replace('|', '-').replace('"',"''").strip() ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ### rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '"': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'} rep = dict((re.escape(k), v) for k, v in rep.iteritems()) pattern = re.compile("|".join(rep.keys())) title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))
def ultimate(page_url, seasonnum, epnum):
    """Download one Crunchyroll episode and mux it with its subtitles into
    ./export/<title>[<height>p].mkv.

    Like the other list-building revision, but reads <host>/<file> through
    BeautifulSoup attribute access with `and` short-circuiting, and the
    preview fallback refreshes both host and filen.

    page_url  -- episode URL, bare numeric media id, or series URL
                 ('' prompts interactively).
    seasonnum -- season number used with epnum to resolve a series URL.
    epnum     -- episode number used to resolve a series URL.

    sub_id2/sub_id5/sub_id6 and `lang` are read here but produced
    elsewhere -- presumably by subtitles(); TODO confirm.

    NOTE(review): the source was collapsed onto single lines; indentation
    was reconstructed -- verify against version control.
    """
    global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2, onlymainsub
    #global player_revision
    print '''
--------------------------
---- Start New Export ----
--------------------------
CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worthit, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')
    # Accept a bare numeric media id; otherwise make sure there is a scheme.
    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if not page_url.startswith('http://') and not page_url.startswith('https://'):
            page_url = 'http://' + page_url
    # URLs ending in a 6-digit media id are already episode pages; anything
    # else is resolved through altfuncs.vidurl().
    try:
        int(page_url[-6:])
    except ValueError:
        if bool(seasonnum) and bool(epnum):
            page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
        elif bool(epnum):
            page_url = altfuncs.vidurl(page_url, 1, epnum)
        else:
            page_url = altfuncs.vidurl(page_url, False, False)
    #subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)
    # ----------
    #lang1, lang2 = altfuncs.config()
    #lang1, lang2, forcesub = altfuncs.config()
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)
    #h = HTMLParser.HTMLParser()
    # Episode title from <title>; shorten it when the output path would
    # exceed the 255-character Windows path limit.
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.path.join('export', title+'.flv')) > 255:
        title = re.findall('^(.+?) \- ', title)[0]
    # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
    # replace('?', '.').replace('"', "''").replace('|', '-').replace('"',"''").strip()
    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    # Replace characters that are illegal in filenames.
    # NOTE(review): the '"' key appears twice (identical value) -- harmless
    # but redundant.
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '"': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}
    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))
    ### End stolen code ###
    #subprocess.call('title ' + title.replace('&', '^&'), shell=True)
    # ----------
    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')
    # A 4xx <code> element means the API rejected the request; show the
    # server's message and abort.
    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass
    # ----------
    # `tag and tag.string` yields None when the element is absent.
    host = xmlconfig.host and xmlconfig.host.string
    filen = xmlconfig.file and xmlconfig.file.string
    if not (host or filen):
        # Neither stream descriptor present: no access, fall back to the
        # free 2-minute preview stream.
        print 'Downloading 2 minute preview.'
        media_id = xmlconfig.media_id.string
        xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id), 'xml')
        host = xmlconfig.host.string
        filen = xmlconfig.file.string
    # ----------
    if 'subs' in sys.argv:
        # Subtitles-only mode: skip the video download and the mux.
        subtitles(title)
        subs_only = True
        hardcoded = True  # bleh
    else:
        page_url2 = page_url
        if host:
            # RTMP delivery: split host into connection URL (url1) and
            # playpath prefix (url2) for video().
            if re.search('fplive\.net', host):
                url1 = re.findall('.+/c[0-9]+', host).pop()
                url2 = re.findall('c[0-9]+\?.+', host).pop()
            else:
                url1 = re.findall('.+/ondemand/', host).pop()
                url2 = re.findall('ondemand/.+', host).pop()
            video()
            video_input = os.path.join("export", title + '.flv')
        else:
            # HLS delivery: download the transport stream directly.
            video_input = os.path.join("export", title + '.ts')
            video_hls(filen, video_input)
        heightp = '360p' if xmlconfig.height.string == '368' else '{0}p'.format(xmlconfig.height.string)  # This is less likely to fail
        subtitles(title)
        print 'Starting mkv merge'
        mkvmerge = os.path.join("video-engine", "mkvmerge.exe")
        filename_output = os.path.join("export", title + '[' + heightp.strip() +'].mkv')
        subtitle_input = []
        # Prefer the bundled mkvmerge.exe (under wine off-Windows);
        # otherwise rely on a system-wide `mkvmerge`.
        if os.path.isfile(mkvmerge):
            with_wine = os.name != 'nt'
        else:
            mkvmerge = "mkvmerge"
            with_wine = False
        cmd = [mkvmerge, "-o", filename_output,
               '--language', '0:jpn', '--language', '1:jpn',
               '-a', '1', '-d', '0',
               video_input, '--title', title]
        if with_wine:
            cmd.insert(0, 'wine')
        if not hardcoded:
            # Map the preferred subtitle language (`lang`, set elsewhere --
            # presumably by subtitles(); TODO confirm) to an ISO-ish code.
            sublang = {u'Español (Espana)': 'spa_spa', u'Français (France)': 'fre', u'Português (Brasil)': 'por', u'English': 'eng', u'Español': 'spa', u'Türkçe': 'tur', u'Italiano': 'ita', u'العربية': 'ara', u'Deutsch': 'deu'}[lang]
            # One option group per downloaded subtitle file; sub_id5 holds
            # language codes, sub_id6 display names.
            for i in sub_id2:
                sublangc = sub_id5.pop(0)
                sublangn = sub_id6.pop(0)
                if onlymainsub and sublangc != sublang:
                    continue
                filename_subtitle = os.path.join("export", title+'['+sublangc+']'+sublangn+'.ass')
                if not os.path.isfile(filename_subtitle):
                    continue
                cmd.extend(['--language', '0:' + sublangc.replace('spa_spa','spa')])
                # The preferred language becomes the default track.
                if sublangc == sublang:
                    cmd.extend(['--default-track', '0:yes'])
                else:
                    cmd.extend(['--default-track', '0:no'])
                if forcesub:
                    cmd.extend(['--forced-track', '0:yes'])
                else:
                    cmd.extend(['--forced-track', '0:no'])
                cmd.extend(['--track-name', '0:' + sublangn])
                cmd.extend(['-s', '0'])
                cmd.append(filename_subtitle)
                subtitle_input.append(filename_subtitle)
        subprocess.call(cmd)
        print 'Merge process complete'
        subs_only = False
    print
    print '----------'
    print
    print 'Starting Final Cleanup'
    # Remove the intermediate video file and the merged .ass files.
    if not subs_only:
        os.remove(video_input)
    if not hardcoded or not subs_only:
        #os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.ass')
        for f in subtitle_input:
            os.remove(f)
    print 'Cleanup Complete'
def ultimate(page_url, seasonnum, epnum): global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2 #global player_revision print ''' -------------------------- ---- Start New Export ---- -------------------------- CrunchyRoll Downloader Toolkit DX v0.98 Crunchyroll hasn't changed anything. If you don't have a premium account, go and sign up for one now. It's well worthit, and supports the animators. ---------- Booting up... ''' if page_url == '': page_url = raw_input('Please enter Crunchyroll video URL:\n') try: int(page_url) page_url = 'http://www.crunchyroll.com/media-' + page_url except ValueError: if not page_url.startswith('http://') and not page_url.startswith('https://'): page_url = 'http://' + page_url try: int(page_url[-6:]) except ValueError: if bool(seasonnum) and bool(epnum): page_url = altfuncs.vidurl(page_url, seasonnum, epnum) elif bool(epnum): page_url = altfuncs.vidurl(page_url, 1, epnum) else: page_url = altfuncs.vidurl(page_url, False, False) subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True) # ---------- #lang1, lang2 = altfuncs.config() #lang1, lang2, forcesub = altfuncs.config() lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config() #player_revision = altfuncs.playerrev(page_url) html = altfuncs.gethtml(page_url) h = HTMLParser.HTMLParser() title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '') if len(os.getcwd()+'./export/'+title+'.flv') > 255: title = re.findall('^(.+?) \- ', title)[0] # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-'). 
# replace('?', '.').replace('"', "''").replace('|', '-').replace('"',"''").strip() ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ### rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '"': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'} rep = dict((re.escape(k), v) for k, v in rep.iteritems()) pattern = re.compile("|".join(rep.keys())) title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title)) ### End stolen code ### subprocess.call('title ' + title.replace('&', '^&'), shell=True) # ---------- media_id = page_url[-6:] xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml') try: if '4' in xmlconfig.find_all('code')[0]: print xmlconfig.find_all('msg')[0].text sys.exit() except IndexError: pass vid_id = xmlconfig.find('media_id').string # ---------- try: host = xmlconfig.find('host').string except AttributeError: print 'Downloading 2 minute preview.' media_id = xmlconfig.find('media_id').string xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id), 'xml') host = xmlconfig.find('host').string if re.search('fplive\.net', host): url1 = re.findall('.+/c[0-9]+', host).pop() url2 = re.findall('c[0-9]+\?.+', host).pop() else: url1 = re.findall('.+/ondemand/', host).pop() url2 = re.findall('ondemand/.+', host).pop() filen = xmlconfig.find('file').string # ---------- if 'subs' in sys.argv: subtitles(title) subs_only = True hardcoded = True # bleh else: page_url2 = page_url video() #heightp = subprocess.Popen('"video-engine\MediaInfo.exe" --inform=Video;%Height% "./export/' + title + '.flv"' ,shell=True , stdout=subprocess.PIPE).stdout.read() heightp = {'71' : 'android', '60' : '360p', '61' : '480p', '62' : '720p', '80' : '1080p', '0' : 'highest'}[xmlconfig.find('video_encode_quality').string] subtitles(title) subtitlefilecode='' #shutil.move(title + '.flv', os.path.join(os.getcwd(), 'export', '')) print 'Starting mkv 
merge' if hardcoded: subprocess.call('mkvmerge -o "./export/' + title + '[' + heightp.strip() +'p].mkv" --language 1:jpn -a 1 -d 0 ' + '"./export/' + title + '.flv"' +'"') else: sublang = {u'Español (Espana)': 'spa_spa', u'Français (France)': 'fre', u'Português (Brasil)': 'por', u'English': 'eng', u'Español': 'spa', u'Türkçe': 'tur', u'Italiano': 'ita', u'\u0627\u0644\u0639\u0631\u0628\u064a\u0629': 'ara', u'Deutsch': 'deu'}[lang] # defaulttrack = False #print lang.encode('utf-8') #print sub_id5 #print sub_id6 for i in sub_id2: defaultsub='' sublangc=sub_id5.pop(0) sublangn=sub_id6.pop(0) # print forcesub if not forcesub: if sublangc == sublang: defaultsub=' --default-track 0:yes --forced-track 0:no' else: defaultsub=' --default-track 0:no --forced-track 0:no' else: if sublangc == sublang: defaultsub=' --default-track 0:yes --forced-track 0:yes' else: defaultsub=' --default-track 0:no --forced-track 0:no' if not onlymainsub: subtitlefilecode=subtitlefilecode+' --language 0:' + sublangc.replace('spa_spa','spa') + defaultsub +' --track-name 0:"' + sublangn + '" -s 0 "./export/'+title+'['+sublangc+']'+sublangn+'.ass"' else: if sublangc == sublang: subtitlefilecode=subtitlefilecode+' --language 0:' + sublangc.replace('spa_spa','spa') + defaultsub +' --track-name 0:"' + sublangn + '" -s 0 "./export/'+title+'['+sublangc+']'+sublangn+'.ass"' # subprocess.call('"mkvmerge" -o ".\export\' + title + '.mkv" --language 1:jpn -a 1 -d 0 ' + # '".\export\\' + title + '.flv" --language 0:' + sublang + ' -s 0 ".\export\\'+title+'.ass"') # print '"mkvmerge" -o ".\export\\' + title + '.mkv" --language 0:jpn --language 1:jpn -a 1 -d 0 ' + '".\export\\' + title + '.flv"' + subtitlefilecode +' --title "' + title +'"' mkvcmd='mkvmerge -o "./export/' + title + '[' + heightp.strip() +'].mkv" --language 0:jpn --language 1:jpn -a 1 -d 0 ' + '"./export/' + title + '.flv"' + subtitlefilecode +' --title "' + title +'"' #print mkvcmd #print subtitlefilecode os.system(mkvcmd) print 'Merge process 
complete' subs_only = False print print '----------' print print 'Starting Final Cleanup' if not subs_only: os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.flv') if not hardcoded or not subs_only: #os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.ass') for root, dirs, files in os.walk('export'): for file in filter(lambda x: re.match(title +'\[.+\]'+ '.ass', x), files): os.remove(os.path.join(root, file)) print 'Cleanup Complete'
def ultimate(page_url='', seasonnum=0, epnum=0, sess_id_=''):
    """Download a single Crunchyroll episode (legacy implementation).

    NOTE(review): this definition is shadowed by a second ``def ultimate``
    later in this file, so this version is dead code; kept for history only.

    page_url  -- episode URL or bare numeric media id; when empty, a
                 hard-coded debug URL is used (the input() prompt is
                 commented out below).
    seasonnum, epnum -- accepted but unused in this version (the resolver
                 that consumed them is commented out below).
    sess_id_  -- session id; read from the on-disk 'cookies' ini file
                 when empty.
    """
    #global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2, onlymainsub
    #global player_revision
    print('''
  --------------------------
  ---- Start New Export ----
  --------------------------

CrunchyRoll Downloader Toolkit DX v0.98b

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------

Booting up...
''')
    if page_url == '':
        #page_url = input('Please enter Crunchyroll video URL:\n')
        #page_url = 'https://www.crunchyroll.com/the-rising-of-the-shield-hero/episode-10-in-the-midst-of-turmoil-781157'
        #page_url = 'http://www.crunchyroll.com/military/episode-1-the-mission-begins-668503'
        # NOTE(review): hard-coded debug URL — an empty argument always
        # downloads this specific episode.
        page_url = 'https://www.crunchyroll.com/mob-psycho-100/episode-11-guidance-psychic-sensor-780930'
    # A bare media id (all digits) is expanded to a full media URL;
    # otherwise make sure the URL carries an http(s) scheme.
    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if re.findall(r'https?:\/\/', page_url) == []:
            page_url = 'http://' + page_url
    '''
    try:
        int(page_url[-6:])
    except ValueError:
        if bool(seasonnum) and bool(epnum):
            page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
        elif bool(epnum):
            page_url = altfuncs.vidurl(page_url, 1, epnum)
        else:
            page_url = altfuncs.vidurl(page_url, False, False)
    '''
    # ----------
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub, connection_n_, proxy_ = config(
    )
    # Resolve the session id from the 'cookies' ini file when the caller
    # did not supply one; forceusa selects the US-region cookie.
    if sess_id_ == '':
        cookies_ = ConfigParser()
        cookies_.read('cookies')
        if forceusa:
            sess_id_ = cookies_.get('COOKIES', 'sess_id_usa')
        else:
            sess_id_ = cookies_.get('COOKIES', 'sess_id')
    media_id = re.findall(r'https?:\/\/www\.crunchyroll\.com\/.+\/.+-(\d*)',
                          page_url)[0]
    #htmlconfig = BeautifulSoup(gethtml(page_url), 'html')
    # The vilos player embeds its media config as a JSON blob in the page.
    htmlconfig = json.loads(
        re.findall(r'vilos\.config\.media = ({.*})', gethtml(page_url))[0])
    # Map hardsub language -> stream URL, then dump what was found.
    stream_url = {}
    for i in htmlconfig['streams']:
        stream_url.update({i['hardsub_lang']: i['url']})
    for i in htmlconfig['subtitles']:
        print(i["language"], i["url"])
    for i in stream_url:
        print(i, stream_url[i])
    media_info = getxml('RpcApiVideoPlayer_GetStandardConfig', media_id)
    #print(media_info)
    #print(media_info['file'])
    #print(media_info['media_metadata']['series_title'])
    #print(media_info['media_metadata']['episode_number'])
    #print(media_info['media_metadata']['episode_title'])
    title: str = '%s Episode %s - %s' % (
        media_info['media_metadata']['series_title'],
        media_info['media_metadata']['episode_number'],
        media_info['media_metadata']['episode_title'])
    # Fall back to a shorter title when the export path would exceed 255
    # characters or there is no episode title.
    # NOTE(review): `is ''` is a literal identity comparison and is not a
    # reliable equality test; it should be `== ''` (left as-is, dead code).
    if len(os.path.join('export', title + '.flv')
           ) > 255 or media_info['media_metadata']['episode_title'] is '':
        title: str = '%s Episode %s' % (
            media_info['media_metadata']['series_title'],
            media_info['media_metadata']['episode_number'])
    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings and improved to include the backslash###
    # Characters that are illegal/awkward in file names -> safe substitutes.
    rep = {
        ' / ': ' - ',
        '/': ' - ',
        ':': '-',
        '?': '.',
        '"': "''",
        '|': '-',
        '"': "''",
        'a*G': 'a G',
        '*': '#',
        '\u2026': '...',
        ' \ ': ' - '
    }
    rep = dict((re.escape(k), v) for k, v in rep.items())
    pattern = re.compile("|".join(rep.keys()))
    title_shell = unidecode(
        pattern.sub(lambda m: rep[re.escape(m.group(0))], title))
    ### End stolen code ###
    # ----------
    print(format('Now Downloading - ' + title_shell))
    #video_input = os.path.join("export", title + '.ts')
    # dircheck builds the output .ts path from these name components,
    # truncating so the total stays under the 240-char budget.
    video_input = dircheck([
        os.path.abspath('export') + '\\',
        media_info['media_metadata']['series_title'], ' Episode',
        ' - ' + media_info['media_metadata']['episode_number'],
        ' - ' + media_info['media_metadata']['episode_title'], '.ts'
    ], [
        'True',
        'True',
        'False',
        'True',
        1,
        'True',
    ], 240)
def ultimate(page_url='', seasonnum=0, epnum=0, sess_id_=''): #global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2, onlymainsub #global player_revision print(''' -------------------------- ---- Start New Export ---- -------------------------- CrunchyRoll Downloader Toolkit DX v0.98b Crunchyroll hasn't changed anything. If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators. ---------- Booting up... ''') if page_url == '': page_url = input('Please enter Crunchyroll video URL:\n') #page_url = 'https://www.crunchyroll.com/the-rising-of-the-shield-hero/episode-11-catastrophe-returns-781158' #page_url = 'http://www.crunchyroll.com/military/episode-1-the-mission-begins-668503' #page_url = 'https://www.crunchyroll.com/mob-psycho-100/episode-11-guidance-psychic-sensor-780930' try: int(page_url) page_url = 'http://www.crunchyroll.com/media-' + page_url except ValueError: if not re.findall(r'https?://', page_url): page_url = 'http://' + page_url ''' try: int(page_url[-6:]) except ValueError: if bool(seasonnum) and bool(epnum): page_url = altfuncs.vidurl(page_url, seasonnum, epnum) elif bool(epnum): page_url = altfuncs.vidurl(page_url, 1, epnum) else: page_url = altfuncs.vidurl(page_url, False, False) ''' # ---------- #lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub, connection_n_, proxy_ = config() config_ = config() if not os.path.lexists(config_['download_dirctory']): os.makedirs(config_['download_dirctory']) #print(config_) forcesub = config_['forcesubtitle'] if sess_id_ == '': cookies_ = ConfigParser() cookies_.read('cookies') if config_['forceusa']: sess_id_ = cookies_.get('COOKIES', 'sess_id_usa') else: sess_id_ = cookies_.get('COOKIES', 'sess_id') media_id = re.findall(r'https?://www\.crunchyroll\.com/.+/.+-(\d*)', page_url)[0] #htmlconfig = BeautifulSoup(gethtml(page_url), 'html') html_page_ = gethtml(page_url) #print(re.findall(r'vilos\.config\.media 
= ({.*})',html_page_)) htmlconfig = json.loads( re.findall(r'vilos\.config\.media = ({.*})', html_page_)[0]) htmlconfig['metadata']['series_title'] = json.loads( re.findall(r'vilos\.config\.analytics = ({.*})', html_page_)[0])['media_reporting_parent']['title'] stream_url = {} stream_url_dash = {} # print(htmlconfig) for i in htmlconfig['streams']: if i['format'] == 'adaptive_hls': stream_url.update({i['hardsub_lang']: i['url']}) elif i['format'] == 'adaptive_dash': stream_url_dash.update({i['hardsub_lang']: i['url']}) #stream_url.update({i['hardsub_lang']: i['url']}) #for i in htmlconfig['subtitles']: # print(i["language"], i["url"]) #for i in stream_url: # print(i, stream_url[i]) #media_info = getxml('RpcApiVideoPlayer_GetStandardConfig', media_id) #print(media_info) #print(media_info['file']) #print(media_info['media_metadata']['series_title']) #print(media_info['media_metadata']['episode_number']) #print(media_info['media_metadata']['episode_title']) if htmlconfig['metadata']['episode_number'] != '': title = '%s Episode %s - %s' % ( htmlconfig['metadata']['series_title'], htmlconfig['metadata']['episode_number'], htmlconfig['metadata']['title']) # print(title) title = clean_text(title) # print(title) else: title = '%s - %s' % (htmlconfig['metadata']['series_title'], htmlconfig['metadata']['title']) # print(title) title = clean_text(title) # print(title) #title: str = re.findall(r'var mediaMetadata = \{.*?name":"(.+?)",".+?\};',html_page_)[0] #if len(os.path.join('export', title + '.flv')) > 255 or media_info['media_metadata']['episode_title'] is '': # title = clean_text('%s Episode %s' % (media_info['media_metadata']['series_title'], media_info['media_metadata']['episode_number'])) #print(config_['language2']) #Loc_lang = {u'Español (Espana)': 'esES', u'Français (France)': 'frFR', u'Português (Brasil)': 'ptBR', # u'English': 'enUS', u'Español': 'esLA', u'Türkçe': 'trTR', u'Italiano': 'itIT', # u'العربية': 'arME', u'Deutsch': 'deDE', u'Русский' : 'ruRU'} 
Loc_lang = { 'Espanol_Espana': 'esES', 'Francais': 'frFR', 'Portugues': 'ptBR', 'English': 'enUS', 'Espanol': 'esLA', 'Turkce': 'trTR', 'Italiano': 'itIT', 'Arabic': 'arME', 'Deutsch': 'deDE', 'Russian': 'ruRU' } Loc_lang_1 = Loc_lang[config_['language']] Loc_lang_2 = Loc_lang[config_['language2']] #print(Loc_lang_1,Loc_lang_2,stream_url) if forcesub: try: hls_url = stream_url[Loc_lang_1] dash_url = stream_url_dash[Loc_lang_1] except: try: hls_url = stream_url[Loc_lang_2] dash_url = stream_url_dash[Loc_lang_2] except: hls_url = stream_url[None] dash_url = stream_url_dash[None] forcesub = False else: # print(stream_url) try: hls_url = stream_url[None] dash_url = stream_url_dash[None] except: try: hls_url = stream_url['enUS'] dash_url = stream_url_dash['enUS'] except: hls_url = stream_url[list(stream_url)[0]] dash_url = stream_url_dash[list(stream_url_dash)[0]] #print(dash_url) hls_url_m3u8 = m3u8.load(hls_url) hls_url_parse = {} dash_id_parse = {} for stream in hls_url_m3u8.playlists: hls_url_parse.update( {stream.stream_info.resolution[1]: stream.absolute_uri}) if config_['video_quality'] == '1080p': try: hls_url = hls_url_parse[1080] except: pass elif config_['video_quality'] == '720p': try: hls_url = hls_url_parse[720] except: pass elif config_['video_quality'] == '480p': try: hls_url = hls_url_parse[480] except: pass elif config_['video_quality'] == '360p': try: hls_url = hls_url_parse[360] except: pass elif config_['video_quality'] == '240p': try: hls_url = hls_url_parse[240] except: pass ### End stolen code ### # ---------- #print(vquality,hls_url) print(format('Now Downloading - ' + title)) #video_input = os.path.join("export", title + '.ts') if htmlconfig['metadata']['episode_number'] != '': video_input = dircheck([ os.path.join(os.path.abspath(config_['download_dirctory']), ''), clean_text(htmlconfig['metadata']['series_title']), ' Episode', ' - ' + clean_text(htmlconfig['metadata']['episode_number']), ' - ' + clean_text(htmlconfig['metadata']['title']), 
'.ts' ], [ 'True', 'True', 'False', 'True', 1, 'True', ], 240) else: video_input = dircheck([ os.path.join(os.path.abspath(config_['download_dirctory']), ''), clean_text(htmlconfig['metadata']['series_title']), ' - ' + clean_text(htmlconfig['metadata']['title']), '.ts' ], [ 'True', 'True', 1, 'True', ], 240) download_subprocess_result = 0 try: # assert 1==2 download_ = video_hls() download_subprocess_result = download_.video_hls( hls_url, video_input, config_['connection_n_']) except AssertionError: download_subprocess_result = 1 if download_subprocess_result != 0: try: print( 'It seem there is problem in HLS stream, will use DASH stream instead' ) # assert 1==2 download_ = dash_download() # print(config_['connection_n_'],config_['video_quality']) download_subprocess_result = download_.download( dash_url, video_input, config_['connection_n_'], r=config_['video_quality'], abr='best') except: download_subprocess_result = 1 if download_subprocess_result != 0: print( 'It seem there is problem in DASH stream, will use External Library YoutubeDL instead' ) with youtube_dl.YoutubeDL({'logger': MyLogger()}) as ydl: dash_info_dict = ydl.extract_info(dash_url, download=False) for stream in dash_info_dict['formats']: if not stream['height'] == None: dash_id_parse.update({stream['height']: stream['format_id']}) # for i in dash_info_dict['formats']: # print(i['format_id'], i['ext'], i['height'], i['tbr'], i['asr'], i['language'], i['format_note'], i['filesize'], # i['vcodec'], i['acodec'], i['format']) # for i in hls_url_parse: # print(i,hls_url_parse[i]) if config_['video_quality'] == '1080p': try: dash_video_id = dash_id_parse[1080] except: pass elif config_['video_quality'] == '720p': try: dash_video_id = dash_id_parse[720] except: pass elif config_['video_quality'] == '480p': try: dash_video_id = dash_id_parse[480] except: pass elif config_['video_quality'] == '360p': try: dash_video_id = dash_id_parse[360] except: pass elif config_['video_quality'] == '240p': try: 
dash_video_id = dash_id_parse[240] except: pass def youtube_dl_proxy(*args, **kwargs): import sys if 'idlelib.run' in sys.modules: # code to force this script to only run in console try: import run_code_with_console return run_code_with_console.run_code_with_console() except: pass # end of code to force this script to only run in console return youtube_dl.YoutubeDL(*args, **kwargs) pass # youtube_dl_proxy({'format': dash_video_id + ',bestaudio', # 'outtmpl': video_input[:-3] + '.%(ext)s'}).download([dash_url]) if not 'idlelib.run' in sys.modules: with youtube_dl.YoutubeDL({ 'format': dash_video_id + ',bestaudio', 'outtmpl': video_input[:-3] + '.%(ext)s' }) as ydl: ydl.download([dash_url]) else: youtube_dl_script = '''\ import youtube_dl with youtube_dl.YoutubeDL( {'format': \'''' + dash_video_id + ''',bestaudio', 'outtmpl': r\'''' + video_input[: -3] + '''\' + '.%(ext)s'}) as ydl: ydl.download([\'\'\'''' + dash_url + '''\'\'\']) ''' #print(youtube_dl_script) command = 'where' # Windows if os.name != "nt": # non-Windows command = 'which' python_path_ = os.path.normpath( os.path.join( os.path.split(subprocess.getoutput([command, 'pip3']))[0], '..', 'python.exe')) try: subprocess.call([python_path_, '-c', youtube_dl_script]) except FileNotFoundError: # fix for old version windows that dont have 'where' command reg_ = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Python\PythonCore') python_request_v = [3, 0] if len(python_request_v) > 0: if len(python_request_v) < 2: python_request_v += [0] python_request_v = python_request_v[ 0] + python_request_v[1] / 10 else: python_request_v = 0.0 for reg_i in range(0, winreg.QueryInfoKey(reg_)[0]): reg_2 = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Python\PythonCore') if float(winreg.EnumKey(reg_2, reg_i)) >= python_request_v and \ True if python_request_v == 0.0 else float(winreg.EnumKey(reg_2, reg_i)) < float( round(python_request_v) + 1): reg_2 = winreg.OpenKey(reg_2, winreg.EnumKey(reg_2, reg_i)) reg_2 = 
winreg.OpenKey(reg_2, r'PythonPath') python_path_ = os.path.normpath( os.path.join( winreg.EnumValue(reg_2, 0)[1].split(';')[0], '..', 'python.exe')) subprocess.call([python_path_, '-c', youtube_dl_script]) """ if not 'idlelib.run' in sys.modules: #video_hls(hls_url, video_input, config_['connection_n_']) try: #assert 1==2 download_ = video_hls() download_.video_hls(hls_url, video_input, config_['connection_n_']) except AssertionError: try: print('It seem there is problem in HLS stream, will use DASH stream instead') #assert 1==2 download_ = dash_download() # print(config_['connection_n_'],config_['video_quality']) download_.download(dash_url, video_input, config_['connection_n_'], r=config_['video_quality'], abr='best') except: print('It seem there is problem in DASH stream, will use External Library YoutubeDL instead') with youtube_dl.YoutubeDL({'logger': MyLogger()}) as ydl: dash_info_dict = ydl.extract_info(dash_url, download=False) for stream in dash_info_dict['formats']: if not stream['height'] == None: dash_id_parse.update({stream['height']: stream['format_id']}) # for i in dash_info_dict['formats']: # print(i['format_id'], i['ext'], i['height'], i['tbr'], i['asr'], i['language'], i['format_note'], i['filesize'], # i['vcodec'], i['acodec'], i['format']) # for i in hls_url_parse: # print(i,hls_url_parse[i]) if config_['video_quality'] == '1080p': try: dash_video_id = dash_id_parse[1080] except: pass elif config_['video_quality'] == '720p': try: dash_video_id = dash_id_parse[720] except: pass elif config_['video_quality'] == '480p': try: dash_video_id = dash_id_parse[480] except: pass elif config_['video_quality'] == '360p': try: dash_video_id = dash_id_parse[360] except: pass elif config_['video_quality'] == '240p': try: dash_video_id = dash_id_parse[240] except: pass with youtube_dl.YoutubeDL( {'format': dash_video_id + ',bestaudio', 'outtmpl': video_input[:-3] + '.%(ext)s'}) as ydl: ydl.download([dash_url]) else: if 
os.path.lexists(os.path.abspath(os.path.join(".", "crunchy-xml-decoder", "hls.py"))): hls_s_path = os.path.abspath(os.path.join(".", "crunchy-xml-decoder")) elif os.path.lexists(os.path.abspath(os.path.join("..", "crunchy-xml-decoder", "hls.py"))): hls_s_path = os.path.abspath(os.path.join("..", "crunchy-xml-decoder")) else: print('hls script not found') hls_script = '''\ #!/usr/bin/python3 # -*- coding: utf-8 -*- import sys sys.path.append(r"''' + hls_s_path + '''") from hls_ import video_hls download_ = video_hls() download_.video_hls("''' + hls_url + '''", r"''' + video_input + '''", ''' + str(config_['connection_n_']) + ''') #video_hls("''' + hls_url + '''", r"''' + video_input + '''", ''' + str(config_['connection_n_']) + ''')''' # print(hls_script) open(os.path.join(".", "export", "hls_script_temp.py"), "w", encoding='utf-8').write(hls_script) hls_subprocess_result = subprocess.call([sys.executable.replace('pythonw.exe', 'python.exe'), os.path.join(".", "export", "hls_script_temp.py")]) if not hls_subprocess_result == 0: print('It seem there is problem in HLS stream, will use DASH stream instead') subprocess.call([sys.executable.replace('pythonw.exe', 'python.exe'), '-m','youtube_dl', '-f', dash_video_id+',bestaudio', '-o', video_input[:-3]+'.%(ext)s', dash_url ]) os.remove(os.path.join(".", "export", "hls_script_temp.py")) """ #decode(page_url) vilos_subtitle(page_url) mkv_merge(video_input, config_['video_quality'], 'English')