Example #1
def subtitles(eptitle):
    global sub_id

    xmllist = altfuncs.getxml('RpcApiSubtitle_GetListing', media_id)
    xmllist = unidecode(xmllist).replace('><', '>\n<')

    global hardcoded
    if '<media_id>None</media_id>' in xmllist:
        print 'The video has hardcoded subtitles.'
        hardcoded = True
        sub_id = False
    else:
        try:
            sub_id = re.findall("id='([0-9]+)' .+? title='.+?" + re.escape(unidecode(lang)) + "'", xmllist)[0]
            hardcoded = False
        except IndexError:
            try:
                sub_id = re.findall("id=([0-9]+)' title='.+English", xmllist)[0]  # default back to English
                print 'Language not found, reverting to English'
                hardcoded = False
            except IndexError:
                print "The video's subtitles cannot be found, or are region-locked."
                hardcoded = True
                sub_id = False

    if not hardcoded:
        xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', sub_id)
        formattedsubs = CrunchyDec().returnsubs(xmlsub)
        subfile = open(eptitle + '.ass', 'wb')
        subfile.write(formattedsubs.encode('utf-8-sig'))
        subfile.close()
        shutil.move(eptitle + '.ass', '.\export\\')
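For comparison, here is a minimal, hedged sketch (not part of the original toolkit) of the same "preferred language, else English" selection done in one helper. The listing markup is assumed from the regexes above, i.e. entries carrying id='12345' and title='[Language] ...' attributes:

import re

def pick_subtitle(xmllist, preferred, fallback='English'):
    # Assumed entry format (inferred from the regexes above):
    #   ... id='12345' ... title='[English (US)] Some Title' ...
    entries = re.findall(r"id='?([0-9]+)'[^>\n]*?title='([^']+)'", xmllist)
    for wanted in (preferred, fallback):
        for sub_id, sub_title in entries:
            if wanted in sub_title:
                return sub_id, sub_title
    return None, None  # nothing matched; caller treats this like "hardcoded"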
Example #2
def subtitles(eptitle):
    global sub_id
    global sub_id2
    global sub_id3
    global sub_id4
    global sub_id5
    global sub_id6
    global lang

    xmllist = altfuncs.getxml('RpcApiSubtitle_GetListing', media_id)
    xmllist = unidecode(xmllist).replace('><', '>\n<')

    global hardcoded
    if '<media_id>None</media_id>' in xmllist:
        print 'The video has hardcoded subtitles.'
        hardcoded = True
        sub_id = False
    else:
        try:
            sub_id2 = re.findall("id=([0-9]+)", xmllist)
            sub_id3 = re.findall("title='(\[.+\]) ", xmllist)
            sub_id4 = re.findall("title='(\[.+\]) ", xmllist)
            sub_id5 = re.findall("title='(\[.+\]) ", xmllist)
            sub_id6 = re.findall("title='(\[.+\]) ", xmllist)
            hardcoded = False
#            try:
#                sub_id = re.findall("id=([0-9]+)' title='\[" + re.escape(unidecode(lang1)), xmllist)[0]
#                lang = lang1
#            except IndexError:
#                try:
#                    sub_id = re.findall("id=([0-9]+)' title='\[" + re.escape(unidecode(lang2)), xmllist)[0]
#                    lang = lang2
        except IndexError:
            print "The video's subtitles cannot be found, or are region-locked."
            hardcoded = True
            sub_id = False
        try:
            sub_id = re.findall("id=([0-9]+)' title='\[" + re.escape(unidecode(lang1)), xmllist)[0]
            lang = lang1
        except IndexError:
            try:
                sub_id = re.findall("id=([0-9]+)' title='\[" + re.escape(unidecode(lang2)), xmllist)[0]
                lang = lang2
            except IndexError:
                lang = '[English (US)]'
    sub_id3 = [word.replace('[English (US)]','eng') for word in sub_id3]
    sub_id3 = [word.replace('[Deutsch]','deu') for word in sub_id3]
    sub_id3 = [word.replace('[Portugues (Brasil)]','por') for word in sub_id3]
    sub_id3 = [word.replace('[Francais (France)]','fre') for word in sub_id3]
    sub_id3 = [word.replace('[Espanol (Espana)]','spa_spa') for word in sub_id3]
    sub_id3 = [word.replace('[Espanol]','spa') for word in sub_id3]
    sub_id3 = [word.replace('[Italiano]','ita') for word in sub_id3]
    sub_id3 = [word.replace('[l`rby@]','ara') for word in sub_id3]
#    sub_id4 = [word.replace('[l`rby@]',u'[العربية]') for word in sub_id4]
    sub_id4 = [word.replace('[l`rby@]',u'[Arabic]') for word in sub_id4]
    sub_id5 = [word.replace('[English (US)]','eng') for word in sub_id5]
    sub_id5 = [word.replace('[Deutsch]','deu') for word in sub_id5]
    sub_id5 = [word.replace('[Portugues (Brasil)]','por') for word in sub_id5]
    sub_id5 = [word.replace('[Francais (France)]','fre') for word in sub_id5]
    sub_id5 = [word.replace('[Espanol (Espana)]','spa_spa') for word in sub_id5]
    sub_id5 = [word.replace('[Espanol]','spa') for word in sub_id5]
    sub_id5 = [word.replace('[Italiano]','ita') for word in sub_id5]
    sub_id5 = [word.replace('[l`rby@]','ara') for word in sub_id5]
#    sub_id6 = [word.replace('[l`rby@]',u'[العربية]') for word in sub_id6]
    sub_id6 = [word.replace('[l`rby@]',u'[Arabic]') for word in sub_id6]
#    else:
#        try:
#            sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang1)), xmllist)[0]
#            hardcoded = False
#            lang = lang1
#        except IndexError:
#            try:
#                sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang2)), xmllist)[0]
#                print 'Language not found, reverting to ' + lang2 + '.'
#                hardcoded = False
#                lang = lang2
#            except IndexError:
#                try:
#                    sub_id = re.findall("id=([0-9]+)' title='\[English", xmllist)[0]  # default back to English
#                    print 'Backup language not found, reverting to English.'
#                    hardcoded = False
#                    lang = 'English'
#                except IndexError:
#                    print "The video's subtitles cannot be found, or are region-locked."
#                    hardcoded = True
#                    sub_id = False

    if not hardcoded:
        for i in sub_id2:
            #xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', sub_id)
            xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', i)
            formattedsubs = CrunchyDec().returnsubs(xmlsub)
            #subfile = open(eptitle + '.ass', 'wb')
            subfile = open('.\\export\\' + title + '[' + sub_id3.pop(0) + ']' + sub_id4.pop(0) + '.ass', 'wb')
            subfile.write(formattedsubs.encode('utf-8-sig'))
            subfile.close()
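The chained str.replace calls in this example map bracketed language labels to ISO-style codes one label at a time. As a minimal, self-contained sketch (not from the original toolkit), the same mapping can be expressed as a single dict lookup; the label spellings below are the unidecode'd forms used throughout these examples:

# Hypothetical helper: map unidecode'd Crunchyroll language labels to the
# ISO-style codes used in these examples; unknown labels pass through unchanged.
LANG_ISO = {
    '[English (US)]': 'eng',
    '[Deutsch]': 'deu',
    '[Portugues (Brasil)]': 'por',
    '[Francais (France)]': 'fre',
    '[Espanol (Espana)]': 'spa_spa',
    '[Espanol]': 'spa',
    '[Italiano]': 'ita',
    '[l`rby@]': 'ara',  # unidecode'd Arabic label
}

def to_iso(labels):
    return [LANG_ISO.get(label, label) for label in labels]

print(to_iso(['[English (US)]', '[Deutsch]', '[Italiano]']))  # ['eng', 'deu', 'ita']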
Example #3
def ultimate(page_url, seasonnum, epnum):
    global url1, url2, filen, player_revision, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2

    print '''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')
	
    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if not page_url.startswith('http://') and not page_url.startswith('https://'):
            page_url = 'http://' + page_url
        try:
            int(page_url[-6:])
        except ValueError:
            if bool(seasonnum) and bool(epnum):
                page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
            elif bool(epnum):
                page_url = altfuncs.vidurl(page_url, 1, epnum)
            else:
                page_url = altfuncs.vidurl(page_url, False, False)

    subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)

    # ----------

    #lang1, lang2 = altfuncs.config()
    #lang1, lang2, forcesub = altfuncs.config()
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
    player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)

    h = HTMLParser.HTMLParser()
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.getcwd()+'\\export\\'+title+'.flv') > 255:
        title = re.findall('^(.+?) \- ', title)[0]

    # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
    # replace('?', '.').replace('"', "''").replace('|', '-').replace('&quot;',"''").strip()
    
    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    subprocess.call('title ' + title.replace('&', '^&'), shell=True)

    # ----------

    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')

    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass

    vid_id = xmlconfig.find('media_id').string

    # ----------

    try:
        host = xmlconfig.find('host').string
    except AttributeError:
        print 'Downloading 2 minute preview.'
        media_id = xmlconfig.find('media_id').string
        xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id), 'xml')
        host = xmlconfig.find('host').string

    if re.search('fplive\.net', host):
        url1 = re.findall('.+/c[0-9]+', host).pop()
        url2 = re.findall('c[0-9]+\?.+', host).pop()
    else:
        url1 = re.findall('.+/ondemand/', host).pop()
        url2 = re.findall('ondemand/.+', host).pop()
    filen = xmlconfig.find('file').string

    # ----------
    if 'subs' in sys.argv:
        subtitles(title)
        subs_only = True
        hardcoded = True  # bleh
    else:
        page_url2 = page_url
        video()
        #heightp = subprocess.Popen('"video-engine\MediaInfo.exe" --inform=Video;%Height% ".\export\\' + title + '.flv"' ,shell=True , stdout=subprocess.PIPE).stdout.read()
        heightp = {'71' : 'android', '60' : '360p', '61' : '480p',
                 '62' : '720p', '80' : '1080p', '0' : 'highest'}[xmlconfig.find('video_encode_quality').string]
        subtitles(title)
        subtitlefilecode=''
        #shutil.move(title + '.flv', os.path.join(os.getcwd(), 'export', ''))


        print 'Starting mkv merge'
        if hardcoded:
            subprocess.call('"video-engine\mkvmerge.exe" -o ".\export\\' + title + '[' + heightp.strip() +'p].mkv" --language 1:jpn -a 1 -d 0 ' +
                            '".\export\\' + title + '.flv"' +' --title "' + title +'"')
        else:
            sublang = {u'Español (Espana)': 'spa_spa', u'Français (France)': 'fre', u'Português (Brasil)': 'por',
                       u'English': 'eng', u'Español': 'spa', u'Türkçe': 'tur', u'Italiano': 'ita',
                       u'العربية': 'ara', u'Deutsch': 'deu'}[lang]
    #		defaulttrack = False
            #print lang.encode('utf-8')
            #print sub_id5
            #print sub_id6
            for i in sub_id2:
                defaultsub = ''
                sublangc = sub_id5.pop(0)
                sublangn = sub_id6.pop(0)
#                print forcesub
                if not forcesub:
                    if sublangc == sublang:
                        defaultsub = ' --default-track 0:yes --forced-track 0:no'
                    else:
                        defaultsub = ' --default-track 0:no --forced-track 0:no'
                else:
                    if sublangc == sublang:
                        defaultsub = ' --default-track 0:yes --forced-track 0:yes'
                    else:
                        defaultsub = ' --default-track 0:no --forced-track 0:no'
                if not onlymainsub:
                    subtitlefilecode = subtitlefilecode + ' --language 0:' + sublangc.replace('spa_spa', 'spa') + defaultsub + ' --track-name 0:"' + sublangn + '" -s 0 ".\export\\' + title + '[' + sublangc + ']' + sublangn + '.ass"'
                else:
                    if sublangc == sublang:
                        subtitlefilecode = subtitlefilecode + ' --language 0:' + sublangc.replace('spa_spa', 'spa') + defaultsub + ' --track-name 0:"' + sublangn + '" -s 0 ".\export\\' + title + '[' + sublangc + ']' + sublangn + '.ass"'
    #        subprocess.call('"video-engine\mkvmerge.exe" -o ".\export\\' + title + '.mkv" --language 1:jpn -a 1 -d 0 ' +
    #                        '".\export\\' + title + '.flv" --language 0:' + sublang + ' -s 0 ".\export\\'+title+'.ass"')
    #        print '"video-engine\mkvmerge.exe" -o ".\export\\' + title + '.mkv" --language 0:jpn --language 1:jpn -a 1 -d 0 ' + '".\export\\' + title + '.flv"' + subtitlefilecode +' --title "' + title +'"'
            mkvcmd='"video-engine\mkvmerge.exe" -o ".\export\\' + title + '[' + heightp.strip() +'].mkv" --language 0:jpn --language 1:jpn -a 1 -d 0 ' + '".\export\\' + title + '.flv"' + subtitlefilecode +' --title "' + title +'"'
    #        print mkvcmd
            #print subtitlefilecode
            subprocess.call(mkvcmd)
        print 'Merge process complete'
        subs_only = False

    print
    print '----------'
    print

    print 'Starting Final Cleanup'
    if not subs_only:
        os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.flv')
    if not hardcoded or not subs_only:
        #os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.ass')
        for root, dirs, files in os.walk('export'):
            for file in filter(lambda x: re.match(title +'\[.+\]'+ '.ass', x), files):
                os.remove(os.path.join(root, file))
    print 'Cleanup Complete'
Example #4
def decode(argv_=''):
    print('''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
''')
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub, connection_n_, proxy_ = config(
    )
    if argv_ == '':
        argv_ = input('Please enter Crunchyroll video URL:\n')
    #print(argv_, re.findall('https?:\/\/www\.crunchyroll\.com\/.+\/.+-(\d*)',argv_))
    if re.findall('https?:\/\/www\.crunchyroll\.com\/.+\/.+-(\d*)',
                  argv_) == []:
        print(idle_cmd_txt_fix("\x1b[31m" + "ERROR: Invalid URL." + "\x1b[0m"))
        exit()
    #html = gethtml(argv_)
    #print str(argv_)[:15]

    #if html == '':

    #with open('.\html_ex.txt', 'r') as myfile:
    #    html = myfile.read().strip()
    #import urllib
    #html = urllib.urlopen('E:\+Jwico\Manual & Catalog\a\l\z\project\Military! Episode 1 - Watch on Crunchyroll.html').read()
    #   with open("..\..\Military! Episode 1 - Watch on Crunchyroll.html", 'r') as myfile:
    #       html = myfile.read()
    #BeautifulSoup(unicode(html, errors='ignore')).get_text()
    #html = BeautifulSoup(open('.\html_ex.txt', 'r', 'utf-8').read()).get_text()
    #print html
    '''
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    title = title.replace(' - Watch on Crunchyroll', '')

    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

    warnings.simplefilter("ignore")
    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))
    warnings.simplefilter("default")

    ### End stolen code ###

    if len(os.path.join(os.path.abspath('export'), title + '.ass')) > 255:
        eps_num = re.findall('([0-9].*?)$', title)[0]
        title = title[:246-len(os.path.join(os.path.abspath('export')))-len(eps_num)] + '~ Ep' +eps_num
	
    print os.path.join(os.path.abspath('export'), title +'.ass')
    '''
    media_id = re.findall('https?:\/\/www\.crunchyroll\.com\/.+\/.+-(\d*)',
                          argv_)[0]
    #xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')
    xmlconfig = getxml('RpcApiVideoPlayer_GetStandardConfig', media_id)
    #print xmlconfig
    #print xmlconfig['subtitle']
    if xmlconfig['subtitle'] == []:
        print('The video has hardcoded subtitles.')
        hardcoded = True
        sub_id = False
    else:
        #lang_iso = {'English (US)':'eng',u'Espa\xc3\xb1ol':'spa',u'Espa\xc3\xb1ol (Espa\xc3\xb1a)':'spa',u'Fran\xc3\xa7ais (France)':'fre',u'Portugu\xc3\xaas (Brasil)':'por','Italiano':'ita','Deutsch':'deu'}
        #lang_iso = {'English (US)':'eng',u'Espa\xf1ol':'spa',u'Espa\xf1ol (Espa\xf1a)':'spa',u'Fran\xe7ais (France)':'fre',u'Portugu\xeas (Brasil)':'por','Italiano':'ita','Deutsch':'deu'}
        lang_iso = {
            'English (US)': 'eng',
            u'Espa\xf1ol': 'spa',
            u'Espa\xf1ol (Espa\xf1a)': 'spa',
            u'Fran\xe7ais (France)': 'fre',
            u'Portugu\xeas (Brasil)': 'por',
            'Italiano': 'ita',
            'Deutsch': 'deu',
            'العربية': 'ara',
            'Русский': 'rus'
        }

        #    sub_id3 = [word.replace('[l`rby@]','ara') for word in sub_id3]
        for i in xmlconfig['subtitle']:
            sub_file_ = dircheck([
                os.path.abspath('export') + '\\',
                xmlconfig['media_metadata']['series_title'], ' Episode',
                ' - ' + xmlconfig['media_metadata']['episode_number'],
                ' - ' + xmlconfig['media_metadata']['episode_title'],
                '[' + lang_iso[re.findall('\[(.+)\]', i[1])[0]] + ']',
                '[' + re.findall('\[(.+)\]', i[1])[0] + ']', '.ass'
            ], ['True', 'True', 'False', 'True', 1, 'True', 'False', 'True'],
                                 240)
            #print os.path.join('export', xmlconfig['media_metadata']['series_title'] + ' Episode ' + xmlconfig['media_metadata']['episode_number']+'['+lang_iso[re.findall('\[(.+)\]',i[1])[0]]+']['+re.findall('\[(.+)\]',i[1])[0]+'].ass')
            #xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', sub_id)
            print("Attempting to download " + re.findall('\[(.+)\]', i[1])[0] +
                  " subtitle...")
            xmlsub = getxml('RpcApiSubtitle_GetXml', i[0])
            formattedsubs = CrunchyDec().returnsubs(xmlsub)
            if formattedsubs is None:
                continue
            #subfile = open(eptitle + '.ass', 'wb')
            subfile = open(sub_file_, 'wb')
            subfile.write(formattedsubs.encode('utf8'))
            subfile.close()

    pass
Example #5
def decode(page_url):
    print '''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')

    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config(
    )
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)

    #h = HTMLParser.HTMLParser()
    title = re.findall('<title>(.+?)</title>',
                       html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.path.join('export', title + '.ass')) > 255:
        title = re.findall('^(.+?) \- ', title)[0]

    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {
        ' / ': ' - ',
        '/': ' - ',
        ':': '-',
        '?': '.',
        '"': "''",
        '|': '-',
        '&quot;': "''",
        'a*G': 'a G',
        '*': '#',
        u'\u2026': '...'
    }

    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(
        altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id),
        'xml')

    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass

    xmllist = altfuncs.getxml('RpcApiSubtitle_GetListing', media_id)
    xmllist = unidecode(xmllist).replace('><', '>\n<')

    if '<media_id>None</media_id>' in xmllist:
        print 'The video has hardcoded subtitles.'
        hardcoded = True
        sub_id = False
    else:
        try:
            sub_id2 = re.findall("id=([0-9]+)", xmllist)
            sub_id3 = re.findall("title='(\[.+\]) ", xmllist)
            sub_id4 = re.findall("title='(\[.+\]) ", xmllist)
            hardcoded = False
        except IndexError:
            print "The video's subtitles cannot be found, or are region-locked."
            hardcoded = True
            sub_id = False
    sub_id3 = [word.replace('[English (US)]', 'eng') for word in sub_id3]
    sub_id3 = [word.replace('[Deutsch]', 'deu') for word in sub_id3]
    sub_id3 = [word.replace('[Portugues (Brasil)]', 'por') for word in sub_id3]
    sub_id3 = [word.replace('[Francais (France)]', 'fre') for word in sub_id3]
    sub_id3 = [word.replace('[Espanol (Espana)]', 'spa') for word in sub_id3]
    sub_id3 = [word.replace('[Espanol]', 'spa') for word in sub_id3]
    sub_id3 = [word.replace('[Italiano]', 'ita') for word in sub_id3]
    sub_id3 = [word.replace('[l`rby@]', 'ara') for word in sub_id3]
    #sub_id4 = [word.replace('[l`rby@]',u'[العربية]') for word in sub_id4]
    sub_id4 = [word.replace('[l`rby@]', u'[Arabic]')
               for word in sub_id4]  #else:
    #	try:
    #		sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang1)), xmllist)[0]
    #		hardcoded = False
    #		lang = lang1
    #	except IndexError:
    #		try:
    #			sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang2)), xmllist)[0]
    #			print 'Language not found, reverting to ' + lang2 + '.'
    #			hardcoded = False
    #			lang = lang2
    #		except IndexError:
    #			try:
    #				sub_id = re.findall("id=([0-9]+)' title='\[English", xmllist)[0]  # default back to English
    #				print 'Backup language not found, reverting to English.'
    #				hardcoded = False
    #				lang = 'English'
    #			except IndexError:
    #				print "The video's subtitles cannot be found, or are region-locked."
    #				hardcoded = True
    #				sub_id = False
    if not hardcoded:
        for i in sub_id2:
            #xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', sub_id)
            xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', i)
            formattedsubs = CrunchyDec().returnsubs(xmlsub)
            if formattedsubs is None:
                continue
            #subfile = open(eptitle + '.ass', 'wb')
            subfile = open(
                os.path.join(
                    'export', title + '[' + sub_id3.pop(0) + ']' +
                    sub_id4.pop(0) + '.ass'), 'wb')
            subfile.write(formattedsubs.encode('utf-8-sig'))
            subfile.close()
        #shutil.move(title + '.ass', os.path.join(os.getcwd(), 'export', ''))

    print 'Subtitles for ' + title + ' have been downloaded'
Example #6
def decode(page_url):
    print '''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')

    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)

    #h = HTMLParser.HTMLParser()
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.path.join('export', title+'.ass')) > 255:
        title = re.findall('^(.+?) \- ', title)[0]

    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')

    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass

    xmllist = altfuncs.getxml('RpcApiSubtitle_GetListing', media_id)
    xmllist = unidecode(xmllist).replace('><', '>\n<')



    if '<media_id>None</media_id>' in xmllist:
        print 'The video has hardcoded subtitles.'
        hardcoded = True
        sub_id = False
    else:
        try:
            sub_id2 = re.findall("id=([0-9]+)", xmllist)
            sub_id3 = re.findall("title='(\[.+\]) ", xmllist)
            sub_id4 = re.findall("title='(\[.+\]) ", xmllist)
            hardcoded = False
        except IndexError:
            print "The video's subtitles cannot be found, or are region-locked."
            hardcoded = True
            sub_id = False
    sub_id3 = [word.replace('[English (US)]','eng') for word in sub_id3]
    sub_id3 = [word.replace('[Deutsch]','deu') for word in sub_id3]
    sub_id3 = [word.replace('[Portugues (Brasil)]','por') for word in sub_id3]
    sub_id3 = [word.replace('[Francais (France)]','fre') for word in sub_id3]
    sub_id3 = [word.replace('[Espanol (Espana)]','spa') for word in sub_id3]
    sub_id3 = [word.replace('[Espanol]','spa') for word in sub_id3]
    sub_id3 = [word.replace('[Italiano]','ita') for word in sub_id3]
    sub_id3 = [word.replace('[l`rby@]','ara') for word in sub_id3]
    #sub_id4 = [word.replace('[l`rby@]',u'[العربية]') for word in sub_id4]
    sub_id4 = [word.replace('[l`rby@]',u'[Arabic]') for word in sub_id4]#else:
    #   try:
    #       sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang1)), xmllist)[0]
    #       hardcoded = False
    #       lang = lang1
    #   except IndexError:
    #       try:
    #           sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang2)), xmllist)[0]
    #           print 'Language not found, reverting to ' + lang2 + '.'
    #           hardcoded = False
    #           lang = lang2
    #       except IndexError:
    #           try:
    #               sub_id = re.findall("id=([0-9]+)' title='\[English", xmllist)[0]  # default back to English
    #               print 'Backup language not found, reverting to English.'
    #               hardcoded = False
    #               lang = 'English'
    #           except IndexError:
    #               print "The video's subtitles cannot be found, or are region-locked."
    #               hardcoded = True
    #               sub_id = False
    if not hardcoded:
        for i in sub_id2:
            #xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', sub_id)
            xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', i)
            formattedsubs = CrunchyDec().returnsubs(xmlsub)
            if formattedsubs is None:
                continue
            #subfile = open(eptitle + '.ass', 'wb')
            subfile = open(os.path.join('export', title+'['+sub_id3.pop(0)+']'+sub_id4.pop(0)+'.ass'), 'wb')
            subfile.write(formattedsubs.encode('utf-8-sig'))
            subfile.close()
        #shutil.move(title + '.ass', os.path.join(os.getcwd(), 'export', ''))

    print 'Subtitles for '+title+' have been downloaded'
Example #7
def subtitles(eptitle):
    global sub_id
    global sub_id2
    global sub_id3
    global sub_id4
    global sub_id5
    global sub_id6
    global lang

    xmllist = altfuncs.getxml('RpcApiSubtitle_GetListing', media_id)
    xmllist = unidecode(xmllist).replace('><', '>\n<')

    global hardcoded
    if '<media_id>None</media_id>' in xmllist:
        print 'The video has hardcoded subtitles.'
        hardcoded = True
        sub_id = False
    else:
        try:
            sub_id2 = re.findall("id=([0-9]+)", xmllist)
            sub_id3 = re.findall("title='(\[.+\]) ", xmllist)
            sub_id4 = re.findall("title='(\[.+\]) ", xmllist)
            sub_id5 = re.findall("title='(\[.+\]) ", xmllist)
            sub_id6 = re.findall("title='(\[.+\]) ", xmllist)
            hardcoded = False
#			try:
#				sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang1)), xmllist)[0]
#				lang = lang1
#			except IndexError:
#				try:
#					sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang2)), xmllist)[0]
#					lang = lang2
        except IndexError:
            print "The video's subtitles cannot be found, or are region-locked."
            hardcoded = True
            sub_id = False
        try:
            sub_id = re.findall(
                "id=([0-9]+)' title='\[" + re.escape(unidecode(lang1)),
                xmllist)[0]
            lang = lang1
        except IndexError:
            try:
                sub_id = re.findall(
                    "id=([0-9]+)' title='\[" + re.escape(unidecode(lang2)),
                    xmllist)[0]
                lang = lang2
            except IndexError:
                lang = 'English'
    sub_id3 = [word.replace('[English (US)]', 'eng') for word in sub_id3]
    sub_id3 = [word.replace('[Deutsch]', 'deu') for word in sub_id3]
    sub_id3 = [word.replace('[Portugues (Brasil)]', 'por') for word in sub_id3]
    sub_id3 = [word.replace('[Francais (France)]', 'fre') for word in sub_id3]
    sub_id3 = [
        word.replace('[Espanol (Espana)]', 'spa_spa') for word in sub_id3
    ]
    sub_id3 = [word.replace('[Espanol]', 'spa') for word in sub_id3]
    sub_id3 = [word.replace('[Italiano]', 'ita') for word in sub_id3]
    sub_id3 = [word.replace('[l`rby@]', 'ara') for word in sub_id3]
    #    sub_id4 = [word.replace('[l`rby@]',u'[العربية]') for word in sub_id4]
    sub_id4 = [word.replace('[l`rby@]', u'[Arabic]') for word in sub_id4]
    sub_id5 = [word.replace('[English (US)]', 'eng') for word in sub_id5]
    sub_id5 = [word.replace('[Deutsch]', 'deu') for word in sub_id5]
    sub_id5 = [word.replace('[Portugues (Brasil)]', 'por') for word in sub_id5]
    sub_id5 = [word.replace('[Francais (France)]', 'fre') for word in sub_id5]
    sub_id5 = [
        word.replace('[Espanol (Espana)]', 'spa_spa') for word in sub_id5
    ]
    sub_id5 = [word.replace('[Espanol]', 'spa') for word in sub_id5]
    sub_id5 = [word.replace('[Italiano]', 'ita') for word in sub_id5]
    sub_id5 = [word.replace('[l`rby@]', 'ara') for word in sub_id5]
    #    sub_id6 = [word.replace('[l`rby@]',u'[العربية]') for word in sub_id6]
    sub_id6 = [word.replace('[l`rby@]', u'[Arabic]') for word in sub_id6]
    #    else:
    #        try:
    #            sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang1)), xmllist)[0]
    #            hardcoded = False
    #            lang = lang1
    #        except IndexError:
    #            try:
    #                sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang2)), xmllist)[0]
    #                print 'Language not found, reverting to ' + lang2 + '.'
    #                hardcoded = False
    #                lang = lang2
    #            except IndexError:
    #                try:
    #                    sub_id = re.findall("id=([0-9]+)' title='\[English", xmllist)[0]  # default back to English
    #                    print 'Backup language not found, reverting to English.'
    #                    hardcoded = False
    #                    lang = 'English'
    #                except IndexError:
    #                    print "The video's subtitles cannot be found, or are region-locked."
    #                    hardcoded = True
    #                    sub_id = False

    if not hardcoded:
        for i in sub_id2:
            #xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', sub_id)
            xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', i)
            formattedsubs = CrunchyDec().returnsubs(xmlsub)
            if formattedsubs is None:
                continue
            #subfile = open(eptitle + '.ass', 'wb')
            subfile = open(
                os.path.join(
                    'export', title + '[' + sub_id3.pop(0) + ']' +
                    sub_id4.pop(0) + '.ass'), 'wb')
            subfile.write(formattedsubs.encode('utf-8-sig'))
            subfile.close()
Example #8
def ultimate(page_url, seasonnum, epnum):
    global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2
    #global player_revision

    print '''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')

    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if not page_url.startswith('http://') and not page_url.startswith(
                'https://'):
            page_url = 'http://' + page_url
        try:
            int(page_url[-6:])
        except ValueError:
            if bool(seasonnum) and bool(epnum):
                page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
            elif bool(epnum):
                page_url = altfuncs.vidurl(page_url, 1, epnum)
            else:
                page_url = altfuncs.vidurl(page_url, False, False)

    #subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)

    # ----------

    #lang1, lang2 = altfuncs.config()
    #lang1, lang2, forcesub = altfuncs.config()
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config(
    )
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)

    #h = HTMLParser.HTMLParser()
    title = re.findall('<title>(.+?)</title>',
                       html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.path.join('export', title + '.flv')) > 255:
        title = re.findall('^(.+?) \- ', title)[0]

    # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
    # replace('?', '.').replace('"', "''").replace('|', '-').replace('&quot;',"''").strip()

    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {
        ' / ': ' - ',
        '/': ' - ',
        ':': '-',
        '?': '.',
        '"': "''",
        '|': '-',
        '&quot;': "''",
        'a*G': 'a G',
        '*': '#',
        u'\u2026': '...'
    }

    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    #subprocess.call('title ' + title.replace('&', '^&'), shell=True)

    # ----------

    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(
        altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id),
        'xml')

    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass

    vid_id = xmlconfig.find('media_id').string

    # ----------

    host = xmlconfig.find('host')
    if host:
        host = host.string

    filen = xmlconfig.find('file')
    if filen:
        filen = filen.string

    if not host and not filen:
        print 'Downloading 2 minute preview.'
        media_id = xmlconfig.find('media_id').string
        xmlconfig = BeautifulSoup(
            altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id),
            'xml')
        host = xmlconfig.find('host').string

    # ----------
    if 'subs' in sys.argv:
        subtitles(title)
        subs_only = True
        hardcoded = True  # bleh
    else:
        page_url2 = page_url
        if host:
            if re.search('fplive\.net', host):
                url1 = re.findall('.+/c[0-9]+', host).pop()
                url2 = re.findall('c[0-9]+\?.+', host).pop()
            else:
                url1 = re.findall('.+/ondemand/', host).pop()
                url2 = re.findall('ondemand/.+', host).pop()
            video()
            video_input = os.path.join("export", title + '.flv')
        else:
            video_input = os.path.join("export", title + '.ts')
            video_hls(filen, video_input)

        heightp = '360p' if xmlconfig.height.string == '368' else '{0}p'.format(
            xmlconfig.height.string)  # This is less likely to fail
        subtitles(title)

        print 'Starting mkv merge'
        mkvmerge = os.path.join("video-engine", "mkvmerge.exe")
        filename_output = os.path.join("export",
                                       title + '[' + heightp.strip() + '].mkv')
        subtitle_input = []
        if os.path.isfile(mkvmerge):
            with_wine = os.name != 'nt'
        else:
            mkvmerge = "mkvmerge"
            with_wine = False
        cmd = [
            mkvmerge, "-o", filename_output, '--language', '0:jpn',
            '--language', '1:jpn', '-a', '1', '-d', '0', video_input,
            '--title', title
        ]
        if with_wine:
            cmd.insert(0, 'wine')
        if not hardcoded:
            sublang = {
                u'Español (Espana)': 'spa_spa',
                u'Français (France)': 'fre',
                u'Português (Brasil)': 'por',
                u'English': 'eng',
                u'Español': 'spa',
                u'Türkçe': 'tur',
                u'Italiano': 'ita',
                u'العربية': 'ara',
                u'Deutsch': 'deu'
            }[lang]
            for i in sub_id2:
                sublangc = sub_id5.pop(0)
                sublangn = sub_id6.pop(0)

                if onlymainsub and sublangc != sublang:
                    continue

                filename_subtitle = os.path.join(
                    "export", title + '[' + sublangc + ']' + sublangn + '.ass')
                if not os.path.isfile(filename_subtitle):
                    continue

                cmd.extend(
                    ['--language', '0:' + sublangc.replace('spa_spa', 'spa')])

                if sublangc == sublang:
                    cmd.extend(['--default-track', '0:yes'])
                else:
                    cmd.extend(['--default-track', '0:no'])
                if forcesub:
                    cmd.extend(['--forced-track', '0:yes'])
                else:
                    cmd.extend(['--forced-track', '0:no'])

                cmd.extend(['--track-name', '0:' + sublangn])
                cmd.extend(['-s', '0'])
                cmd.append(filename_subtitle)
                subtitle_input.append(filename_subtitle)
        subprocess.call(cmd)
        print 'Merge process complete'
        subs_only = False

    print
    print '----------'
    print

    print 'Starting Final Cleanup'
    if not subs_only:
        os.remove(video_input)
    if not hardcoded or not subs_only:
        #os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.ass')
        for f in subtitle_input:
            os.remove(f)
    print 'Cleanup Complete'
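One point worth noting about this variant: mkvmerge is invoked with an argument list rather than a hand-quoted command string, which sidesteps the shell-quoting problems of the string-built commands in the earlier examples. A small standalone sketch with hypothetical file names:

import subprocess

title = 'Some Show Episode 1'               # hypothetical values for illustration
video_input = 'export/' + title + '.flv'
output = 'export/' + title + '[720p].mkv'

cmd = ['mkvmerge', '-o', output,
       '--language', '1:jpn', '-a', '1', '-d', '0',
       video_input, '--title', title]
print(' '.join(cmd))     # rough preview of the command line
# subprocess.call(cmd)   # run mkvmerge if it is on PATH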
Example #9
### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

rep = dict((re.escape(k), v) for k, v in rep.iteritems())
pattern = re.compile("|".join(rep.keys()))
title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

### End stolen code ###

subprocess.call('title ' + title.replace('&', '^&'), shell=True)

# ----------

media_id = page_url[-6:]
xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')

try:
    if '4' in xmlconfig.find_all('code')[0]:
        print xmlconfig.find_all('msg')[0].text
        sys.exit()
except IndexError:
    pass

vid_id = xmlconfig.find('media_id').string

# ----------

try:
    host = xmlconfig.find('host').string
except AttributeError:
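The multi-string replacement recipe that opens this example (and recurs throughout these snippets) can be tried on its own; a minimal, self-contained sketch:

import re

rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''",
       '|': '-', '&quot;': "''", '*': '#', u'\u2026': '...'}
rep = dict((re.escape(k), v) for k, v in rep.items())
pattern = re.compile("|".join(rep.keys()))

def sanitize(title):
    # Apply every replacement from `rep` in a single regex pass.
    return pattern.sub(lambda m: rep[re.escape(m.group(0))], title)

print(sanitize('Show: Episode 1/2?'))  # Show- Episode 1 - 2.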
Example #10
subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)

# ----------

player_revision = altfuncs.playerrev(page_url)
html = altfuncs.gethtml(page_url)

h = HTMLParser.HTMLParser()
title = re.findall('<title>(.+?)</title>', html).pop().replace('Crunchyroll - Watch ', '')
title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').replace('?', '.').replace('"', '\'').strip()
subprocess.call('title ' + title.replace('&', '^&'), shell=True)

# ----------

media_id = page_url[-6:]
xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')
if '<code>4</code>' in xmlconfig:
    print 'Video not available in your region.'
    sys.exit()
vid_id = xmlconfig.find('media_id').string

# ----------

try:
    host = xmlconfig.find('host').string
except AttributeError:
    print 'Downloading 2 minute preview.'
    media_id = xmlconfig.find('media_id').string
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id), 'xml')
    host = xmlconfig.find('host').string
Example #11
def ultimate(page_url, seasonnum, epnum):
    global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2, onlymainsub
    #global player_revision

    print '''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')

    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if not page_url.startswith('http://') and not page_url.startswith('https://'):
            page_url = 'http://' + page_url
        try:
            int(page_url[-6:])
        except ValueError:
            if bool(seasonnum) and bool(epnum):
                page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
            elif bool(epnum):
                page_url = altfuncs.vidurl(page_url, 1, epnum)
            else:
                page_url = altfuncs.vidurl(page_url, False, False)

    #subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)

    # ----------

    #lang1, lang2 = altfuncs.config()
    #lang1, lang2, forcesub = altfuncs.config()
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)

    #h = HTMLParser.HTMLParser()
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.path.join('export', title+'.flv')) > 255:
        title = re.findall('^(.+?) \- ', title)[0]

    # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
    # replace('?', '.').replace('"', "''").replace('|', '-').replace('&quot;',"''").strip()

    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    #subprocess.call('title ' + title.replace('&', '^&'), shell=True)

    # ----------

    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')

    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass

    # ----------

    host = xmlconfig.host and xmlconfig.host.string
    filen = xmlconfig.file and xmlconfig.file.string

    if not (host or filen):
        print 'Downloading 2 minute preview.'

    media_id = xmlconfig.media_id.string
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id), 'xml')
    host = xmlconfig.host.string
    filen = xmlconfig.file.string

    # ----------
    if 'subs' in sys.argv:
        subtitles(title)
        subs_only = True
        hardcoded = True  # bleh
    else:
        page_url2 = page_url
        if host:
            if re.search('fplive\.net', host):
                url1 = re.findall('.+/c[0-9]+', host).pop()
                url2 = re.findall('c[0-9]+\?.+', host).pop()
            else:
                url1 = re.findall('.+/ondemand/', host).pop()
                url2 = re.findall('ondemand/.+', host).pop()
            video()
            video_input = os.path.join("export", title + '.flv')
        else:
            video_input = os.path.join("export", title + '.ts')
            video_hls(filen, video_input)

        heightp = '360p' if xmlconfig.height.string == '368' else '{0}p'.format(xmlconfig.height.string)  # This is less likely to fail
        subtitles(title)

        print 'Starting mkv merge'
        mkvmerge = os.path.join("video-engine", "mkvmerge.exe")
        filename_output = os.path.join("export", title + '[' + heightp.strip() +'].mkv')
        subtitle_input = []
        if os.path.isfile(mkvmerge):
            with_wine = os.name != 'nt'
        else:
            mkvmerge = "mkvmerge"
            with_wine = False
        cmd = [mkvmerge, "-o", filename_output, '--language', '0:jpn', '--language', '1:jpn', '-a', '1', '-d', '0', video_input, '--title', title]
        if with_wine:
            cmd.insert(0, 'wine')
        if not hardcoded:
            sublang = {u'Español (Espana)': 'spa_spa', u'Français (France)': 'fre', u'Português (Brasil)': 'por',
                       u'English': 'eng', u'Español': 'spa', u'Türkçe': 'tur', u'Italiano': 'ita',
                       u'العربية': 'ara', u'Deutsch': 'deu'}[lang]
            for i in sub_id2:
                sublangc=sub_id5.pop(0)
                sublangn=sub_id6.pop(0)

                if onlymainsub and sublangc != sublang:
                    continue

                filename_subtitle = os.path.join("export", title+'['+sublangc+']'+sublangn+'.ass')
                if not os.path.isfile(filename_subtitle):
                    continue

                cmd.extend(['--language', '0:' + sublangc.replace('spa_spa','spa')])

                if sublangc == sublang:
                    cmd.extend(['--default-track', '0:yes'])
                else:
                    cmd.extend(['--default-track', '0:no'])
                if forcesub:
                    cmd.extend(['--forced-track', '0:yes'])
                else:
                    cmd.extend(['--forced-track', '0:no'])

                cmd.extend(['--track-name', '0:' + sublangn])
                cmd.extend(['-s', '0'])
                cmd.append(filename_subtitle)
                subtitle_input.append(filename_subtitle)
        subprocess.call(cmd)
        print 'Merge process complete'
        subs_only = False

    print
    print '----------'
    print

    print 'Starting Final Cleanup'
    if not subs_only:
        os.remove(video_input)
    if not hardcoded or not subs_only:
        #os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.ass')
        for f in subtitle_input:
            os.remove(f)
    print 'Cleanup Complete'
Example #12
    '"': "''",
    "|": "-",
    "&quot;": "''",
    "a*G": "a G",
    "*": "#",
    u"\u2026": "...",
}

rep = dict((re.escape(k), v) for k, v in rep.iteritems())
pattern = re.compile("|".join(rep.keys()))
title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

### End stolen code ###

media_id = page_url[-6:]
xmlconfig = BeautifulSoup(altfuncs.getxml("RpcApiVideoPlayer_GetStandardConfig", media_id), "xml")

try:
    if "4" in xmlconfig.find_all("code")[0]:
        print xmlconfig.find_all("msg")[0].text
        sys.exit()
except IndexError:
    pass

xmllist = altfuncs.getxml("RpcApiSubtitle_GetListing", media_id)
xmllist = unidecode(xmllist).replace("><", ">\n<")

if "<media_id>None</media_id>" in xmllist:
    print "The video has hardcoded subtitles."
    hardcoded = True
    sub_id = False
Example #13
def ultimate(page_url, seasonnum, epnum):
    global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2
    #global player_revision

    print '''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')
	
    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if not page_url.startswith('http://') and not page_url.startswith('https://'):
            page_url = 'http://' + page_url
        try:
            int(page_url[-6:])
        except ValueError:
            if bool(seasonnum) and bool(epnum):
                page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
            elif bool(epnum):
                page_url = altfuncs.vidurl(page_url, 1, epnum)
            else:
                page_url = altfuncs.vidurl(page_url, False, False)

    subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)

    # ----------

    #lang1, lang2 = altfuncs.config()
    #lang1, lang2, forcesub = altfuncs.config()
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)

    h = HTMLParser.HTMLParser()
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.getcwd()+'./export/'+title+'.flv') > 255:
        title = re.findall('^(.+?) \- ', title)[0]

    # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
    # replace('?', '.').replace('"', "''").replace('|', '-').replace('&quot;',"''").strip()
    
    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    subprocess.call('title ' + title.replace('&', '^&'), shell=True)

    # ----------

    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')

    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass

    vid_id = xmlconfig.find('media_id').string

    # ----------

    try:
        host = xmlconfig.find('host').string
    except AttributeError:
        print 'Downloading 2 minute preview.'
        media_id = xmlconfig.find('media_id').string
        xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id), 'xml')
        host = xmlconfig.find('host').string

    if re.search('fplive\.net', host):
        url1 = re.findall('.+/c[0-9]+', host).pop()
        url2 = re.findall('c[0-9]+\?.+', host).pop()
    else:
        url1 = re.findall('.+/ondemand/', host).pop()
        url2 = re.findall('ondemand/.+', host).pop()
    filen = xmlconfig.find('file').string

    # ----------
    if 'subs' in sys.argv:
        subtitles(title)
        subs_only = True
        hardcoded = True  # bleh
    else:
        page_url2 = page_url
        video()
        #heightp = subprocess.Popen('"video-engine\MediaInfo.exe" --inform=Video;%Height% "./export/' + title + '.flv"' ,shell=True , stdout=subprocess.PIPE).stdout.read()
        heightp = {'71' : 'android', '60' : '360p', '61' : '480p',
                 '62' : '720p', '80' : '1080p', '0' : 'highest'}[xmlconfig.find('video_encode_quality').string]
        subtitles(title)
        subtitlefilecode=''
        #shutil.move(title + '.flv', os.path.join(os.getcwd(), 'export', ''))


        print 'Starting mkv merge'
        if hardcoded:
            subprocess.call('mkvmerge -o "./export/' + title + '[' + heightp.strip() + '].mkv" --language 1:jpn -a 1 -d 0 ' +
                            '"./export/' + title + '.flv"')
        else:
            sublang = {u'Español (Espana)': 'spa_spa', u'Français (France)': 'fre', u'Português (Brasil)': 'por',
                       u'English': 'eng', u'Español': 'spa', u'Türkçe': 'tur', u'Italiano': 'ita',
                       u'\u0627\u0644\u0639\u0631\u0628\u064a\u0629': 'ara', u'Deutsch': 'deu'}[lang]
    #		defaulttrack = False
            #print lang.encode('utf-8')
            #print sub_id5
            #print sub_id6
            for i in sub_id2:
                defaultsub = ''
                sublangc = sub_id5.pop(0)
                sublangn = sub_id6.pop(0)
#                print forcesub
                if not forcesub:
                    if sublangc == sublang:
                        defaultsub = ' --default-track 0:yes --forced-track 0:no'
                    else:
                        defaultsub = ' --default-track 0:no --forced-track 0:no'
                else:
                    if sublangc == sublang:
                        defaultsub = ' --default-track 0:yes --forced-track 0:yes'
                    else:
                        defaultsub = ' --default-track 0:no --forced-track 0:no'
                if not onlymainsub:
                    subtitlefilecode = subtitlefilecode + ' --language 0:' + sublangc.replace('spa_spa', 'spa') + defaultsub + ' --track-name 0:"' + sublangn + '" -s 0 "./export/' + title + '[' + sublangc + ']' + sublangn + '.ass"'
                else:
                    if sublangc == sublang:
                        subtitlefilecode = subtitlefilecode + ' --language 0:' + sublangc.replace('spa_spa', 'spa') + defaultsub + ' --track-name 0:"' + sublangn + '" -s 0 "./export/' + title + '[' + sublangc + ']' + sublangn + '.ass"'
    #        subprocess.call('"mkvmerge" -o ".\export\' + title + '.mkv" --language 1:jpn -a 1 -d 0 ' +
    #                        '".\export\\' + title + '.flv" --language 0:' + sublang + ' -s 0 ".\export\\'+title+'.ass"')
    #        print '"mkvmerge" -o ".\export\\' + title + '.mkv" --language 0:jpn --language 1:jpn -a 1 -d 0 ' + '".\export\\' + title + '.flv"' + subtitlefilecode +' --title "' + title +'"'
            mkvcmd='mkvmerge -o "./export/' + title + '[' + heightp.strip() +'].mkv" --language 0:jpn --language 1:jpn -a 1 -d 0 ' + '"./export/' + title + '.flv"' + subtitlefilecode +' --title "' + title +'"'
            #print mkvcmd
            #print subtitlefilecode
            os.system(mkvcmd)
        print 'Merge process complete'
        subs_only = False

    print
    print '----------'
    print

    print 'Starting Final Cleanup'
    if not subs_only:
        os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.flv')
    if not hardcoded or not subs_only:
        #os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.ass')
        for root, dirs, files in os.walk('export'):
            for file in filter(lambda x: re.match(title +'\[.+\]'+ '.ass', x), files):
                os.remove(os.path.join(root, file))
    print 'Cleanup Complete'
Example #14
def ultimate(page_url='', seasonnum=0, epnum=0, sess_id_=''):
    #global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2, onlymainsub
    #global player_revision

    print('''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98b 

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
''')
    if page_url == '':
        #page_url = input('Please enter Crunchyroll video URL:\n')
        #page_url = 'https://www.crunchyroll.com/the-rising-of-the-shield-hero/episode-10-in-the-midst-of-turmoil-781157'
        #page_url = 'http://www.crunchyroll.com/military/episode-1-the-mission-begins-668503'
        page_url = 'https://www.crunchyroll.com/mob-psycho-100/episode-11-guidance-psychic-sensor-780930'

    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if re.findall(r'https?:\/\/', page_url) == []:
            page_url = 'http://' + page_url
        '''
        try:
            int(page_url[-6:])
        except ValueError:
            if bool(seasonnum) and bool(epnum):
                page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
            elif bool(epnum):
                page_url = altfuncs.vidurl(page_url, 1, epnum)
            else:
                page_url = altfuncs.vidurl(page_url, False, False)
        '''

    # ----------

    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub, connection_n_, proxy_ = config(
    )
    if sess_id_ == '':
        cookies_ = ConfigParser()
        cookies_.read('cookies')
        if forceusa:
            sess_id_ = cookies_.get('COOKIES', 'sess_id_usa')
        else:
            sess_id_ = cookies_.get('COOKIES', 'sess_id')
    media_id = re.findall(r'https?:\/\/www\.crunchyroll\.com\/.+\/.+-(\d*)',
                          page_url)[0]
    #htmlconfig = BeautifulSoup(gethtml(page_url), 'html')
    htmlconfig = json.loads(
        re.findall(r'vilos\.config\.media = ({.*})', gethtml(page_url))[0])
    stream_url = {}
    for i in htmlconfig['streams']:
        stream_url.update({i['hardsub_lang']: i['url']})
    for i in htmlconfig['subtitles']:
        print(i["language"], i["url"])
    for i in stream_url:
        print(i, stream_url[i])
    media_info = getxml('RpcApiVideoPlayer_GetStandardConfig', media_id)
    #print(media_info)
    #print(media_info['file'])
    #print(media_info['media_metadata']['series_title'])
    #print(media_info['media_metadata']['episode_number'])
    #print(media_info['media_metadata']['episode_title'])
    title: str = '%s Episode %s - %s' % (
        media_info['media_metadata']['series_title'],
        media_info['media_metadata']['episode_number'],
        media_info['media_metadata']['episode_title'])
    if len(os.path.join('export', title + '.flv')
           ) > 255 or media_info['media_metadata']['episode_title'] == '':
        title: str = '%s Episode %s' % (
            media_info['media_metadata']['series_title'],
            media_info['media_metadata']['episode_number'])

    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings and improved to include the backslash###
    rep = {
        ' / ': ' - ',
        '/': ' - ',
        ':': '-',
        '?': '.',
        '"': "''",
        '|': '-',
        '&quot;': "''",
        'a*G': 'a G',
        '*': '#',
        '\u2026': '...',
        ' \ ': ' - '
    }
    rep = dict((re.escape(k), v) for k, v in rep.items())
    pattern = re.compile("|".join(rep.keys()))
    title_shell = unidecode(
        pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    # ----------
    print(format('Now Downloading - ' + title_shell))
    #video_input = os.path.join("export", title + '.ts')
    video_input = dircheck([
        os.path.abspath('export') + '\\',
        media_info['media_metadata']['series_title'], ' Episode',
        ' - ' + media_info['media_metadata']['episode_number'],
        ' - ' + media_info['media_metadata']['episode_title'], '.ts'
    ], [
        'True',
        'True',
        'False',
        'True',
        1,
        'True',
    ], 240)
Example #15
h = HTMLParser.HTMLParser()
title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
if len(os.getcwd()+'\\export\\'+title+'.ass') > 255:
    title = re.findall('^(.+?) \- ', title)[0]

### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", '*': '#', u'\u2026': '...'}

rep = dict((re.escape(k), v) for k, v in rep.iteritems())
pattern = re.compile("|".join(rep.keys()))
title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

### End stolen code ###

media_id = page_url[-6:]
xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')

try:
    if '4' in xmlconfig.find_all('code')[0]:
        print xmlconfig.find_all('msg')[0].text
        sys.exit()
except IndexError:
    pass

vid_id = xmlconfig.find('media_id').string


xmllist = altfuncs.getxml('RpcApiSubtitle_GetListing', media_id)
xmllist = unidecode(xmllist).replace('><', '>\n<')

if '<media_id>None</media_id>' in xmllist: