def videoquality_():
    slang1, slang2, sforcesub, sforceusa, slocalizecookies, vquality, vonlymainsub = altfuncs.config()
    seleccion = 5
    print '''Set This To The Preferred Quality:
0.- android (hard-subbed)
1.- 360p
2.- 480p
3.- 720p
4.- 1080p
5.- highest

Note: Any Quality Higher Than 360p Still Requires Premium, Unless It's Available That Way For Free (Some First Episodes).
We're Not Miracle Workers.
'''
    try:
        seleccion = int(raw_input("> "))
    except ValueError:
        print "ERROR: Invalid option."
        return videoquality_()
    if seleccion == 0 :
        return 'android'
    elif seleccion == 1 :
        return '360p'
    elif seleccion == 2 :
        return '480p'
    elif seleccion == 3 :
        return '720p'
    elif seleccion == 4 :
        return '1080p'
    elif seleccion == 5 :
        return 'highest'
    else:
        print "ERROR: Invalid option."
        return videoquality_()
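A minimal, self-contained sketch (not part of the original toolkit) of the same menu logic using a dict lookup and a bounded loop instead of recursion; it assumes Python 3 and has no other dependencies:

def pick_quality():
    # Same number-to-quality mapping the videoquality_() menu above offers.
    options = {0: 'android', 1: '360p', 2: '480p', 3: '720p', 4: '1080p', 5: 'highest'}
    while True:
        try:
            choice = int(input('> '))
        except ValueError:
            print('ERROR: Invalid option.')
            continue
        if choice in options:
            return options[choice]
        print('ERROR: Invalid option.')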
Example #2
def load_config(**kwargs):
    for key in kwargs:
        if type(kwargs[key]) == list:
            if kwargs[key][0] == input:
                kwargs[key] = menu_test.input_pc(kwargs[key][1])
    #print(kwargs)
    config_ = config(**kwargs)
    #print(config_)
    #print(config_['language'])

    #config_['language'] = {u'Español (Espana)' : 'Espanol_Espana', u'Français (France)' : 'Francais',
    #                       u'Português (Brasil)' : 'Portugues', u'English' : 'English', u'Español' : 'Espanol',
    #                       u'Türkçe' : 'Turkce', u'Italiano' : 'Italiano', u'العربية' : 'Arabic',
    #                       u'Deutsch' : 'Deutsch', u'Русский' : 'Russian'}[config_['language']]
    #config_['language2'] = {u'Español (Espana)' : 'Espanol_Espana', u'Français (France)' : 'Francais',
    #                       u'Português (Brasil)' : 'Portugues', u'English' : 'English', u'Español' : 'Espanol',
    #                       u'Türkçe' : 'Turkce', u'Italiano' : 'Italiano', u'العربية' : 'Arabic',
    #                       u'Deutsch' : 'Deutsch', u'Русский' : 'Russian'}[config_['language2']]
    if config_['language'] == 'Espanol_Espana':
        config_['language'] = 'Espanol (Espana)'
    if config_['language2'] == 'Espanol_Espana':
        config_['language2'] = 'Espanol (Espana)'
    #print(config_)
    menu_test.varible_pool_.update(config_)
    menu_test.redraw()
Example #3
def videoquality_():
    while True:
        vquality = config()['video_quality']
        seleccion = 5
        try:
            print(idle_cmd_txt_fix('''Set This To The Preferred Quality:
0.- '''+'''\x1b[32m'''+'''android (240p)'''+'''\x1b[0m'''+'''
1.- '''+'''\x1b[32m'''+'''360p'''+'''\x1b[0m'''+'''
2.- '''+'''\x1b[32m'''+'''480p'''+'''\x1b[0m'''+'''
3.- '''+'''\x1b[32m'''+'''720p'''+'''\x1b[0m'''+'''
4.- '''+'''\x1b[32m'''+'''1080p'''+'''\x1b[0m'''+'''
5.- '''+'''\x1b[32m'''+'''highest'''+'''\x1b[0m'''+'''
Note: Any Quality Higher Than 360p Still Requires Premium, Unless It's Available That Way For Free (Some First Episodes).
We're Not Miracle Workers.'''))
            seleccion = int(input('> '))
        except ValueError:
            print(idle_cmd_txt_fix("\x1b[31m"+"ERROR: Invalid option."+"\x1b[0m"))
            continue
        if seleccion == 0 :
            return '240p'
        elif seleccion == 1 :
            return '360p'
        elif seleccion == 2 :
            return '480p'
        elif seleccion == 3 :
            return '720p'
        elif seleccion == 4 :
            return '1080p'
        elif seleccion == 5 :
            return 'highest'
        else:
            print(idle_cmd_txt_fix("\x1b[31m"+"ERROR: Invalid option."+"\x1b[0m"))
            continue
Example #4
def videoquality_():
    slang1, slang2, sforcesub, sforceusa, slocalizecookies, vquality, vonlymainsub = altfuncs.config(
    )
    seleccion = 5
    print '''Set This To The Preferred Quality:
0.- android (hard-subbed)
1.- 360p
2.- 480p
3.- 720p
4.- 1080p
5.- highest
Note: Any Quality Higher Than 360p Still Requires Premium, Unless It's Available That Way For Free (Some First Episodes).
We're Not Miracle Workers.
'''
    try:
        seleccion = int(raw_input("> "))
    except ValueError:
        print "ERROR: Invalid option."
        return videoquality_()
    if seleccion == 0:
        return 'android'
    elif seleccion == 1:
        return '360p'
    elif seleccion == 2:
        return '480p'
    elif seleccion == 3:
        return '720p'
    elif seleccion == 4:
        return '1080p'
    elif seleccion == 5:
        return 'highest'
    else:
        print "ERROR: Invalid option."
        return videoquality_()

def load_config(**kwargs):
    for key in kwargs:
        if type(kwargs[key]) == list:
            if kwargs[key][0] == input:
                kwargs[key] = menu_test.input_pc(kwargs[key][1])
    config_ = config(**kwargs)

    if config_['language'] == 'Espanol_Espana':
        config_['language'] = 'Espanol (Espana)'
    if config_['language2'] == 'Espanol_Espana':
        config_['language2'] = 'Espanol (Espana)'
    menu_test.varible_pool_.update(config_)
    menu_test.redraw()
Example #6
def decode(page_url):
    print '''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')

    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)

    #h = HTMLParser.HTMLParser()
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.path.join('export', title+'.ass')) > 255:
        title = re.findall('^(.+?) \- ', title)[0]

    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')

    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass

    xmllist = altfuncs.getxml('RpcApiSubtitle_GetListing', media_id)
    xmllist = unidecode(xmllist).replace('><', '>\n<')



    if '<media_id>None</media_id>' in xmllist:
        print 'The video has hardcoded subtitles.'
        hardcoded = True
        sub_id = False
    else:
        try:
            sub_id2 = re.findall("id=([0-9]+)", xmllist)
            sub_id3 = re.findall("title='(\[.+\]) ", xmllist)
            sub_id4 = re.findall("title='(\[.+\]) ", xmllist)
            hardcoded = False
        except IndexError:
            print "The video's subtitles cannot be found, or are region-locked."
            hardcoded = True
            sub_id = False
    if not hardcoded:  # sub_id3/sub_id4 only exist when a subtitle listing was found
        sub_id3 = [word.replace('[English (US)]','eng') for word in sub_id3]
        sub_id3 = [word.replace('[Deutsch]','deu') for word in sub_id3]
        sub_id3 = [word.replace('[Portugues (Brasil)]','por') for word in sub_id3]
        sub_id3 = [word.replace('[Francais (France)]','fre') for word in sub_id3]
        sub_id3 = [word.replace('[Espanol (Espana)]','spa') for word in sub_id3]
        sub_id3 = [word.replace('[Espanol]','spa') for word in sub_id3]
        sub_id3 = [word.replace('[Italiano]','ita') for word in sub_id3]
        sub_id3 = [word.replace('[l`rby@]','ara') for word in sub_id3]
        #sub_id4 = [word.replace('[l`rby@]',u'[العربية]') for word in sub_id4]
        sub_id4 = [word.replace('[l`rby@]',u'[Arabic]') for word in sub_id4]
    #else:
    #   try:
    #       sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang1)), xmllist)[0]
    #       hardcoded = False
    #       lang = lang1
    #   except IndexError:
    #       try:
    #           sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang2)), xmllist)[0]
    #           print 'Language not found, reverting to ' + lang2 + '.'
    #           hardcoded = False
    #           lang = lang2
    #       except IndexError:
    #           try:
    #               sub_id = re.findall("id=([0-9]+)' title='\[English", xmllist)[0]  # default back to English
    #               print 'Backup language not found, reverting to English.'
    #               hardcoded = False
    #               lang = 'English'
    #           except IndexError:
    #               print "The video's subtitles cannot be found, or are region-locked."
    #               hardcoded = True
    #               sub_id = False
    if not hardcoded:
        for i in sub_id2:
            #xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', sub_id)
            xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', i)
            formattedsubs = CrunchyDec().returnsubs(xmlsub)
            if formattedsubs is None:
                continue
            #subfile = open(eptitle + '.ass', 'wb')
            subfile = open(os.path.join('export', title+'['+sub_id3.pop(0)+']'+sub_id4.pop(0)+'.ass'), 'wb')
            subfile.write(formattedsubs.encode('utf-8-sig'))
            subfile.close()
        #shutil.move(title + '.ass', os.path.join(os.getcwd(), 'export', ''))

    print 'Subtitles for '+title+' have been downloaded'
Example #7
def ultimate(page_url, seasonnum, epnum):
    global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2
    #global player_revision

    print '''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')

    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if not page_url.startswith('http://') and not page_url.startswith(
                'https://'):
            page_url = 'http://' + page_url
        try:
            int(page_url[-6:])
        except ValueError:
            if bool(seasonnum) and bool(epnum):
                page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
            elif bool(epnum):
                page_url = altfuncs.vidurl(page_url, 1, epnum)
            else:
                page_url = altfuncs.vidurl(page_url, False, False)

    #subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)

    # ----------

    #lang1, lang2 = altfuncs.config()
    #lang1, lang2, forcesub = altfuncs.config()
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config(
    )
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)

    #h = HTMLParser.HTMLParser()
    title = re.findall('<title>(.+?)</title>',
                       html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.path.join('export', title + '.flv')) > 255:
        title = re.findall('^(.+?) \- ', title)[0]

    # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
    # replace('?', '.').replace('"', "''").replace('|', '-').replace('&quot;',"''").strip()

    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {
        ' / ': ' - ',
        '/': ' - ',
        ':': '-',
        '?': '.',
        '"': "''",
        '|': '-',
        '&quot;': "''",
        'a*G': 'a G',
        '*': '#',
        u'\u2026': '...'
    }

    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    #subprocess.call('title ' + title.replace('&', '^&'), shell=True)

    # ----------

    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(
        altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id),
        'xml')

    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass

    vid_id = xmlconfig.find('media_id').string

    # ----------

    host = xmlconfig.find('host')
    if host:
        host = host.string

    filen = xmlconfig.find('file')
    if filen:
        filen = filen.string

    if not host and not filen:
        print 'Downloading 2 minute preview.'
        media_id = xmlconfig.find('media_id').string
        xmlconfig = BeautifulSoup(
            altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id),
            'xml')
        host = xmlconfig.find('host').string

    # ----------
    if 'subs' in sys.argv:
        subtitles(title)
        subs_only = True
        hardcoded = True  # bleh
    else:
        page_url2 = page_url
        if host:
            if re.search('fplive\.net', host):
                url1 = re.findall('.+/c[0-9]+', host).pop()
                url2 = re.findall('c[0-9]+\?.+', host).pop()
            else:
                url1 = re.findall('.+/ondemand/', host).pop()
                url2 = re.findall('ondemand/.+', host).pop()
            video()
            video_input = os.path.join("export", title + '.flv')
        else:
            video_input = os.path.join("export", title + '.ts')
            video_hls(filen, video_input)

        heightp = '360p' if xmlconfig.height.string == '368' else '{0}p'.format(
            xmlconfig.height.string)  # This is less likely to fail
        subtitles(title)

        print 'Starting mkv merge'
        mkvmerge = os.path.join("video-engine", "mkvmerge.exe")
        filename_output = os.path.join("export",
                                       title + '[' + heightp.strip() + '].mkv')
        subtitle_input = []
        if os.path.isfile(mkvmerge):
            with_wine = os.name != 'nt'
        else:
            mkvmerge = "mkvmerge"
            with_wine = False
        cmd = [
            mkvmerge, "-o", filename_output, '--language', '0:jpn',
            '--language', '1:jpn', '-a', '1', '-d', '0', video_input,
            '--title', title
        ]
        if with_wine:
            cmd.insert(0, 'wine')
        if not hardcoded:
            sublang = {
                u'Español (Espana)': 'spa_spa',
                u'Français (France)': 'fre',
                u'Português (Brasil)': 'por',
                u'English': 'eng',
                u'Español': 'spa',
                u'Türkçe': 'tur',
                u'Italiano': 'ita',
                u'العربية': 'ara',
                u'Deutsch': 'deu'
            }[lang]
            for i in sub_id2:
                sublangc = sub_id5.pop(0)
                sublangn = sub_id6.pop(0)

                if onlymainsub and sublangc != sublang:
                    continue

                filename_subtitle = os.path.join(
                    "export", title + '[' + sublangc + ']' + sublangn + '.ass')
                if not os.path.isfile(filename_subtitle):
                    continue

                cmd.extend(
                    ['--language', '0:' + sublangc.replace('spa_spa', 'spa')])

                if sublangc == sublang:
                    cmd.extend(['--default-track', '0:yes'])
                else:
                    cmd.extend(['--default-track', '0:no'])
                if forcesub:
                    cmd.extend(['--forced-track', '0:yes'])
                else:
                    cmd.extend(['--forced-track', '0:no'])

                cmd.extend(['--track-name', '0:' + sublangn])
                cmd.extend(['-s', '0'])
                cmd.append(filename_subtitle)
                subtitle_input.append(filename_subtitle)
        subprocess.call(cmd)
        print 'Merge process complete'
        subs_only = False

    print
    print '----------'
    print

    print 'Starting Final Cleanup'
    if not subs_only:
        os.remove(video_input)
    if not hardcoded or not subs_only:
        #os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.ass')
        for f in subtitle_input:
            os.remove(f)
    print 'Cleanup Complete'
Example #8
        page_url = 'http://' + page_url
    try:
        int(page_url[-6:])
    except ValueError:
        if bool(seasonnum) and bool(epnum):
            page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
        elif bool(epnum):
            page_url = altfuncs.vidurl(page_url, 1, epnum)
        else:
            page_url = altfuncs.vidurl(page_url, False, False)

subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)

# ----------

lang1, lang2 = altfuncs.config()
player_revision = altfuncs.playerrev(page_url)
html = altfuncs.gethtml(page_url)

h = HTMLParser.HTMLParser()
title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
if len(os.getcwd()+'\\export\\'+title+'.flv') > 255:
    title = re.findall('^(.+?) \- ', title)[0]

# title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
# replace('?', '.').replace('"', "''").replace('|', '-').replace('&quot;',"''").strip()

### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

rep = dict((re.escape(k), v) for k, v in rep.iteritems())
Example #9
def mkv_merge(video_input, pixl, defult_lang=None, keep_files=False):
    print('Starting mkv merge')
    #lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub, connection_n_, proxy_ = config()
    config_ = config()
    if defult_lang is None:
        defult_lang = config_['onlymainsub']
    #print(os.path.abspath(os.path.join(".","video-engine", "mkvmerge.exe")))
    #print(os.path.abspath(os.path.join("..","video-engine", "mkvmerge.exe")))
    if os.path.lexists(
            os.path.abspath(os.path.join(".", "video-engine",
                                         "mkvmerge.exe"))):
        mkvmerge = os.path.abspath(
            os.path.join(".", "video-engine", "mkvmerge.exe"))
    elif os.path.lexists(
            os.path.abspath(os.path.join("..", "video-engine",
                                         "mkvmerge.exe"))):
        mkvmerge = os.path.abspath(
            os.path.join("..", "video-engine", "mkvmerge.exe"))
    #mkvmerge = os.path.abspath(os.path.join("..","video-engine", "mkvmerge.exe"))
    working_dir = os.path.dirname(video_input)
    working_name = os.path.splitext(os.path.basename(video_input))[0]
    filename_output = os.path.join(working_dir,
                                   working_name + '[' + pixl + '].mkv')
    exists_counter = 1
    while os.path.lexists(filename_output):
        filename_output = filename_output[:-4] + '(' + str(
            exists_counter) + ')' + filename_output[-4:]
        exists_counter += 1
    for file in os.listdir(working_dir):
        if file.startswith(working_name) and file.endswith(".ts"):
            cmd = [
                mkvmerge, "-o",
                os.path.abspath(filename_output), '--language', '0:jpn',
                '--language', '1:jpn', '-a', '1', '-d', '0',
                os.path.abspath(os.path.join(working_dir,
                                             file)), '--title', working_name
            ]
    for file in os.listdir(working_dir):
        if file.startswith(working_name) and file.endswith(".mp4"):
            cmd = [
                mkvmerge, "-o",
                os.path.abspath(filename_output), '--language', '0:jpn',
                '--language', '1:jpn', '-a', '1', '-d', '0',
                os.path.abspath(os.path.join(working_dir,
                                             file)), '--title', working_name
            ]
    #cmd = [mkvmerge, "-o", os.path.abspath(filename_output), '--language', '0:jpn', '--language', '1:jpn',
    #       '-a', '1', '-d', '0', os.path.abspath(video_input), '--title', working_name]
    #lang_iso = {'English': 'English (US)', u'Español' : u'Espa\xf1ol', u'Español (Espana)': u'Espa\xf1ol (Espa\xf1a)',
    #            u'Français (France)': u'Fran\xe7ais (France)', u'Português (Brasil)': u'Portugu\xeas (Brasil)',
    #            u'Italiano': 'Italiano', u'Deutsch': 'Deutsch', u'العربية': 'العربية', u'Русский': 'Русский',
    #            u'Türkçe': 'Türkçe'}
    lang_iso = {
        'Espanol_Espana': u'Espa\xf1ol (Espa\xf1a)',
        'Francais': u'Fran\xe7ais (France)',
        'Portugues': u'Portugu\xeas (Brasil)',
        'English': 'English (US)',
        'Espanol': u'Espa\xf1ol',
        'Turkce': 'Türkçe',
        'Italiano': 'Italiano',
        'Arabic': 'العربية',
        'Deutsch': 'Deutsch',
        'Russian': 'Русский'
    }
    defult_lang_sub = ''
    for file in os.listdir(working_dir):
        if file.startswith(working_name) and file.endswith(".ass"):
            #print(re.findall(r'\]\[(.*)\]',file)[0], lang_iso[lang1], lang_iso[lang2], defult_lang_sub)
            if re.findall(r'\]\[(.*)\]',
                          file)[0] == lang_iso[config_['language']]:
                defult_lang_sub = re.findall(r'\]\[(.*)\]', file)[0]
            if defult_lang_sub == '':
                if re.findall(r'\]\[(.*)\]',
                              file)[0] == lang_iso[config_['language2']]:
                    defult_lang_sub = re.findall(r'\]\[(.*)\]', file)[0]
    #print(defult_lang_sub)
    for file in os.listdir(working_dir):
        if file.startswith(working_name) and file.endswith(".m4a"):
            cmd += [
                '--language', '0:jpn', '--default-track', '0:yes',
                '--forced-track', '0:yes',
                os.path.abspath(os.path.join(working_dir, file))
            ]
        if file.startswith(working_name) and file.endswith(".ass"):
            #print(os.path.abspath(os.path.join(working_dir,file)))
            cmd += [
                '--language', '0:' + re.findall(r'\[(.*)\]\[', file)[0],
                '--sub-charset',
                '0:UTF-8', '--default-track', '0:yes' if re.findall(
                    r'\]\[(.*)\]', file)[0] == defult_lang_sub else '0:no',
                '--forced-track', '0:yes', '--track-name',
                '0:' + re.findall(r'\]\[(.*)\]', file)[0], '-s', '0',
                os.path.abspath(os.path.join(working_dir, file))
            ]

            #cmd.extend(['--language', '0:' + sublangc.replace('spa_spa','spa')])

            #if sublangc == sublang:
            #    cmd.extend(['--default-track', '0:yes'])
            #else:
            #    cmd.extend(['--default-track', '0:no'])
            #if forcesub:
            #    cmd.extend(['--forced-track', '0:yes'])
            #else:
            #    cmd.extend(['--forced-track', '0:no'])

            #cmd.extend(['--track-name', '0:' + sublangn])
            #cmd.extend(['-s', '0'])
            #cmd.append(filename_subtitle)
    #print(cmd)
    cmd_exitcode = 2
    #if os.name == 'nt':
    #    cmd_exitcode = subprocess.call(cmd)
    #else:
    #    cmd_exitcode = subprocess.call(['wine']+cmd)
    if os.name != 'nt':
        cmd = ['wine'] + cmd
    cmd_exitcode = subprocess.call(cmd)
    #print(cmd_exitcode)
    #print(cmd)
    if cmd_exitcode != 0:
        print('fixing TS file')
        for file in os.listdir(working_dir):
            if file.startswith(working_name) and file.endswith(".ts"):
                #os.path.abspath(os.path.join(working_dir, file))
                unix_pre = []
                if os.name != 'nt':
                    unix_pre += ['wine']
                subprocess.call(unix_pre + [
                    mkvmerge.replace('mkvmerge', 'ffmpeg'), '-i',
                    os.path.abspath(os.path.join(working_dir, file)), '-map',
                    '0', '-c', 'copy', '-f', 'mpegts',
                    os.path.abspath(os.path.join(working_dir, file)).replace(
                        '.ts', '_fix.ts')
                ])
                if os.name == 'nt':
                    cmd[11] = cmd[11].replace('.ts', '_fix.ts')
                else:
                    cmd[12] = cmd[12].replace('.ts', '_fix.ts')
                #print(cmd)
                cmd_exitcode = subprocess.call(cmd)
                #print(cmd_exitcode)

    #subprocess.Popen(cmd.encode('ascii', 'surrogateescape').decode('utf-8'))
    print('Merge process complete')
    print('Starting Final Cleanup')
    #os.remove(os.path.abspath(video_input))
    if not keep_files:
        for file in os.listdir(working_dir):
            if file.startswith(working_name) and (file.endswith(".ass")
                                                  or file.endswith(".m4a")
                                                  or file.endswith(".mp4")
                                                  or file.endswith(".ts")):
                os.remove(os.path.abspath(os.path.join(working_dir, file)))
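A hypothetical usage sketch for mkv_merge above; the module name crunchy_mkv, the file name, and the '720p' label are illustration-only assumptions, not part of the original project:

import os

from crunchy_mkv import mkv_merge  # hypothetical home of the function above

# Expects ./export/Show - 01.ts plus any matching .ass/.m4a files produced by the
# downloader; writes ./export/Show - 01[720p].mkv and keeps the inputs around.
mkv_merge(os.path.join('export', 'Show - 01.ts'), '720p', keep_files=True)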
Example #10
def settings_():
    slang1, slang2, sforcesub, sforceusa, slocalizecookies, vquality, vonlymainsub = altfuncs.config(
    )
    slang1 = {
        u'Español (Espana)': 'Espanol_Espana',
        u'Français (France)': 'Francais',
        u'Português (Brasil)': 'Portugues',
        u'English': 'English',
        u'Español': 'Espanol',
        u'Türkçe': 'Turkce',
        u'Italiano': 'Italiano',
        u'العربية': 'Arabic',
        u'Deutsch': 'Deutsch'
    }[slang1]
    slang2 = {
        u'Español (Espana)': 'Espanol_Espana',
        u'Français (France)': 'Francais',
        u'Português (Brasil)': 'Portugues',
        u'English': 'English',
        u'Español': 'Espanol',
        u'Türkçe': 'Turkce',
        u'Italiano': 'Italiano',
        u'العربية': 'Arabic',
        u'Deutsch': 'Deutsch'
    }[slang2]
    if slang1 == 'Espanol_Espana':
        slang1_ = 'Espanol (Espana)'
    else:
        slang1_ = slang1
    if slang2 == 'Espanol_Espana':
        slang2_ = 'Espanol (Espana)'
    else:
        slang2_ = slang2
    seleccion = 0
    print '''Options:
0.- Exit
1.- Video Quality = ''' + vquality + '''
2.- Primary Language = ''' + slang1_ + '''
3.- Secondary Language = ''' + slang2_ + '''
4.- Force Subtitle = ''' + str(
        sforcesub) + '''     #Use --forced-track in Subtitle
5.- USA Proxy = ''' + str(sforceusa) + '''          #use a US session ID
6.- Localize cookies = ''' + str(
            slocalizecookies) + '''        #Localize the cookies (Experiment)
7.- Only One Subtitle = ''' + str(
                vonlymainsub) + '''       #Only download Primary Language
8.- Restore Default Settings
'''
    try:
        seleccion = int(raw_input("> "))
    except ValueError:
        print "ERROR: Invalid option."
        return settings_()
    if seleccion == 1:
        vquality = videoquality_()
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa,
                        slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 2:
        slang1 = Languages_('slang1')
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa,
                        slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 3:
        slang2 = Languages_('slang2')
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa,
                        slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 4:
        sforcesub = not sforcesub
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa,
                        slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 5:
        sforceusa = not sforceusa
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa,
                        slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 6:
        slocalizecookies = not slocalizecookies
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa,
                        slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 7:
        vonlymainsub = not vonlymainsub
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa,
                        slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 8:
        defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa,
                        ilocalizecookies, ionlymainsub)
        settings_()
    elif seleccion == 0:
        pass
    else:
        print "ERROR: Invalid option."
        settings_()
Example #11
def getuserstatus(sess_id_renew=False, sess_id_usa=''):
    status = 'Guest'
    user1 = 'Guest'
    # default these so the branch that skips the cookies file cannot hit a NameError
    device_id = ''
    device_id_usa = ''
    auth = ''
    session = requests.session()
    cookies_ = ConfigParser()
    cookies_.read('cookies')

    if sess_id_usa == '':
        device_id = cookies_.get('COOKIES', 'device_id')
        device_id_usa = cookies_.get('COOKIES', 'device_id_usa')
        sess_id_usa = cookies_.get('COOKIES', 'sess_id_usa')
        sess_id_ = cookies_.get('COOKIES', 'sess_id')
        auth = cookies_.get('COOKIES', 'auth')
    else:
        sess_id_ = sess_id_usa
    if sess_id_renew:
        device_id = ''.join(
            random.sample(string.ascii_letters + string.digits, 32))
        device_id_usa = ''.join(
            random.sample(string.ascii_letters + string.digits, 32))
        session.get(
            'http://api.crunchyroll.com/end_session.0.json?session_id=' +
            sess_id_usa).json()
        session.get(
            'http://api.crunchyroll.com/end_session.0.json?session_id=' +
            sess_id_).json()
    checkusaid = session.get(
        'http://api.crunchyroll.com/start_session.0.json?session_id=' +
        sess_id_usa).json()
    checkusaid2 = session.get(
        'http://api.crunchyroll.com/start_session.0.json?session_id=' +
        sess_id_).json()
    if checkusaid['code'] == 'ok' and checkusaid2['code'] == 'ok':
        if checkusaid['data']['user'] is not None:
            user1 = checkusaid['data']['user']['username']
            if checkusaid['data']['user']['premium'] == '':
                status = 'Free Member'
            else:  # later will add Premium+ status
                status = 'Premium'
    else:
        payload_usa = {
            'device_id': device_id_usa,
            'api_ver': '1.0',
            'device_type': 'com.crunchyroll.crunchyroid',
            'access_token': 'WveH9VkPLrXvuNm',
            'version': '2313.8',
            'locale': 'jaJP',
            'duration': '9999999999',
            'auth': auth
        }
        payload = {
            'device_id': device_id,
            'api_ver': '1.0',
            'device_type': 'com.crunchyroll.crunchyroid',
            'access_token': 'WveH9VkPLrXvuNm',
            'version': '2313.8',
            'locale': 'jaJP',
            'duration': '9999999999',
            'auth': auth
        }
        if config()['proxy'] != '':
            proxy_ = get_proxy(['HTTPS'], [config()['proxy']])
            try:
                proxies = {'http': proxy_[0]}
            except:
                proxies = {}
        else:
            proxies = {}
        sess_id_usa = create_sess_id_usa(payload_usa)
        try:
            checkusaid2 = session.post(
                'http://api.crunchyroll.com/start_session.0.json',
                proxies=proxies,
                params=payload).json()
            # print(session.post('http://api.crunchyroll.com/start_session.0.json', proxies=proxies, params=payload).url)
        except requests.exceptions.ProxyError:
            checkusaid2 = session.post(
                'http://api.crunchyroll.com/start_session.0.json',
                params=payload).json()
        sess_id_ = checkusaid2['data']['session_id']
        cookies_out = '''[COOKIES]
device_id = ''' + device_id + '''
device_id_usa = ''' + device_id_usa + '''
sess_id = ''' + sess_id_ + '''
sess_id_usa = ''' + sess_id_usa + '''
auth = ''' + auth + '''
'''
        open("cookies", "w").write(cookies_out)
        if not checkusaid2['data']['user'] is None:
            user1 = checkusaid2['data']['user']['username']
            if checkusaid2['data']['user']['premium'] == '':
                status = 'Free Member'
            else:
                if checkusaid2['data']['user'][
                        'access_type'] == 'premium_plus':
                    status = 'Premium Plus'
                else:
                    status = 'Premium'
    return [status, user1]
Example #12
        page_url = 'http://' + page_url
    try:
        int(page_url[-6:])
    except ValueError:
        if bool(seasonnum) and bool(epnum):
            page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
        elif bool(epnum):
            page_url = altfuncs.vidurl(page_url, 1, epnum)
        else:
            page_url = altfuncs.vidurl(page_url, False, False)

subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)

# ----------

lang1, lang2, forcesub, forceusa = altfuncs.config()
player_revision = altfuncs.playerrev(page_url)
html = altfuncs.gethtml(page_url)

h = HTMLParser.HTMLParser()
title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
if len(os.getcwd()+'\\export\\'+title+'.flv') > 255:
    title = re.findall('^(.+?) \- ', title)[0]

# title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
# replace('?', '.').replace('"', "''").replace('|', '-').replace('&quot;',"''").strip()

### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

rep = dict((re.escape(k), v) for k, v in rep.iteritems())
Example #13
def getuserstatus(sess_id_renew = False, sess_id = ''):
    """
    status = 'Guest'
    user1 = 'Guest'
    sess_id_usa = ''
    
    device_id = ''
    device_id_usa = ''
    auth = ''
    auth2 = ''
    session = cloudscraper.create_scraper(interpreter='nodejs')
    #https://www.crunchyroll.com/acct/membership
    cookies_ = ConfigParser()
    cookies_.read('cookies')
    if sess_id =='':
      device_id = cookies_.get('COOKIES', 'device_id')
      device_id_usa = cookies_.get('COOKIES', 'device_id_usa')
      sess_id_usa = cookies_.get('COOKIES', 'sess_id_usa')
      sess_id = cookies_.get('COOKIES', 'sess_id')
      auth = cookies_.get('COOKIES', 'auth')
      if 'auth2' in cookies_.options('COOKIES'):
        auth2 = cookies_.get('COOKIES', 'auth2')
    checkid = session.get('https://www.crunchyroll.com/acct/membership')
    username_check = re.findall('''<li class="username">.*?\n.*?<a href="/user/.+?" token="topbar">.*?\n +(.+?)\n.*?</a>''',checkid.text)
    print(username_check)
    if username_check:
      user1 = username_check[0]
    status_check = re.findall('''<table class="acct-membership-status">.*?\n.*?<tr>.*?\n.*?\
<th>Status:</th>.*?\n.*?<td>(.+?)</td>.*?\n.*?</tr>.*?\n.*?</table>''',checkid.text)
    if status_check:
      status = status_check[0]
    print(status, user1)
    return [status, user1]
      
"""
    status = 'Guest'
    user1 = 'Guest'
    sess_id_usa = ''
    
    device_id = ''
    device_id_usa = ''
    auth = ''
    auth2 = ''

    #session = requests.session()
    cookies_ = ConfigParser()
    cookies_.read('cookies')
    if sess_id =='':
        device_id = cookies_.get('COOKIES', 'device_id')
        device_id_usa = cookies_.get('COOKIES', 'device_id_usa')
        sess_id_usa = cookies_.get('COOKIES', 'sess_id_usa')
        sess_id = cookies_.get('COOKIES', 'sess_id')
        auth = cookies_.get('COOKIES', 'auth')
        if 'auth2' in cookies_.options('COOKIES'):
          auth2 = cookies_.get('COOKIES', 'auth2')
    resp = requests.get('http://api.crunchyroll.com/start_session.0.json', params={'session_id' : sess_id})
    checkusaid = resp.json()
    #print('test1',resp)
    #print(resp.url)
    #print(checkusaid)
    if config()['forceusa']:
      resp = requests.get('http://api.crunchyroll.com/start_session.0.json', params={'session_id' : sess_id_usa})
      checkusaid_us = resp.json()
      #print('test2',resp)
      #print(resp.url)
      #print(checkusaid_us)
      if checkusaid_us['code'] != 'ok':
        sess_id_renew = True
    else:
      sess_id_usa = ''
    #print(checkusaid)
    if checkusaid['code'] != 'ok':
      sess_id_renew = True
      #if checkusaid['country_code'] != 'US' and config()['forceusa'] and sess_id_usa != '':
      #  checkusaid_us = session.get('http://api.crunchyroll.com/start_session.0.json?session_id='+sess_id_usa).json()
      #  print(checkusaid_us)
      #  if checkusaid_us['code'] != 'ok':
      #    sess_id_renew = True
    
    if sess_id_renew:
      requests.get('http://api.crunchyroll.com/end_session.0.json?session_id='+sess_id_usa)
      requests.get('http://api.crunchyroll.com/end_session.0.json?session_id='+sess_id)

      if not auth2:
        return ['Guest', 'Guest']

      re_username, re_password = extrct_auth2(auth2)
      #print(re_username, re_password)
      re_login_status = login(re_username, re_password)
      return re_login_status
      """

      generate_sess_id = create_sess_id()
      sess_id = generate_sess_id['sess_id']
      device_id = generate_sess_id['device_id']
      #proxies = generate_sess_id['proxies']
      #generate_sess_id['country_code'] = 'JO'
      if generate_sess_id['country_code'] != 'US' and  config()['forceusa']:
        #print('us seesss')
        generate_sess_id_usa = create_sess_id(True)
        sess_id_usa = generate_sess_id_usa['sess_id']
        device_id_usa = generate_sess_id_usa['device_id']

      checkusaid = requests.get('http://api.crunchyroll.com/start_session.0.json?session_id='+sess_id).json()
      print('test3')
      print(checkusaid)
      cookies_out = '''\
[COOKIES]
device_id = {}
device_id_usa = {}
sess_id = {}
sess_id_usa = {}
auth = {}
'''.format(device_id, device_id_usa, sess_id, sess_id_usa, auth)
        # open("cookies", "w").write('[COOKIES]\nsess_id = '+sess_id_+'\nsess_id_usa = '+sess_id_usa+'\nauth = '+auth)
      open("cookies", "w").write(cookies_out)

    """
    if not checkusaid['data']['user'] is None:
      user1 = checkusaid['data']['user']['username']
      if checkusaid['data']['user']['premium'] == '':
        status = 'Free Member'
      else:
        if checkusaid['data']['user']['access_type'] == 'premium_plus':
          status = 'Premium Plus'
        else:
          status = 'Premium'
    
    #print(1,[status, user1])
    
    return [status, user1]
Example #14
def login(username, password):
    session = requests.session()
    device_id = ''
    device_id_usa = ''
    sess_id = ''
    sess_id_usa = ''
    auth = ''
    auth2 = ''
    #session = requests.session()
    #device_id = ''.join(random.sample(string.ascii_letters + string.digits, 32))
    #device_id_usa = ''.join(random.sample(string.ascii_letters + string.digits, 32))
    #payload_usa = {'device_id': device_id_usa, 'api_ver': '1.0',
    #           'device_type': 'com.crunchyroll.crunchyroid', 'access_token': 'WveH9VkPLrXvuNm', 'version': '2313.8',
    #           'locale': 'jaJP', 'duration': '9999999999'}
    #payload = {'device_id': device_id, 'api_ver': '1.0',
    #           'device_type': 'com.crunchyroll.crunchyroid', 'access_token': 'WveH9VkPLrXvuNm', 'version': '2313.8',
    #           'locale': 'jaJP', 'duration': '9999999999'}
    #if config()['proxy'] != '':
    #    proxy_ = get_proxy(['HTTPS'], [config()['proxy']])
    #    try:
    #        proxies = {'http': proxy_[0]}
    #    except:
    #        proxies = {}
    #else:
    #    proxies = {}


    
    generate_sess_id = create_sess_id()
    #print(generate_sess_id)
    sess_id = generate_sess_id['sess_id']
    device_id = generate_sess_id['device_id']
    proxies = generate_sess_id['proxies']
    #generate_sess_id['country_code'] = 'JO'
    ##print(generate_sess_id['country_code'], 'forceusa:'+str(config()['forceusa']), generate_sess_id['country_code'] != 'US' and  config()['forceusa'])
    if generate_sess_id['country_code'] != 'US' and  config()['forceusa']:
      ##print('us seesss')
      generate_sess_id_usa = create_sess_id(True)
      sess_id_usa = generate_sess_id_usa['sess_id']
      device_id_usa = generate_sess_id_usa['device_id']

    ##print(sess_id_usa)
    #try:
    #    sess_id_ = session.post('http://api.crunchyroll.com/start_session.0.json', proxies=proxies, params=payload).json()['data']['session_id']
    #except requests.exceptions.ProxyError:
    #    sess_id_ = session.post('http://api.crunchyroll.com/start_session.0.json', params=payload).json()['data']['session_id']
    #auth = ''
    if username != '' and password != '':
        #print('test1')
        auth2 = generate_auth2(username, password)
        payload = {'session_id' : sess_id,'locale': 'jaJP','duration': '9999999999','account' : username, 'password' : password}
        try:
            respond = session.post('https://api.crunchyroll.com/login.0.json', params=payload)
            #print(respond,respond.text)
            #print(respond.url)
            auth = respond.json()['data']['auth']
            
        except:
            pass
        if sess_id_usa:
            payload = {'session_id' : sess_id_usa,'locale': 'jaJP','duration': '9999999999','account' : username, 'password' : password}
            try:
                respond = session.post('https://api.crunchyroll.com/login.0.json', params=payload)
                #print(respond,respond.text)
                #print(respond.url)
            except:
                pass
    #input()
    #payload = {'device_id': device_id, 'api_ver': '1.0',
    #           'device_type': 'com.crunchyroll.crunchyroid', 'access_token': 'WveH9VkPLrXvuNm', 'version': '2313.8',
    #           'locale': 'jaJP', 'duration': '9999999999', 'auth': auth}
    #try:
    #    respond = session.post('http://api.crunchyroll.com/start_session.0.json', proxies=proxies, params=payload)
    #    print(respond, respond.text)
    #    print(respond.url)
    #    input()
    #    sess_id = respond.json()['data']['session_id']
    #except requests.exceptions.ProxyError:
    #    sess_id = session.post('http://api.crunchyroll.com/start_session.0.json', params=payload).json()['data']['session_id']
    
    cookies_out = '''\
[COOKIES]
device_id = {}
device_id_usa = {}
sess_id = {}
sess_id_usa = {}
auth = {}
auth2 = {}
'''.format(device_id, device_id_usa, sess_id, sess_id_usa, auth, auth2)
        # open("cookies", "w").write('[COOKIES]\nsess_id = '+sess_id_+'\nsess_id_usa = '+sess_id_usa+'\nauth = '+auth)
    open("cookies", "w").write(cookies_out)
    userstatus = getuserstatus(False)
    #userstatus = ['1','1']
    #print(userstatus)
    if username != '' and userstatus[0] == 'Guest':
        print('Login failed.' if 'idlelib.run' in sys.modules else '\x1b[31m' + 'Login failed.' + '\x1b[0m')
    # sys.exit()
    else:
        print('Login as ' + userstatus[1] + ' successfully.' if 'idlelib.run' in sys.modules else 'Login as ' + '\x1b[32m' + userstatus[1] + '\x1b[0m' + ' successfully.')
    
    return userstatus
Example #15
def ultimate(page_url, seasonnum, epnum):
    global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2
    #global player_revision

    print '''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')
	
    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if not page_url.startswith('http://') and not page_url.startswith('https://'):
            page_url = 'http://' + page_url
        try:
            int(page_url[-6:])
        except ValueError:
            if bool(seasonnum) and bool(epnum):
                page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
            elif bool(epnum):
                page_url = altfuncs.vidurl(page_url, 1, epnum)
            else:
                page_url = altfuncs.vidurl(page_url, False, False)

    subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)

    # ----------

    #lang1, lang2 = altfuncs.config()
    #lang1, lang2, forcesub = altfuncs.config()
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)

    h = HTMLParser.HTMLParser()
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.getcwd()+'./export/'+title+'.flv') > 255:
        title = re.findall('^(.+?) \- ', title)[0]

    # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
    # replace('?', '.').replace('"', "''").replace('|', '-').replace('&quot;',"''").strip()
    
    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    subprocess.call('title ' + title.replace('&', '^&'), shell=True)

    # ----------

    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')

    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass

    vid_id = xmlconfig.find('media_id').string

    # ----------

    try:
        host = xmlconfig.find('host').string
    except AttributeError:
        print 'Downloading 2 minute preview.'
        media_id = xmlconfig.find('media_id').string
        xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id), 'xml')
        host = xmlconfig.find('host').string

    if re.search('fplive\.net', host):
        url1 = re.findall('.+/c[0-9]+', host).pop()
        url2 = re.findall('c[0-9]+\?.+', host).pop()
    else:
        url1 = re.findall('.+/ondemand/', host).pop()
        url2 = re.findall('ondemand/.+', host).pop()
    filen = xmlconfig.find('file').string

    # ----------
    if 'subs' in sys.argv:
        subtitles(title)
        subs_only = True
        hardcoded = True  # bleh
    else:
        page_url2 = page_url
        video()
        #heightp = subprocess.Popen('"video-engine\MediaInfo.exe" --inform=Video;%Height% "./export/' + title + '.flv"' ,shell=True , stdout=subprocess.PIPE).stdout.read()
        heightp = {'71' : 'android', '60' : '360p', '61' : '480p',
                 '62' : '720p', '80' : '1080p', '0' : 'highest'}[xmlconfig.find('video_encode_quality').string]
        subtitles(title)
        subtitlefilecode=''
        #shutil.move(title + '.flv', os.path.join(os.getcwd(), 'export', ''))


        print 'Starting mkv merge'
        if hardcoded:
            subprocess.call('mkvmerge -o "./export/' + title + '[' + heightp.strip() + '].mkv" --language 1:jpn -a 1 -d 0 ' +
                            '"./export/' + title + '.flv"')
        else:
            sublang = {u'Español (Espana)': 'spa_spa', u'Français (France)': 'fre', u'Português (Brasil)': 'por',
                       u'English': 'eng', u'Español': 'spa', u'Türkçe': 'tur', u'Italiano': 'ita',
                       u'\u0627\u0644\u0639\u0631\u0628\u064a\u0629': 'ara', u'Deutsch': 'deu'}[lang]
    #		defaulttrack = False
            #print lang.encode('utf-8')
            #print sub_id5
            #print sub_id6
            for i in sub_id2:
                defaultsub = ''
                sublangc = sub_id5.pop(0)
                sublangn = sub_id6.pop(0)
                #print forcesub
                if not forcesub:
                    if sublangc == sublang:
                        defaultsub = ' --default-track 0:yes --forced-track 0:no'
                    else:
                        defaultsub = ' --default-track 0:no --forced-track 0:no'
                else:
                    if sublangc == sublang:
                        defaultsub = ' --default-track 0:yes --forced-track 0:yes'
                    else:
                        defaultsub = ' --default-track 0:no --forced-track 0:no'
                if not onlymainsub:
                    subtitlefilecode = subtitlefilecode + ' --language 0:' + sublangc.replace('spa_spa', 'spa') + defaultsub + ' --track-name 0:"' + sublangn + '" -s 0 "./export/' + title + '[' + sublangc + ']' + sublangn + '.ass"'
                else:
                    if sublangc == sublang:
                        subtitlefilecode = subtitlefilecode + ' --language 0:' + sublangc.replace('spa_spa', 'spa') + defaultsub + ' --track-name 0:"' + sublangn + '" -s 0 "./export/' + title + '[' + sublangc + ']' + sublangn + '.ass"'
    #        subprocess.call('"mkvmerge" -o ".\export\' + title + '.mkv" --language 1:jpn -a 1 -d 0 ' +
    #                        '".\export\\' + title + '.flv" --language 0:' + sublang + ' -s 0 ".\export\\'+title+'.ass"')
    #        print '"mkvmerge" -o ".\export\\' + title + '.mkv" --language 0:jpn --language 1:jpn -a 1 -d 0 ' + '".\export\\' + title + '.flv"' + subtitlefilecode +' --title "' + title +'"'
            mkvcmd='mkvmerge -o "./export/' + title + '[' + heightp.strip() +'].mkv" --language 0:jpn --language 1:jpn -a 1 -d 0 ' + '"./export/' + title + '.flv"' + subtitlefilecode +' --title "' + title +'"'
            #print mkvcmd
            #print subtitlefilecode
            os.system(mkvcmd)
        print 'Merge process complete'
        subs_only = False

    print
    print '----------'
    print

    print 'Starting Final Cleanup'
    if not subs_only:
        os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.flv')
    if not hardcoded or not subs_only:
        #os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.ass')
        for root, dirs, files in os.walk('export'):
            for file in filter(lambda x: re.match(title +'\[.+\]'+ '.ass', x), files):
                os.remove(os.path.join(root, file))
    print 'Cleanup Complete'
def getuserstatus(sess_id_renew = False, sess_id = ''):
    
    status = 'Guest'
    user1 = 'Guest'
    sess_id_usa = ''
    
    device_id = ''
    device_id_usa = ''
    auth = ''

    session = requests.session()
    cookies_ = ConfigParser()
    cookies_.read('cookies')
    if sess_id =='':
        device_id = cookies_.get('COOKIES', 'device_id')
        device_id_usa = cookies_.get('COOKIES', 'device_id_usa')
        sess_id_usa = cookies_.get('COOKIES', 'sess_id_usa')
        sess_id = cookies_.get('COOKIES', 'sess_id')
        auth = cookies_.get('COOKIES', 'auth')
    checkusaid = session.get('http://api.crunchyroll.com/start_session.0.json?session_id='+sess_id).json()
    print('test1')
    print(checkusaid)
    if config()['forceusa']:
      checkusaid_us = session.get('http://api.crunchyroll.com/start_session.0.json?session_id='+sess_id_usa).json()
      print('test2')
      print(checkusaid_us)
      if checkusaid_us['code'] != 'ok':
        sess_id_renew = True
    else:
      sess_id_usa = ''
    #print(checkusaid)
    if checkusaid['code'] != 'ok':
      sess_id_renew = True
      #if checkusaid['country_code'] != 'US' and config()['forceusa'] and sess_id_usa != '':
      #  checkusaid_us = session.get('http://api.crunchyroll.com/start_session.0.json?session_id='+sess_id_usa).json()
      #  print(checkusaid_us)
      #  if checkusaid_us['code'] != 'ok':
      #    sess_id_renew = True
    
    if sess_id_renew:
      session.get('http://api.crunchyroll.com/end_session.0.json?session_id='+sess_id_usa)
      session.get('http://api.crunchyroll.com/end_session.0.json?session_id='+sess_id)

      generate_sess_id = create_sess_id(auth=auth)
      sess_id = generate_sess_id['sess_id']
      device_id = generate_sess_id['device_id']
      #proxies = generate_sess_id['proxies']
      #generate_sess_id['country_code'] = 'JO'
      if generate_sess_id['country_code'] != 'US' and  config()['forceusa']:
        #print('us seesss')
        generate_sess_id_usa = create_sess_id(True,auth=auth)
        sess_id_usa = generate_sess_id_usa['sess_id']
        device_id_usa = generate_sess_id_usa['device_id']

      checkusaid = session.get('http://api.crunchyroll.com/start_session.0.json?session_id='+sess_id).json()
      print('test3')
      print(checkusaid)
      cookies_out = '''\
[COOKIES]
device_id = {}
device_id_usa = {}
sess_id = {}
sess_id_usa = {}
auth = {}
'''.format(device_id, device_id_usa, sess_id, sess_id_usa, auth)
        # open("cookies", "w").write('[COOKIES]\nsess_id = '+sess_id_+'\nsess_id_usa = '+sess_id_usa+'\nauth = '+auth)
      open("cookies", "w").write(cookies_out)

    
    if not checkusaid['data']['user'] is None:
      user1 = checkusaid['data']['user']['username']
      if checkusaid['data']['user']['premium'] == '':
        status = 'Free Member'
      else:
        if checkusaid['data']['user']['access_type'] == 'premium_plus':
          status = 'Premium Plus'
        else:
          status = 'Premium'
    
    #print(1,[status, user1])
    return [status, user1]
Example #17
def create_sess_id(usa_=False, auth=''):
    session = requests.session()
    proxies = {}
    device_id = ''.join(random.sample(string.ascii_letters + string.digits,
                                      32))
    headers = {'Referer': 'http://crunchyroll.com/'}
    payload = {
        'device_id': device_id,
        'api_ver': '1.0',
        'device_type': 'com.crunchyroll.crunchyroid',
        'access_token': 'WveH9VkPLrXvuNm',
        'version': '2313.8',
        'locale': 'jaJP',
        'duration': '9999999999'
    }
    if auth != '':
        payload.update({'auth': auth})

    if usa_:
        sess_id_data = {'session_id': ''}
        ### First Method
        p_usa_session_post = requests.Request(
            'GET',
            'https://api.crunchyroll.com/start_session.0.json',
            params=payload).prepare()
        #encoded_usa_session_post_url = urllib.parse.quote(p_usa_session_post.url, safe='')
        google_p_params = {'container': 'focus', 'url': p_usa_session_post.url}
        retries = 5
        retries_o = retries + 1
        while retries >= 0:
            print('using g_proxy retry #{}'.format(retries_o - retries))
            usa_session_post = session.post(
                'https://images-focus-opensocial.googleusercontent.com/gadgets/proxy',
                params=google_p_params,
                headers=headers)
            #print(usa_session_post.json())
            if usa_session_post.status_code == 200:
                break
            else:
                retries -= 1
                time.sleep(30)  # 30-second pause so we do not overload the server
        try:
            if usa_session_post.json()['error'] != "true":
                sess_id_data = usa_session_post.json()['data']
        except:
            try:
                print(h.handle(usa_session_post.text))
            except:
                print(usa_session_post.content)
                #exit()

        if sess_id_data['session_id'] == '':  ### Second Method
            #print("Second Method")
            for prxy_ in get_proxy(['HTTPS'], ['US']):
                proxies = {'https': prxy_}
                try:
                    usa_session_post = session.post(
                        'https://api.crunchyroll.com/start_session.0.json',
                        proxies=proxies,
                        params=payload).json()
                    sess_id_data = usa_session_post['data']
                except:
                    pass

        if sess_id_data['session_id'] == '':  ### Third Method
            #print("Third Method")
            try:
                usa_session_post = session.get(
                    'http://rssfeedfilter.netne.net/').json()
                sess_id_data['session_id'] = usa_session_post['sessionId']
            except:
                print('\x1b[31m' + 'Could Not Create USA Session' + '\x1b[0m')
                print(
                    '\x1b[31m' +
                    'You Will Not Be Able to Download USA-Locked Anime at the Moment'
                    + '\x1b[0m')
                print('\x1b[31m' + 'Try Again Later' + '\x1b[0m')

    else:
        sess_id_data = {'session_id': ''}
        if config()['proxy'] != '':
            proxy_ = get_proxy(['HTTPS'], [config()['proxy']])
            try:
                proxies = {'http': proxy_[0]}
            except:
                proxies = {}
        try:
            sess_id_data = session.post(
                'http://api.crunchyroll.com/start_session.0.json',
                proxies=proxies,
                params=payload).json()['data']
        except requests.exceptions.ProxyError:
            sess_id_data = session.post(
                'http://api.crunchyroll.com/start_session.0.json',
                params=payload).json()['data']
        #print(sess_id_data)
    returned_data = {
        'sess_id': sess_id_data['session_id'],
        'device_id': device_id,
        'proxies': proxies,
        'country_code': sess_id_data['country_code']
    }

    return returned_data
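# Usage sketch (illustrative only, not part of the original script): create an
# anonymous non-US session and reuse the returned ids for later API calls.
#   session_info = create_sess_id()
#   print(session_info['sess_id'], session_info['country_code'])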
예제 #18
0
def login(username, password):
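    # Sign in through the mobile API: create a US session, exchange the credentials
    # for an auth token, start a regular session bound to that token, and write
    # everything to the local "cookies" file.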
    session = requests.session()
    device_id = ''.join(random.sample(string.ascii_letters + string.digits,
                                      32))
    device_id_usa = ''.join(
        random.sample(string.ascii_letters + string.digits, 32))
    payload_usa = {
        'device_id': device_id_usa,
        'api_ver': '1.0',
        'device_type': 'com.crunchyroll.crunchyroid',
        'access_token': 'WveH9VkPLrXvuNm',
        'version': '2313.8',
        'locale': 'jaJP',
        'duration': '9999999999'
    }
    payload = {
        'device_id': device_id,
        'api_ver': '1.0',
        'device_type': 'com.crunchyroll.crunchyroid',
        'access_token': 'WveH9VkPLrXvuNm',
        'version': '2313.8',
        'locale': 'jaJP',
        'duration': '9999999999'
    }
    if config()['proxy'] != '':
        proxy_ = get_proxy(['HTTPS'], [config()['proxy']])
        try:
            proxies = {'http': proxy_[0]}
        except:
            proxies = {}
    else:
        proxies = {}

    sess_id_usa = create_sess_id_usa(payload_usa)
    #print(sess_id_usa)
    try:
        sess_id_ = session.post(
            'http://api.crunchyroll.com/start_session.0.json',
            proxies=proxies,
            params=payload).json()['data']['session_id']
    except requests.exceptions.ProxyError:
        sess_id_ = session.post(
            'http://api.crunchyroll.com/start_session.0.json',
            params=payload).json()['data']['session_id']
    auth = ''
    if username != '' and password != '':
        payload = {
            'session_id': sess_id_usa,
            'locale': 'jaJP',
            'duration': '9999999999',
            'account': username,
            'password': password
        }
        try:
            auth = session.post('https://api.crunchyroll.com/login.0.json',
                                params=payload).json()['data']['auth']
        except:
            pass
    userstatus = getuserstatus(False, sess_id_usa)
    if username != '' and userstatus[0] == 'Guest':
        print('Login failed.' if 'idlelib.run' in sys.modules else '\x1b[31m' +
              'Login failed.' + '\x1b[0m')
    # sys.exit()
    else:
        print('Logged in as ' + userstatus[1] +
              ' successfully.' if 'idlelib.run' in
              sys.modules else 'Logged in as ' + '\x1b[32m' + userstatus[1] +
              '\x1b[0m' + ' successfully.')
    payload = {
        'device_id': device_id,
        'api_ver': '1.0',
        'device_type': 'com.crunchyroll.crunchyroid',
        'access_token': 'WveH9VkPLrXvuNm',
        'version': '2313.8',
        'locale': 'jaJP',
        'duration': '9999999999',
        'auth': auth
    }
    try:
        sess_id_ = session.post(
            'http://api.crunchyroll.com/start_session.0.json',
            proxies=proxies,
            params=payload).json()['data']['session_id']
    except requests.exceptions.ProxyError:
        sess_id_ = session.post(
            'http://api.crunchyroll.com/start_session.0.json',
            params=payload).json()['data']['session_id']
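    # The ids are stored in INI form; they are read back elsewhere with ConfigParser (section [COOKIES]).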
    cookies_out = '''[COOKIES]
device_id = ''' + device_id + '''
device_id_usa = ''' + device_id_usa + '''
sess_id = ''' + sess_id_ + '''
sess_id_usa = ''' + sess_id_usa + '''
auth = ''' + auth + '''
'''
    # open("cookies", "w").write('[COOKIES]\nsess_id = '+sess_id_+'\nsess_id_usa = '+sess_id_usa+'\nauth = '+auth)
    open("cookies", "w").write(cookies_out)
    return userstatus
예제 #19
0
def settings_():
    while True:
        #slang1, slang2, sforcesub, sforceusa, slocalizecookies, vquality, vonlymainsub, vconnection_n_,vproxy_ = config()
        config_ = config()
        slang1 = config_['language']
        slang2 = config_['language2']
        sforcesub = config_['forcesubtitle']
        sforceusa = config_['forceusa']
        slocalizecookies = config_['localizecookies']
        vquality = config_['video_quality']
        vonlymainsub = config_['onlymainsub']
        vconnection_n_ = config_['connection_n_']
        vproxy_ = config_['proxy']
        vdubfilter = config_['dubfilter']
        slang1 = {u'Español (Espana)' : 'Espanol_Espana', u'Français (France)' : 'Francais', u'Português (Brasil)' : 'Portugues',
            u'English' : 'English', u'Español' : 'Espanol', u'Türkçe' : 'Turkce', u'Italiano' : 'Italiano',
            u'العربية' : 'Arabic', u'Deutsch' : 'Deutsch', u'Русский' : 'Russian'}[slang1]
        slang2 = {u'Español (Espana)' : 'Espanol_Espana', u'Français (France)' : 'Francais', u'Português (Brasil)' : 'Portugues',
            u'English' : 'English', u'Español' : 'Espanol', u'Türkçe' : 'Turkce', u'Italiano' : 'Italiano',
            u'العربية' : 'Arabic', u'Deutsch' : 'Deutsch', u'Русский' : 'Russian'}[slang2]
        if slang1 == 'Espanol_Espana':
            slang1_ = 'Espanol (Espana)'
        else:
            slang1_ = slang1
        if slang2 == 'Espanol_Espana':
            slang2_ = 'Espanol (Espana)'
        else:
            slang2_ = slang2
        seleccion = 0
        try:
            print(idle_cmd_txt_fix('''Options:
0.- Exit
1.- Video Quality = \x1b[32m'''+vquality+'''\x1b[0m
2.- Primary Language = \x1b[32m'''+slang1_+'''\x1b[0m
3.- Secondary Language = \x1b[32m'''+slang2_+'''\x1b[0m
4.- Hard Subtitle = '''+('\x1b[32m'+str(sforcesub)+'\x1b[0m' if sforcesub else '\x1b[31m'+str(sforcesub)+'\x1b[0m')+'''		#The Video will have 1 hard subtitle
5.- USA Proxy = '''+('\x1b[32m'+str(sforceusa)+'\x1b[0m' if sforceusa else '\x1b[31m'+str(sforceusa)+'\x1b[0m')+'''			#use a US session ID
6.- Localize cookies = '''+('\x1b[32m'+str(slocalizecookies)+'\x1b[0m' if slocalizecookies else '\x1b[31m'+str(slocalizecookies)+'\x1b[0m')+'''		#Localize the cookies (Experimental)
7.- Only One Subtitle = '''+('\x1b[32m'+str(vonlymainsub)+'\x1b[0m' if vonlymainsub else '\x1b[31m'+str(vonlymainsub)+'\x1b[0m')+'''		#Only download Primary Language
8.- Dub Filter = '''+('\x1b[32m'+str(vdubfilter)+'\x1b[0m' if vdubfilter else '\x1b[31m'+str(vdubfilter)+'\x1b[0m')+'''		#Ignore dub links when auto-catching
9.- Change the Number of The Download Connection = \x1b[32m'''+str(vconnection_n_)+'''\x1b[0m
10.- Use Proxy (disabled if left blank) = \x1b[32m'''+vproxy_+''' \x1b[0m  #ex: US
11.- Restore Default Settings
> '''))
            seleccion = int(input('> '))
        except:
            print(idle_cmd_txt_fix("\x1b[31m"+"ERROR: Invalid option."+"\x1b[0m"))
            continue
        if seleccion == 1 :
            vquality = videoquality_()
            defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_, vproxy_, vdubfilter)
            continue
        elif seleccion == 2 :
            slang1 = Languages_('slang1')
            defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_, vproxy_, vdubfilter)
            continue
        elif seleccion == 3 :
            slang2 = Languages_('slang2')
            defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_, vproxy_, vdubfilter)
            continue
        elif seleccion == 4 :
            if sforcesub:
                sforcesub = False
            else:
                sforcesub = True
            defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_, vproxy_, vdubfilter)
            continue
        elif seleccion == 5 :
            if sforceusa:
                sforceusa = False
            else:
                sforceusa = True
            defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_, vproxy_, vdubfilter)
            continue
        elif seleccion == 6 :
            if slocalizecookies:
                slocalizecookies = False
            else:
                slocalizecookies = True
            defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_, vproxy_, vdubfilter)
            continue
        elif seleccion == 7 :
            if vonlymainsub:
                vonlymainsub = False
            else:
                vonlymainsub = True
            defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_, vproxy_, vdubfilter)
            continue
        elif seleccion == 8 :
            if vdubfilter:
                vdubfilter = False
            else:
                vdubfilter = True
            defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_, vproxy_, vdubfilter)
            continue
        elif seleccion == 9 :
            vconnection_n_ = input(u'Please Input The Download Connection Number: ')
            defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_, vproxy_, vdubfilter)
            continue
        elif seleccion == 10 :
            vproxy_ = input(u'Please Input The Proxy: ')
            defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_, vproxy_, vdubfilter)
            getuserstatus(True)
            continue
        elif seleccion == 11 :
            defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa, ilocalizecookies, ionlymainsub, iconnection_n_, iproxy_, idubfilter)
            continue
        elif seleccion == 0 :
            break
        else:
            print(idle_cmd_txt_fix("\x1b[31m"+"ERROR: Invalid option."+"\x1b[0m"))
            continue
def settings_():
    slang1, slang2, sforcesub, sforceusa, slocalizecookies, vquality, vonlymainsub = altfuncs.config()
    slang1 = {u'Español (Espana)' : 'Espanol_Espana', u'Français (France)' : 'Francais', u'Português (Brasil)' : 'Portugues',
            u'English' : 'English', u'Español' : 'Espanol', u'Türkçe' : 'Turkce', u'Italiano' : 'Italiano',
            u'العربية' : 'Arabic', u'Deutsch' : 'Deutsch'}[slang1]
    slang2 = {u'Español (Espana)' : 'Espanol_Espana', u'Français (France)' : 'Francais', u'Português (Brasil)' : 'Portugues',
            u'English' : 'English', u'Español' : 'Espanol', u'Türkçe' : 'Turkce', u'Italiano' : 'Italiano',
            u'العربية' : 'Arabic', u'Deutsch' : 'Deutsch'}[slang2]
    if slang1 == 'Espanol_Espana':
        slang1_ = 'Espanol (Espana)'
    else:
        slang1_ = slang1
    if slang2 == 'Espanol_Espana':
        slang2_ = 'Espanol (Espana)'
    else:
        slang2_ = slang2
    seleccion = 0
    print '''Options:
0.- Exit
1.- Video Quality = '''+vquality+'''
2.- Primary Language = '''+slang1_+'''
3.- Secondary Language = '''+slang2_+'''
4.- Force Subtitle = '''+str(sforcesub)+'''		#Use --forced-track in Subtitle
5.- USA Proxy = '''+str(sforceusa)+'''			#use a US session ID
6.- Localize cookies = '''+str(slocalizecookies)+'''		#Localize the cookies (Experiment)
7.- Only One Subtitle = '''+str(vonlymainsub)+'''		#Only download Primary Language
8.- Restore Default Settings
'''
    try:
        seleccion = int(input("> "))
    except:
        print "ERROR: Invalid option."
        settings_()
    if seleccion == 1 :
        vquality = videoquality_()
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 2 :
        slang1 = Languages_('slang1')
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 3 :
        slang2 = Languages_('slang2')
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 4 :
        if sforcesub:
            sforcesub = False
        else:
            sforcesub = True
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 5 :
        if sforceusa:
            sforceusa = False
        else:
            sforceusa = True
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 6 :
        if slocalizecookies:
            slocalizecookies = False
        else:
            slocalizecookies = True
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 7 :
        if vonlymainsub:
            vonlymainsub = False
        else:
            vonlymainsub = True
        defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
        settings_()
    elif seleccion == 8 :
        defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa, ilocalizecookies, ionlymainsub)
        settings_()
    elif seleccion == 0 :
        pass
    else:
        print "ERROR: Invalid option."
        settings_()
예제 #21
0
def decode(argv_=''):
    print('''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
''')
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub, connection_n_, proxy_ = config(
    )
    if argv_ == '':
        argv_ = input('Please enter Crunchyroll video URL:\n')
    #print(argv_, re.findall('https?:\/\/www\.crunchyroll\.com\/.+\/.+-(\d*)',argv_))
    if re.findall('https?:\/\/www\.crunchyroll\.com\/.+\/.+-(\d*)',
                  argv_) == []:
        print(idle_cmd_txt_fix("\x1b[31m" + "ERROR: Invalid URL." + "\x1b[0m"))
        exit()
    #html = gethtml(argv_)
    #print str(argv_)[:15]

    #if html == '':

    #with open('.\html_ex.txt', 'r') as myfile:
    #    html = myfile.read().strip()
    #import urllib
    #html = urllib.urlopen('E:\+Jwico\Manual & Catalog\a\l\z\project\Military! Episode 1 - Watch on Crunchyroll.html').read()
    #   with open("..\..\Military! Episode 1 - Watch on Crunchyroll.html", 'r') as myfile:
    #       html = myfile.read()
    #BeautifulSoup(unicode(html, errors='ignore')).get_text()
    #html = BeautifulSoup(open('.\html_ex.txt', 'r', 'utf-8').read()).get_text()
    #print html
    '''
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    title = title.replace(' - Watch on Crunchyroll', '')

    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

    warnings.simplefilter("ignore")
    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))
    warnings.simplefilter("default")

    ### End stolen code ###

    if len(os.path.join(os.path.abspath('export'), title + '.ass')) > 255:
        eps_num = re.findall('([0-9].*?)$', title)[0]
        title = title[:246-len(os.path.join(os.path.abspath('export')))-len(eps_num)] + '~ Ep' +eps_num
	
    print os.path.join(os.path.abspath('export'), title +'.ass')
    '''
    media_id = re.findall('https?:\/\/www\.crunchyroll\.com\/.+\/.+-(\d*)',
                          argv_)[0]
    #xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')
    xmlconfig = getxml('RpcApiVideoPlayer_GetStandardConfig', media_id)
    #print xmlconfig
    #print xmlconfig['subtitle']
    if xmlconfig['subtitle'] == []:
        print('The video has hardcoded subtitles.')
        hardcoded = True
        sub_id = False
    else:
        #lang_iso = {'English (US)':'eng',u'Espa\xc3\xb1ol':'spa',u'Espa\xc3\xb1ol (Espa\xc3\xb1a)':'spa',u'Fran\xc3\xa7ais (France)':'fre',u'Portugu\xc3\xaas (Brasil)':'por','Italiano':'ita','Deutsch':'deu'}
        #lang_iso = {'English (US)':'eng',u'Espa\xf1ol':'spa',u'Espa\xf1ol (Espa\xf1a)':'spa',u'Fran\xe7ais (France)':'fre',u'Portugu\xeas (Brasil)':'por','Italiano':'ita','Deutsch':'deu'}
        lang_iso = {
            'English (US)': 'eng',
            u'Espa\xf1ol': 'spa',
            u'Espa\xf1ol (Espa\xf1a)': 'spa',
            u'Fran\xe7ais (France)': 'fre',
            u'Portugu\xeas (Brasil)': 'por',
            'Italiano': 'ita',
            'Deutsch': 'deu',
            'العربية': 'ara',
            'Русский': 'rus'
        }

        #    sub_id3 = [word.replace('[l`rby@]','ara') for word in sub_id3]
        for i in xmlconfig['subtitle']:
            sub_file_ = dircheck([
                os.path.abspath('export') + '\\',
                xmlconfig['media_metadata']['series_title'], ' Episode',
                ' - ' + xmlconfig['media_metadata']['episode_number'],
                ' - ' + xmlconfig['media_metadata']['episode_title'],
                '[' + lang_iso[re.findall('\[(.+)\]', i[1])[0]] + ']',
                '[' + re.findall('\[(.+)\]', i[1])[0] + ']', '.ass'
            ], ['True', 'True', 'False', 'True', 1, 'True', 'False', 'True'],
                                 240)
            #print os.path.join('export', xmlconfig['media_metadata']['series_title'] + ' Episode ' + xmlconfig['media_metadata']['episode_number']+'['+lang_iso[re.findall('\[(.+)\]',i[1])[0]]+']['+re.findall('\[(.+)\]',i[1])[0]+'].ass')
            #xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', sub_id)
            print("Attempting to download " + re.findall('\[(.+)\]', i[1])[0] +
                  " subtitle...")
            xmlsub = getxml('RpcApiSubtitle_GetXml', i[0])
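            # CrunchyDec converts the subtitle XML into an .ass script (returns None on failure).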
            formattedsubs = CrunchyDec().returnsubs(xmlsub)
            if formattedsubs is None:
                continue
            #subfile = open(eptitle + '.ass', 'wb')
            subfile = open(sub_file_, 'wb')
            subfile.write(formattedsubs.encode('utf8'))
            subfile.close()

    pass
예제 #22
0
def ultimate(page_url, seasonnum, epnum):
    global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2, onlymainsub
    #global player_revision

    print '''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')

    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if not page_url.startswith('http://') and not page_url.startswith('https://'):
            page_url = 'http://' + page_url
        try:
            int(page_url[-6:])
        except ValueError:
            if bool(seasonnum) and bool(epnum):
                page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
            elif bool(epnum):
                page_url = altfuncs.vidurl(page_url, 1, epnum)
            else:
                page_url = altfuncs.vidurl(page_url, False, False)

    #subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)

    # ----------

    #lang1, lang2 = altfuncs.config()
    #lang1, lang2, forcesub = altfuncs.config()
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)

    #h = HTMLParser.HTMLParser()
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.path.join('export', title+'.flv')) > 255:
        title = re.findall('^(.+?) \- ', title)[0]

    # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
    # replace('?', '.').replace('"', "''").replace('|', '-').replace('&quot;',"''").strip()

    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    #subprocess.call('title ' + title.replace('&', '^&'), shell=True)

    # ----------

    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')

    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass

    # ----------

    host = xmlconfig.host and xmlconfig.host.string
    filen = xmlconfig.file and xmlconfig.file.string

    if not (host or filen):
        print 'Downloading 2 minute preview.'

    media_id = xmlconfig.media_id.string
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id), 'xml')
    host = xmlconfig.host.string
    filen = xmlconfig.file.string

    # ----------
    if 'subs' in sys.argv:
        subtitles(title)
        subs_only = True
        hardcoded = True  # bleh
    else:
        page_url2 = page_url
        if host:
            if re.search('fplive\.net', host):
                url1 = re.findall('.+/c[0-9]+', host).pop()
                url2 = re.findall('c[0-9]+\?.+', host).pop()
            else:
                url1 = re.findall('.+/ondemand/', host).pop()
                url2 = re.findall('ondemand/.+', host).pop()
            video()
            video_input = os.path.join("export", title + '.flv')
        else:
            video_input = os.path.join("export", title + '.ts')
            video_hls(filen, video_input)

        heightp = '360p' if xmlconfig.height.string == '368' else '{0}p'.format(xmlconfig.height.string)  # This is less likely to fail
        subtitles(title)

        print 'Starting mkv merge'
        mkvmerge = os.path.join("video-engine", "mkvmerge.exe")
        filename_output = os.path.join("export", title + '[' + heightp.strip() +'].mkv')
        subtitle_input = []
        if os.path.isfile(mkvmerge):
            with_wine = os.name != 'nt'
        else:
            mkvmerge = "mkvmerge"
            with_wine = False
        cmd = [mkvmerge, "-o", filename_output, '--language', '0:jpn', '--language', '1:jpn', '-a', '1', '-d', '0', video_input, '--title', title]
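        # Base mkvmerge command: tag the audio/video tracks as Japanese and set the output title; subtitle tracks are appended per language below.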
        if with_wine:
            cmd.insert(0, 'wine')
        if not hardcoded:
            sublang = {u'Español (Espana)': 'spa_spa', u'Français (France)': 'fre', u'Português (Brasil)': 'por',
                       u'English': 'eng', u'Español': 'spa', u'Türkçe': 'tur', u'Italiano': 'ita',
                       u'العربية': 'ara', u'Deutsch': 'deu'}[lang]
            for i in sub_id2:
                sublangc=sub_id5.pop(0)
                sublangn=sub_id6.pop(0)

                if onlymainsub and sublangc != sublang:
                    continue

                filename_subtitle = os.path.join("export", title+'['+sublangc+']'+sublangn+'.ass')
                if not os.path.isfile(filename_subtitle):
                    continue

                cmd.extend(['--language', '0:' + sublangc.replace('spa_spa','spa')])

                if sublangc == sublang:
                    cmd.extend(['--default-track', '0:yes'])
                else:
                    cmd.extend(['--default-track', '0:no'])
                if forcesub:
                    cmd.extend(['--forced-track', '0:yes'])
                else:
                    cmd.extend(['--forced-track', '0:no'])

                cmd.extend(['--track-name', '0:' + sublangn])
                cmd.extend(['-s', '0'])
                cmd.append(filename_subtitle)
                subtitle_input.append(filename_subtitle)
        subprocess.call(cmd)
        print 'Merge process complete'
        subs_only = False

    print
    print '----------'
    print

    print 'Starting Final Cleanup'
    if not subs_only:
        os.remove(video_input)
    if not hardcoded or not subs_only:
        #os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.ass')
        for f in subtitle_input:
            os.remove(f)
    print 'Cleanup Complete'
def ultimate(page_url='', seasonnum=0, epnum=0, sess_id_=''):
    #global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2, onlymainsub
    #global player_revision

    print('''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98b 

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
''')
    if page_url == '':
        #page_url = input('Please enter Crunchyroll video URL:\n')
        #page_url = 'https://www.crunchyroll.com/the-rising-of-the-shield-hero/episode-10-in-the-midst-of-turmoil-781157'
        #page_url = 'http://www.crunchyroll.com/military/episode-1-the-mission-begins-668503'
        page_url = 'https://www.crunchyroll.com/mob-psycho-100/episode-11-guidance-psychic-sensor-780930'

    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if re.findall(r'https?:\/\/', page_url) == []:
            page_url = 'http://' + page_url
        '''
        try:
            int(page_url[-6:])
        except ValueError:
            if bool(seasonnum) and bool(epnum):
                page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
            elif bool(epnum):
                page_url = altfuncs.vidurl(page_url, 1, epnum)
            else:
                page_url = altfuncs.vidurl(page_url, False, False)
        '''

    # ----------

    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub, connection_n_, proxy_ = config(
    )
    if sess_id_ == '':
        cookies_ = ConfigParser()
        cookies_.read('cookies')
        if forceusa:
            sess_id_ = cookies_.get('COOKIES', 'sess_id_usa')
        else:
            sess_id_ = cookies_.get('COOKIES', 'sess_id')
    media_id = re.findall(r'https?:\/\/www\.crunchyroll\.com\/.+\/.+-(\d*)',
                          page_url)[0]
    #htmlconfig = BeautifulSoup(gethtml(page_url), 'html')
    htmlconfig = json.loads(
        re.findall(r'vilos\.config\.media = ({.*})', gethtml(page_url))[0])
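    # The page embeds a vilos.config.media JSON object listing the episode's streams and subtitles.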
    stream_url = {}
    for i in htmlconfig['streams']:
        stream_url.update({i['hardsub_lang']: i['url']})
    for i in htmlconfig['subtitles']:
        print(i["language"], i["url"])
    for i in stream_url:
        print(i, stream_url[i])
    media_info = getxml('RpcApiVideoPlayer_GetStandardConfig', media_id)
    #print(media_info)
    #print(media_info['file'])
    #print(media_info['media_metadata']['series_title'])
    #print(media_info['media_metadata']['episode_number'])
    #print(media_info['media_metadata']['episode_title'])
    title: str = '%s Episode %s - %s' % (
        media_info['media_metadata']['series_title'],
        media_info['media_metadata']['episode_number'],
        media_info['media_metadata']['episode_title'])
    if len(os.path.join('export', title + '.flv')
           ) > 255 or media_info['media_metadata']['episode_title'] == '':
        title: str = '%s Episode %s' % (
            media_info['media_metadata']['series_title'],
            media_info['media_metadata']['episode_number'])

    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings and improved to include the backslash###
    rep = {
        ' / ': ' - ',
        '/': ' - ',
        ':': '-',
        '?': '.',
        '"': "''",
        '|': '-',
        '&quot;': "''",
        'a*G': 'a G',
        '*': '#',
        '\u2026': '...',
        ' \ ': ' - '
    }
    rep = dict((re.escape(k), v) for k, v in rep.items())
    pattern = re.compile("|".join(rep.keys()))
    title_shell = unidecode(
        pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    # ----------
    print(format('Now Downloading - ' + title_shell))
    #video_input = os.path.join("export", title + '.ts')
    video_input = dircheck([
        os.path.abspath('export') + '\\',
        media_info['media_metadata']['series_title'], ' Episode',
        ' - ' + media_info['media_metadata']['episode_number'],
        ' - ' + media_info['media_metadata']['episode_title'], '.ts'
    ], [
        'True',
        'True',
        'False',
        'True',
        1,
        'True',
    ], 240)
예제 #24
0
def decode(page_url):
    print '''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')

    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config(
    )
    #player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)

    #h = HTMLParser.HTMLParser()
    title = re.findall('<title>(.+?)</title>',
                       html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.path.join('export', title + '.ass')) > 255:
        title = re.findall('^(.+?) \- ', title)[0]

    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {
        ' / ': ' - ',
        '/': ' - ',
        ':': '-',
        '?': '.',
        '"': "''",
        '|': '-',
        '&quot;': "''",
        'a*G': 'a G',
        '*': '#',
        u'\u2026': '...'
    }

    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(
        altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id),
        'xml')

    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass

    xmllist = altfuncs.getxml('RpcApiSubtitle_GetListing', media_id)
    xmllist = unidecode(xmllist).replace('><', '>\n<')

    if '<media_id>None</media_id>' in xmllist:
        print 'The video has hardcoded subtitles.'
        hardcoded = True
        sub_id = False
    else:
        try:
            sub_id2 = re.findall("id=([0-9]+)", xmllist)
            sub_id3 = re.findall("title='(\[.+\]) ", xmllist)
            sub_id4 = re.findall("title='(\[.+\]) ", xmllist)
            hardcoded = False
        except IndexError:
            print "The video's subtitles cannot be found, or are region-locked."
            hardcoded = True
            sub_id = False
    sub_id3 = [word.replace('[English (US)]', 'eng') for word in sub_id3]
    sub_id3 = [word.replace('[Deutsch]', 'deu') for word in sub_id3]
    sub_id3 = [word.replace('[Portugues (Brasil)]', 'por') for word in sub_id3]
    sub_id3 = [word.replace('[Francais (France)]', 'fre') for word in sub_id3]
    sub_id3 = [word.replace('[Espanol (Espana)]', 'spa') for word in sub_id3]
    sub_id3 = [word.replace('[Espanol]', 'spa') for word in sub_id3]
    sub_id3 = [word.replace('[Italiano]', 'ita') for word in sub_id3]
    sub_id3 = [word.replace('[l`rby@]', 'ara') for word in sub_id3]
    #sub_id4 = [word.replace('[l`rby@]',u'[العربية]') for word in sub_id4]
    sub_id4 = [word.replace('[l`rby@]', u'[Arabic]')
               for word in sub_id4]  #else:
    #	try:
    #		sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang1)), xmllist)[0]
    #		hardcoded = False
    #		lang = lang1
    #	except IndexError:
    #		try:
    #			sub_id = re.findall("id=([0-9]+)' title='\["+re.escape(unidecode(lang2)), xmllist)[0]
    #			print 'Language not found, reverting to ' + lang2 + '.'
    #			hardcoded = False
    #			lang = lang2
    #		except IndexError:
    #			try:
    #				sub_id = re.findall("id=([0-9]+)' title='\[English", xmllist)[0]  # default back to English
    #				print 'Backup language not found, reverting to English.'
    #				hardcoded = False
    #				lang = 'English'
    #			except IndexError:
    #				print "The video's subtitles cannot be found, or are region-locked."
    #				hardcoded = True
    #				sub_id = False
    if not hardcoded:
        for i in sub_id2:
            #xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', sub_id)
            xmlsub = altfuncs.getxml('RpcApiSubtitle_GetXml', i)
            formattedsubs = CrunchyDec().returnsubs(xmlsub)
            if formattedsubs is None:
                continue
            #subfile = open(eptitle + '.ass', 'wb')
            subfile = open(
                os.path.join(
                    'export', title + '[' + sub_id3.pop(0) + ']' +
                    sub_id4.pop(0) + '.ass'), 'wb')
            subfile.write(formattedsubs.encode('utf-8-sig'))
            subfile.close()
        #shutil.move(title + '.ass', os.path.join(os.getcwd(), 'export', ''))

    print 'Subtitles for ' + title + ' have been downloaded'
예제 #25
0
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
'''

# ----------

print 'Booting up...'
lang = altfuncs.config()
# http://www.crunchyroll.com/miss-monochrome-the-animation/episode-2-645085
# http://www.crunchyroll.com/naruto-shippuden/episode-136-the-light-dark-of-the-mangekyo-sharingan-535200
# page_url = 'http://www.crunchyroll.com/media-535200'

try:
    page_url = sys.argv[1]
except IndexError:
    page_url = raw_input('Please enter Crunchyroll video URL:\n')

try:
    seasonnum, epnum = sys.argv[2:4]
except ValueError:
    try:
        epnum = str(int(sys.argv[2]))
        seasonnum = ''
예제 #26
0
def ultimate(page_url, seasonnum, epnum):
    global url1, url2, filen, player_revision, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2

    print '''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
'''
    if page_url == '':
        page_url = raw_input('Please enter Crunchyroll video URL:\n')
	
    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if not page_url.startswith('http://') and not page_url.startswith('https://'):
            page_url = 'http://' + page_url
        try:
            int(page_url[-6:])
        except ValueError:
            if bool(seasonnum) and bool(epnum):
                page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
            elif bool(epnum):
                page_url = altfuncs.vidurl(page_url, 1, epnum)
            else:
                page_url = altfuncs.vidurl(page_url, False, False)

    subprocess.call('title ' + page_url.replace('http://www.crunchyroll.com/', ''), shell=True)

    # ----------

    #lang1, lang2 = altfuncs.config()
    #lang1, lang2, forcesub = altfuncs.config()
    lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
    player_revision = altfuncs.playerrev(page_url)
    html = altfuncs.gethtml(page_url)

    h = HTMLParser.HTMLParser()
    title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
    if len(os.getcwd()+'\\export\\'+title+'.flv') > 255:
        title = re.findall('^(.+?) \- ', title)[0]

    # title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
    # replace('?', '.').replace('"', "''").replace('|', '-').replace('&quot;',"''").strip()
    
    ### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
    rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

    rep = dict((re.escape(k), v) for k, v in rep.iteritems())
    pattern = re.compile("|".join(rep.keys()))
    title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

    ### End stolen code ###

    subprocess.call('title ' + title.replace('&', '^&'), shell=True)

    # ----------

    media_id = page_url[-6:]
    xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')

    try:
        if '4' in xmlconfig.find_all('code')[0]:
            print xmlconfig.find_all('msg')[0].text
            sys.exit()
    except IndexError:
        pass

    vid_id = xmlconfig.find('media_id').string

    # ----------

    try:
        host = xmlconfig.find('host').string
    except AttributeError:
        print 'Downloading 2 minute preview.'
        media_id = xmlconfig.find('media_id').string
        xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoEncode_GetStreamInfo', media_id), 'xml')
        host = xmlconfig.find('host').string

    if re.search('fplive\.net', host):
        url1 = re.findall('.+/c[0-9]+', host).pop()
        url2 = re.findall('c[0-9]+\?.+', host).pop()
    else:
        url1 = re.findall('.+/ondemand/', host).pop()
        url2 = re.findall('ondemand/.+', host).pop()
    filen = xmlconfig.find('file').string

    # ----------
    if 'subs' in sys.argv:
        subtitles(title)
        subs_only = True
        hardcoded = True  # bleh
    else:
        page_url2 = page_url
        video()
        #heightp = subprocess.Popen('"video-engine\MediaInfo.exe" --inform=Video;%Height% ".\export\\' + title + '.flv"' ,shell=True , stdout=subprocess.PIPE).stdout.read()
        heightp = {'71' : 'android', '60' : '360p', '61' : '480p',
                 '62' : '720p', '80' : '1080p', '0' : 'highest'}[xmlconfig.find('video_encode_quality').string]
        subtitles(title)
        subtitlefilecode=''
        #shutil.move(title + '.flv', os.path.join(os.getcwd(), 'export', ''))


        print 'Starting mkv merge'
        if hardcoded:
            subprocess.call('"video-engine\mkvmerge.exe" -o ".\export\\' + title + '[' + heightp.strip() +'p].mkv" --language 1:jpn -a 1 -d 0 ' +
                            '".\export\\' + title + '.flv"' +' --title "' + title +'"')
        else:
            sublang = {u'Español (Espana)': 'spa_spa', u'Français (France)': 'fre', u'Português (Brasil)': 'por',
                       u'English': 'eng', u'Español': 'spa', u'Türkçe': 'tur', u'Italiano': 'ita',
                       u'العربية': 'ara', u'Deutsch': 'deu'}[lang]
    #		defaulttrack = False
            #print lang.encode('utf-8')
            #print sub_id5
            #print sub_id6
            for i in sub_id2:
                defaultsub = ''
                sublangc = sub_id5.pop(0)
                sublangn = sub_id6.pop(0)
                #print forcesub
                if not forcesub:
                    if sublangc == sublang:
                        defaultsub = ' --default-track 0:yes --forced-track 0:no'
                    else:
                        defaultsub = ' --default-track 0:no --forced-track 0:no'
                else:
                    if sublangc == sublang:
                        defaultsub = ' --default-track 0:yes --forced-track 0:yes'
                    else:
                        defaultsub = ' --default-track 0:no --forced-track 0:no'
                if not onlymainsub:
                    subtitlefilecode = subtitlefilecode + ' --language 0:' + sublangc.replace('spa_spa', 'spa') + defaultsub + ' --track-name 0:"' + sublangn + '" -s 0 ".\export\\' + title + '[' + sublangc + ']' + sublangn + '.ass"'
                else:
                    if sublangc == sublang:
                        subtitlefilecode = subtitlefilecode + ' --language 0:' + sublangc.replace('spa_spa', 'spa') + defaultsub + ' --track-name 0:"' + sublangn + '" -s 0 ".\export\\' + title + '[' + sublangc + ']' + sublangn + '.ass"'
    #        subprocess.call('"video-engine\mkvmerge.exe" -o ".\export\\' + title + '.mkv" --language 1:jpn -a 1 -d 0 ' +
    #                        '".\export\\' + title + '.flv" --language 0:' + sublang + ' -s 0 ".\export\\'+title+'.ass"')
    #        print '"video-engine\mkvmerge.exe" -o ".\export\\' + title + '.mkv" --language 0:jpn --language 1:jpn -a 1 -d 0 ' + '".\export\\' + title + '.flv"' + subtitlefilecode +' --title "' + title +'"'
            mkvcmd='"video-engine\mkvmerge.exe" -o ".\export\\' + title + '[' + heightp.strip() +'].mkv" --language 0:jpn --language 1:jpn -a 1 -d 0 ' + '".\export\\' + title + '.flv"' + subtitlefilecode +' --title "' + title +'"'
    #        print mkvcmd
            #print subtitlefilecode
            subprocess.call(mkvcmd)
        print 'Merge process complete'
        subs_only = False

    print
    print '----------'
    print

    print 'Starting Final Cleanup'
    if not subs_only:
        os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.flv')
    if not hardcoded or not subs_only:
        #os.remove(os.path.join(os.getcwd(), 'export', '') + title + '.ass')
        for root, dirs, files in os.walk('export'):
            for file in filter(lambda x: re.match(title +'\[.+\]'+ '.ass', x), files):
                os.remove(os.path.join(root, file))
    print 'Cleanup Complete'
예제 #27
0
def ultimate(page_url='', seasonnum=0, epnum=0, sess_id_=''):
    #global url1, url2, filen, title, media_id, lang1, lang2, hardcoded, forceusa, page_url2, onlymainsub
    #global player_revision

    print('''
--------------------------
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98b 

Crunchyroll hasn't changed anything.

If you don't have a premium account, go and sign up for one now. It's well worth it, and supports the animators.

----------
Booting up...
''')
    if page_url == '':
        page_url = input('Please enter Crunchyroll video URL:\n')
        #page_url = 'https://www.crunchyroll.com/the-rising-of-the-shield-hero/episode-11-catastrophe-returns-781158'
        #page_url = 'http://www.crunchyroll.com/military/episode-1-the-mission-begins-668503'
        #page_url = 'https://www.crunchyroll.com/mob-psycho-100/episode-11-guidance-psychic-sensor-780930'

    try:
        int(page_url)
        page_url = 'http://www.crunchyroll.com/media-' + page_url
    except ValueError:
        if not re.findall(r'https?://', page_url):
            page_url = 'http://' + page_url
        '''
        try:
            int(page_url[-6:])
        except ValueError:
            if bool(seasonnum) and bool(epnum):
                page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
            elif bool(epnum):
                page_url = altfuncs.vidurl(page_url, 1, epnum)
            else:
                page_url = altfuncs.vidurl(page_url, False, False)
        '''

    # ----------

    #lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub, connection_n_, proxy_ = config()
    config_ = config()
    if not os.path.lexists(config_['download_dirctory']):
        os.makedirs(config_['download_dirctory'])
    #print(config_)
    forcesub = config_['forcesubtitle']
    if sess_id_ == '':
        cookies_ = ConfigParser()
        cookies_.read('cookies')
        if config_['forceusa']:
            sess_id_ = cookies_.get('COOKIES', 'sess_id_usa')
        else:
            sess_id_ = cookies_.get('COOKIES', 'sess_id')
    media_id = re.findall(r'https?://www\.crunchyroll\.com/.+/.+-(\d*)',
                          page_url)[0]
    #htmlconfig = BeautifulSoup(gethtml(page_url), 'html')
    html_page_ = gethtml(page_url)
    #print(re.findall(r'vilos\.config\.media = ({.*})',html_page_))
    htmlconfig = json.loads(
        re.findall(r'vilos\.config\.media = ({.*})', html_page_)[0])
    htmlconfig['metadata']['series_title'] = json.loads(
        re.findall(r'vilos\.config\.analytics = ({.*})',
                   html_page_)[0])['media_reporting_parent']['title']
    stream_url = {}
    stream_url_dash = {}
    # print(htmlconfig)
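    # Collect manifest URLs keyed by hardsub language; a key of None appears to mark the softsub (no burned-in subtitles) stream.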
    for i in htmlconfig['streams']:
        if i['format'] == 'adaptive_hls':
            stream_url.update({i['hardsub_lang']: i['url']})
        elif i['format'] == 'adaptive_dash':
            stream_url_dash.update({i['hardsub_lang']: i['url']})
        #stream_url.update({i['hardsub_lang']: i['url']})
    #for i in htmlconfig['subtitles']:
    #    print(i["language"], i["url"])
    #for i in stream_url:
    #    print(i, stream_url[i])
    #media_info = getxml('RpcApiVideoPlayer_GetStandardConfig', media_id)
    #print(media_info)
    #print(media_info['file'])
    #print(media_info['media_metadata']['series_title'])
    #print(media_info['media_metadata']['episode_number'])
    #print(media_info['media_metadata']['episode_title'])
    if htmlconfig['metadata']['episode_number'] != '':
        title = '%s Episode %s - %s' % (
            htmlconfig['metadata']['series_title'],
            htmlconfig['metadata']['episode_number'],
            htmlconfig['metadata']['title'])
        # print(title)
        title = clean_text(title)
        # print(title)
    else:
        title = '%s - %s' % (htmlconfig['metadata']['series_title'],
                             htmlconfig['metadata']['title'])
        # print(title)
        title = clean_text(title)
        # print(title)
    #title: str = re.findall(r'var mediaMetadata = \{.*?name":"(.+?)",".+?\};',html_page_)[0]
    #if len(os.path.join('export', title + '.flv')) > 255 or media_info['media_metadata']['episode_title'] is '':
    #    title = clean_text('%s Episode %s' % (media_info['media_metadata']['series_title'], media_info['media_metadata']['episode_number']))
    #print(config_['language2'])
    #Loc_lang = {u'Español (Espana)': 'esES', u'Français (France)': 'frFR', u'Português (Brasil)': 'ptBR',
    #        u'English': 'enUS', u'Español': 'esLA', u'Türkçe': 'trTR', u'Italiano': 'itIT',
    #        u'العربية': 'arME', u'Deutsch': 'deDE', u'Русский' : 'ruRU'}
    Loc_lang = {
        'Espanol_Espana': 'esES',
        'Francais': 'frFR',
        'Portugues': 'ptBR',
        'English': 'enUS',
        'Espanol': 'esLA',
        'Turkce': 'trTR',
        'Italiano': 'itIT',
        'Arabic': 'arME',
        'Deutsch': 'deDE',
        'Russian': 'ruRU'
    }
    Loc_lang_1 = Loc_lang[config_['language']]
    Loc_lang_2 = Loc_lang[config_['language2']]

    #print(Loc_lang_1,Loc_lang_2,stream_url)
    if forcesub:
        try:
            hls_url = stream_url[Loc_lang_1]
            dash_url = stream_url_dash[Loc_lang_1]
        except:
            try:
                hls_url = stream_url[Loc_lang_2]
                dash_url = stream_url_dash[Loc_lang_2]
            except:
                hls_url = stream_url[None]
                dash_url = stream_url_dash[None]
                forcesub = False
    else:
        # print(stream_url)
        try:
            hls_url = stream_url[None]
            dash_url = stream_url_dash[None]
        except:
            try:
                hls_url = stream_url['enUS']
                dash_url = stream_url_dash['enUS']
            except:
                hls_url = stream_url[list(stream_url)[0]]
                dash_url = stream_url_dash[list(stream_url_dash)[0]]

    #print(dash_url)
    hls_url_m3u8 = m3u8.load(hls_url)
    hls_url_parse = {}
    dash_id_parse = {}
    for stream in hls_url_m3u8.playlists:
        hls_url_parse.update(
            {stream.stream_info.resolution[1]: stream.absolute_uri})
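    # hls_url_parse maps each variant's vertical resolution (height) to its absolute URI from the master playlist.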
    if config_['video_quality'] == '1080p':
        try:
            hls_url = hls_url_parse[1080]
        except:
            pass
    elif config_['video_quality'] == '720p':
        try:
            hls_url = hls_url_parse[720]
        except:
            pass
    elif config_['video_quality'] == '480p':
        try:
            hls_url = hls_url_parse[480]
        except:
            pass
    elif config_['video_quality'] == '360p':
        try:
            hls_url = hls_url_parse[360]
        except:
            pass
    elif config_['video_quality'] == '240p':
        try:
            hls_url = hls_url_parse[240]
        except:
            pass
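    # A compact equivalent of the chain above (sketch only): look up the requested
    # height and keep the master playlist URL if that variant is missing.
    #   wanted_height = {'1080p': 1080, '720p': 720, '480p': 480, '360p': 360, '240p': 240}.get(config_['video_quality'])
    #   hls_url = hls_url_parse.get(wanted_height, hls_url)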


    # ----------
    #print(vquality,hls_url)
    print(format('Now Downloading - ' + title))
    #video_input = os.path.join("export", title + '.ts')
    if htmlconfig['metadata']['episode_number'] != '':
        video_input = dircheck([
            os.path.join(os.path.abspath(config_['download_dirctory']), ''),
            clean_text(htmlconfig['metadata']['series_title']), ' Episode',
            ' - ' + clean_text(htmlconfig['metadata']['episode_number']),
            ' - ' + clean_text(htmlconfig['metadata']['title']), '.ts'
        ], [
            'True',
            'True',
            'False',
            'True',
            1,
            'True',
        ], 240)
    else:
        video_input = dircheck([
            os.path.join(os.path.abspath(config_['download_dirctory']), ''),
            clean_text(htmlconfig['metadata']['series_title']),
            ' - ' + clean_text(htmlconfig['metadata']['title']), '.ts'
        ], [
            'True',
            'True',
            1,
            'True',
        ], 240)
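    # Download fallback chain: try the HLS stream first, then the DASH manifest, then hand the URL to youtube-dl as a last resort.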

    download_subprocess_result = 0
    try:
        # assert 1==2
        download_ = video_hls()
        download_subprocess_result = download_.video_hls(
            hls_url, video_input, config_['connection_n_'])
    except AssertionError:
        download_subprocess_result = 1

    if download_subprocess_result != 0:
        try:
            print(
                'It seems there is a problem with the HLS stream; the DASH stream will be used instead'
            )
            # assert 1==2
            download_ = dash_download()
            # print(config_['connection_n_'],config_['video_quality'])
            download_subprocess_result = download_.download(
                dash_url,
                video_input,
                config_['connection_n_'],
                r=config_['video_quality'],
                abr='best')
        except:
            download_subprocess_result = 1

    if download_subprocess_result != 0:
        print(
            'It seems there is a problem with the DASH stream; the external library youtube-dl will be used instead'
        )
        with youtube_dl.YoutubeDL({'logger': MyLogger()}) as ydl:
            dash_info_dict = ydl.extract_info(dash_url, download=False)
        for stream in dash_info_dict['formats']:
            if stream['height'] is not None:
                dash_id_parse.update({stream['height']: stream['format_id']})
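        # dash_id_parse now maps each available height to its youtube-dl format id.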
        # for i in dash_info_dict['formats']:
        #    print(i['format_id'], i['ext'], i['height'], i['tbr'], i['asr'], i['language'], i['format_note'], i['filesize'],
        #          i['vcodec'], i['acodec'], i['format'])
        # for i in hls_url_parse:
        #    print(i,hls_url_parse[i])
        if config_['video_quality'] == '1080p':
            try:
                dash_video_id = dash_id_parse[1080]
            except:
                pass
        elif config_['video_quality'] == '720p':
            try:
                dash_video_id = dash_id_parse[720]
            except:
                pass
        elif config_['video_quality'] == '480p':
            try:
                dash_video_id = dash_id_parse[480]
            except:
                pass
        elif config_['video_quality'] == '360p':
            try:
                dash_video_id = dash_id_parse[360]
            except:
                pass
        elif config_['video_quality'] == '240p':
            try:
                dash_video_id = dash_id_parse[240]
            except:
                pass

        def youtube_dl_proxy(*args, **kwargs):
            import sys
            if 'idlelib.run' in sys.modules:  # code to force this script to only run in console
                try:
                    import run_code_with_console
                    return run_code_with_console.run_code_with_console()
                except:
                    pass  # end of code to force this script to only run in console
            return youtube_dl.YoutubeDL(*args, **kwargs)
            pass

        # youtube_dl_proxy({'format': dash_video_id + ',bestaudio',
        #                    'outtmpl': video_input[:-3] + '.%(ext)s'}).download([dash_url])

        if not 'idlelib.run' in sys.modules:
            with youtube_dl.YoutubeDL({
                    'format': dash_video_id + ',bestaudio',
                    'outtmpl': video_input[:-3] + '.%(ext)s'
            }) as ydl:
                ydl.download([dash_url])
        else:
            youtube_dl_script = '''\
import youtube_dl
with youtube_dl.YoutubeDL(
                {'format': \'''' + dash_video_id + ''',bestaudio', 'outtmpl': r\'''' + video_input[:
                                                                                                   -3] + '''\' + '.%(ext)s'}) as ydl:
                ydl.download([\'\'\'''' + dash_url + '''\'\'\'])
'''
            #print(youtube_dl_script)
            command = 'where'  # Windows
            if os.name != "nt":  # non-Windows
                command = 'which'
            python_path_ = os.path.normpath(
                os.path.join(
                    os.path.split(subprocess.getoutput([command, 'pip3']))[0],
                    '..', 'python.exe'))
            try:
                subprocess.call([python_path_, '-c', youtube_dl_script])
            except FileNotFoundError:  # fix for old Windows versions that don't have the 'where' command
                reg_ = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
                                      r'SOFTWARE\Python\PythonCore')
                python_request_v = [3, 0]
                if len(python_request_v) > 0:
                    if len(python_request_v) < 2:
                        python_request_v += [0]
                    python_request_v = python_request_v[
                        0] + python_request_v[1] / 10
                else:
                    python_request_v = 0.0
                for reg_i in range(0, winreg.QueryInfoKey(reg_)[0]):
                    reg_2 = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
                                           r'SOFTWARE\Python\PythonCore')
                    if float(winreg.EnumKey(reg_2, reg_i)) >= python_request_v and \
                       True if python_request_v == 0.0 else float(winreg.EnumKey(reg_2, reg_i)) < float(
                        round(python_request_v) + 1):
                        reg_2 = winreg.OpenKey(reg_2,
                                               winreg.EnumKey(reg_2, reg_i))
                        reg_2 = winreg.OpenKey(reg_2, r'PythonPath')
                        python_path_ = os.path.normpath(
                            os.path.join(
                                winreg.EnumValue(reg_2, 0)[1].split(';')[0],
                                '..', 'python.exe'))
                subprocess.call([python_path_, '-c', youtube_dl_script])
    """
    if not 'idlelib.run' in sys.modules:
        #video_hls(hls_url, video_input, config_['connection_n_'])
        try:
            #assert 1==2
            download_ = video_hls()
            download_.video_hls(hls_url, video_input, config_['connection_n_'])
        except AssertionError:
            try:
                print('It seems there is a problem with the HLS stream; will use the DASH stream instead')
                #assert 1==2
                download_ = dash_download()
                # print(config_['connection_n_'],config_['video_quality'])
                download_.download(dash_url, video_input, config_['connection_n_'], r=config_['video_quality'], abr='best')
            except:
                print('It seems there is a problem with the DASH stream; will use the external library youtube_dl instead')
                with youtube_dl.YoutubeDL({'logger': MyLogger()}) as ydl:
                    dash_info_dict = ydl.extract_info(dash_url, download=False)
                for stream in dash_info_dict['formats']:
                    if stream['height'] is not None:
                        dash_id_parse.update({stream['height']: stream['format_id']})
                # for i in dash_info_dict['formats']:
                #    print(i['format_id'], i['ext'], i['height'], i['tbr'], i['asr'], i['language'], i['format_note'], i['filesize'],
                #          i['vcodec'], i['acodec'], i['format'])
                # for i in hls_url_parse:
                #    print(i,hls_url_parse[i])
                if config_['video_quality'] == '1080p':
                    try:
                        dash_video_id = dash_id_parse[1080]
                    except:
                        pass
                elif config_['video_quality'] == '720p':
                    try:
                        dash_video_id = dash_id_parse[720]
                    except:
                        pass
                elif config_['video_quality'] == '480p':
                    try:
                        dash_video_id = dash_id_parse[480]
                    except:
                        pass
                elif config_['video_quality'] == '360p':
                    try:
                        dash_video_id = dash_id_parse[360]
                    except:
                        pass
                elif config_['video_quality'] == '240p':
                    try:
                        dash_video_id = dash_id_parse[240]
                    except:
                        pass
                with youtube_dl.YoutubeDL(
                        {'format': dash_video_id + ',bestaudio', 'outtmpl': video_input[:-3] + '.%(ext)s'}) as ydl:
                    ydl.download([dash_url])

    else:
        if os.path.lexists(os.path.abspath(os.path.join(".", "crunchy-xml-decoder", "hls.py"))):
            hls_s_path = os.path.abspath(os.path.join(".", "crunchy-xml-decoder"))
        elif os.path.lexists(os.path.abspath(os.path.join("..", "crunchy-xml-decoder", "hls.py"))):
            hls_s_path = os.path.abspath(os.path.join("..", "crunchy-xml-decoder"))
        else:
            print('hls script not found')
        hls_script = '''\
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
sys.path.append(r"''' + hls_s_path + '''")
from hls_ import video_hls

download_ = video_hls()
download_.video_hls("''' + hls_url + '''", r"''' + video_input + '''", ''' + str(config_['connection_n_']) + ''')
#video_hls("''' + hls_url + '''", r"''' + video_input + '''", ''' + str(config_['connection_n_']) + ''')'''
        # print(hls_script)
        with open(os.path.join(".", "export", "hls_script_temp.py"), "w", encoding='utf-8') as hls_script_file:
            hls_script_file.write(hls_script)
        hls_subprocess_result = subprocess.call([sys.executable.replace('pythonw.exe', 'python.exe'),
                                             os.path.join(".", "export", "hls_script_temp.py")])
        if hls_subprocess_result != 0:
            print('It seems there is a problem with the HLS stream; will use the DASH stream instead')
            subprocess.call([sys.executable.replace('pythonw.exe', 'python.exe'),
                             '-m','youtube_dl',
                             '-f', dash_video_id+',bestaudio',
                             '-o', video_input[:-3]+'.%(ext)s',
                             dash_url
                             ])



        os.remove(os.path.join(".", "export", "hls_script_temp.py"))
    """
    #decode(page_url)
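    # Fetch the episode's subtitles (Vilos player data) and mux the downloaded video
    # into an MKV; the 'English' argument presumably selects the subtitle language.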
    vilos_subtitle(page_url)
    mkv_merge(video_input, config_['video_quality'], 'English')