예제 #1
0
def Get_PlayList(url):
    """Load a playlist from *url* and return it as a list of
    {'comment': ..., 'file': ...} dicts.

    Both the URL and the downloaded body may be obfuscated with the
    site-specific xppod encoding; both are decoded transparently.
    Returns [] when the URL cannot be resolved to an http address.
    """
    plist = []

    #-- the playlist reference itself may be xppod-encoded
    if url[:4] != 'http':
        url = xppod.Decode(url)

    #-- still not an http URL after decoding: give up
    if url[:4] != 'http':
        return []

    html = get_HTML(url)

    #-- body is either plain JSON or xppod-encoded JSON
    try:
        pl = json.loads(html)
    except ValueError:  # was a bare except: only JSON decode errors are expected here
        pl = json.loads(xppod.Decode(html))

    for rec in pl['playlist']:
        plist.append({
            #-- server sends UTF-8 bytes mislabelled as iso-8859-1
            'comment': rec['comment'].encode('iso-8859-1').decode('utf-8'),
            'file': rec['file']
        })

    return plist
예제 #2
0
def Get_Movie_HTML():
    """Query the docu.im search endpoint and return the concatenated HTML
    of all result rows on the current page.

    Side effects: fills par.count (total hits) on the first call and
    par.max_page (total page count) on every call.
    """
    url = 'http://docu.im/search/result'

    #-- split a "year1-year2" range into its two ends
    if '-' in par.year:
        year1 = par.year.split('-')[0]
        year2 = par.year.split('-')[1]
    else:
        year1 = ''
        year2 = ''

    #-- get country code (was "par.country <> ''": deprecated operator)
    if par.country != '':
        country = '{"id":"%s","name": "%s"}'%(par.country, par.country_name)
    else:
        country = ''

    #-- assemble the JSON filter expected by the search endpoint
    search_filter = '{"title":"%s","fyear":"%s","tyear":"%s","genres":[%s],"directors":[],"actors":[],"countries":[%s],"studios":[]}'%(par.search, year1, year2, par.genre, country)

    #-- fetch the total result count once per search session
    if par.count == 0:
        values = {'t': 'count', 'f': search_filter}

        post = urllib.urlencode(values)
        html = get_HTML(url, post, 'http://docu.im/search')
        result = demjson3.loads(html)  # renamed from "list": don't shadow the builtin

        par.count = int(result['count'])

    #-- fetch the requested result page
    values = {'viewAs': 'list', 'p': par.page, 'f': search_filter}

    post = urllib.urlencode(values)
    html = get_HTML(url, post, 'http://docu.im/search')
    result = demjson3.loads(html)

    par.max_page = int(result['pagination']['totalPages'])

    html = ''
    if par.max_page > 0:
        for rec in result['items']:
            html += rec['html']

    return html
예제 #3
0
파일: default.py 프로젝트: vkravets/ru
def Get_PlayList(pl_url, season = '', start_item = '', mode = 's'): #-- mode: s - seasons, e - episodes
    """Fetch an xppod-encoded playlist and flatten it.

    mode 's' lists seasons (top-level entries that carry a nested
    'playlist'); any other mode lists episodes, optionally starting at
    *start_item* inside *season*.  season == '-' marks a flat playlist
    with no per-season nesting.  Returns a list of
    {'comment': ..., 'file': ...} dicts; [] when the source is not a
    season-style playlist at all.
    """
    plist = []
    #-- download and decode the playlist body
    post = None
    html = get_HTML(pl_url, post)
    pl = xppod.Decode(html)
    pl = pl.encode('iso-8859-1').decode('utf-8').replace('\r','')
    is_found = False

    for r in json.loads(pl)['playlist']:
        if mode == 's':
            #-- an entry without a nested playlist means this is not a
            #-- season list: bail out (was a bare except around r['playlist'])
            if 'playlist' not in r:
                return []
            plist.append({'comment':r['comment'].lstrip().encode('utf-8'), 'file':pl_url})
        else:
            if r['comment'].lstrip().encode('utf-8') == season or season == '' or season == '-':
                if season == '-':
                    #-- flat playlist: items hang directly off the top level
                    if r['comment'].lstrip().encode('utf-8') == start_item or start_item == '':
                        is_found = True
                    if is_found:
                        plist.append({'comment':r['comment'].lstrip().encode('utf-8'), 'file':r['file']})
                else:
                    #-- nested playlist: episodes live one level down
                    for rec in r['playlist']:
                        if rec['comment'].lstrip().encode('utf-8') == start_item or start_item == '':
                            is_found = True
                        if is_found:
                            plist.append({'comment':rec['comment'].lstrip().encode('utf-8'), 'file':rec['file']})

    return plist
예제 #4
0
	def _get_video_file2(self, id, video_id):
		"""Resolve the *video_id*-th entry (1-based) of the playlist whose
		hash is *id* in the list_plfile table.

		Returns a dict {'name', 'url', 'id', 'fileid'}, or 0 when there is
		no DB cursor or no matching row.
		"""
		if self._cursor is not None:  # was "not self._cursor is None"
			# NOTE(review): query built by string interpolation -- parameterize
			# if *id* can ever come from untrusted input.
			query = u"SELECT file FROM list_plfile WHERE hash='%s'" %id
			self._execute(True, query)
			values = self._cursor.fetchall()
			if values:
				values = values[0]

				plh = Utils.get_HTML(values[0], __cookies__)
				pl = json.loads(plh.decode('utf-8'))
				entries = []  # renamed from "list": don't shadow the builtin
				x = 0
				for rec in pl['playlist']:
					x += 1
					if 'playlist' in rec:
						#-- two-level playlist: prefix each item with its group name
						for rec1 in rec['playlist']:
							lname = (rec['comment'] + ' - ' + rec1['comment']).replace('<b>', '').replace('</b>', '')
							lname = lname.replace('<br>', ' ').replace('</br>', ' ')

							entries.append({'name': lname, 'url': Utils.normalizeFile(rec1['file']), 'id': x, 'fileid':id})
					else:
						lname = rec['comment'].replace('<b>', '').replace('</b>', '')
						lname = lname.replace('<br>', ' ').replace('</br>', ' ')

						entries.append({'name': lname, 'url': Utils.normalizeFile(rec['file']), 'id': x, 'fileid':id})

				return entries[int(video_id)-1]
		return 0
예제 #5
0
def Get_PlayList(pl_url, season = '', start_item = '', mode = 's'): #-- mode: s - seasons, e - episodes
    """Fetch an xppod-encoded playlist and flatten it.

    mode 's' lists seasons (top-level entries that carry a nested
    'playlist'); any other mode lists episodes, optionally starting at
    *start_item* inside *season*.  season == '-' marks a flat playlist
    with no per-season nesting.  Returns a list of
    {'comment': ..., 'file': ...} dicts; [] when the source is not a
    season-style playlist at all.
    """
    plist = []
    #-- download and decode the playlist body
    post = None
    html = get_HTML(pl_url, post)
    pl = xppod.Decode(html)
    pl = pl.encode('iso-8859-1').decode('utf-8').replace('\r','')
    is_found = False

    for r in json.loads(pl)['playlist']:
        if mode == 's':
            #-- an entry without a nested playlist means this is not a
            #-- season list: bail out (was a bare except around r['playlist'])
            if 'playlist' not in r:
                return []
            plist.append({'comment':r['comment'].lstrip().encode('utf-8'), 'file':pl_url})
        else:
            if r['comment'].lstrip().encode('utf-8') == season or season == '' or season == '-':
                if season == '-':
                    #-- flat playlist: items hang directly off the top level
                    if r['comment'].lstrip().encode('utf-8') == start_item or start_item == '':
                        is_found = True
                    if is_found:
                        plist.append({'comment':r['comment'].lstrip().encode('utf-8'), 'file':r['file']})
                else:
                    #-- nested playlist: episodes live one level down
                    for rec in r['playlist']:
                        if rec['comment'].lstrip().encode('utf-8') == start_item or start_item == '':
                            is_found = True
                        if is_found:
                            plist.append({'comment':rec['comment'].lstrip().encode('utf-8'), 'file':rec['file']})

    return plist
예제 #6
0
def PLAY(params):
    """Resolve a tvisio.tv page into an rtmp stream URL and start playback.

    params keys: 'url' (page URL, '*' means nothing to play), 'img'
    (thumbnail), 'prg' (display name).  Returns False when url == '*'.
    """
    # -- parameters
    url  = urllib.unquote_plus(params['url'])
    img  = urllib.unquote_plus(params['img'])
    name = urllib.unquote_plus(params['prg'])

    if url == '*':
        return False

    # -- check if video available
    html = get_HTML(url)

    #-- pull the jwplayer setup block out of the page (raw string for regex)
    rec = re.compile(r"jwplayer\('mediaspace'\).setup\({(.+?)}\);", re.MULTILINE|re.DOTALL).findall(html)[0]

    #-- turn the setup block into valid JSON
    #-- (renamed from "str": don't shadow the builtin)
    setup_json = '{'+rec.replace('\n','').replace(' ','').replace('\'','"')+'}'
    j1 = json.loads(setup_json)

    v_server = j1['streamer']
    v_swf    = j1['flashplayer']
    v_stream = j1['file'][:-4]  # drops the last 4 chars -- presumably ".flv"-style extension, TODO confirm

    video = '%s app=file swfUrl=http://tvisio.tv%s pageUrl=%s playpath=%s swfVfy=1' % (v_server, v_swf, url, v_stream)

    i = xbmcgui.ListItem(name, path = urllib.unquote(video), thumbnailImage=img)
    i.setProperty('IsPlayable', 'true')

    xbmc.Player( xbmc.PLAYER_CORE_MPLAYER).play(video, i)
예제 #7
0
def Get_Movie_HTML():
    """Query the docu.im search endpoint and return the concatenated HTML
    of all result rows on the current page.

    Side effects: fills par.count (total hits) on the first call and
    par.max_page (total page count) on every call.
    """
    url = 'http://docu.im/search/result'

    #-- split a "year1-year2" range into its two ends
    if '-' in par.year:
        year1 = par.year.split('-')[0]
        year2 = par.year.split('-')[1]
    else:
        year1 = ''
        year2 = ''

    #-- get country code (was "par.country <> ''": deprecated operator)
    if par.country != '':
        country = '{"id":"%s","name": "%s"}' % (par.country, par.country_name)
    else:
        country = ''

    #-- assemble the JSON filter expected by the search endpoint
    search_filter = '{"title":"%s","fyear":"%s","tyear":"%s","genres":[%s],"directors":[],"actors":[],"countries":[%s],"studios":[]}' % (
        par.search, year1, year2, par.genre, country)

    #-- fetch the total result count once per search session
    if par.count == 0:
        values = {'t': 'count', 'f': search_filter}

        post = urllib.urlencode(values)
        html = get_HTML(url, post, 'http://docu.im/search')
        result = demjson3.loads(html)  # renamed from "list": don't shadow the builtin

        par.count = int(result['count'])

    #-- fetch the requested result page
    values = {'viewAs': 'list', 'p': par.page, 'f': search_filter}

    post = urllib.urlencode(values)
    html = get_HTML(url, post, 'http://docu.im/search')
    result = demjson3.loads(html)

    par.max_page = int(result['pagination']['totalPages'])

    html = ''
    if par.max_page > 0:
        for rec in result['items']:
            html += rec['html']

    return html
예제 #8
0
파일: default.py 프로젝트: Stevie-Bs/ru
def Get_PlayList(url):
    """Load a playlist from *url* and return [{'comment', 'file'}, ...].

    Both the URL and the downloaded body may be xppod-encoded; returns []
    when the URL cannot be resolved to an http address.
    """
    plist = []

    # -- the playlist reference itself may be xppod-encoded
    if url[:4] != "http":
        url = xppod.Decode(url)

    # -- still not an http URL after decoding: give up
    if url[:4] != "http":
        return []

    html = get_HTML(url)

    # -- body is either plain JSON or xppod-encoded JSON
    try:
        pl = json.loads(html)
    except ValueError:  # was a bare except: only JSON decode errors are expected here
        pl = json.loads(xppod.Decode(html))

    for rec in pl["playlist"]:
        # server sends UTF-8 bytes mislabelled as iso-8859-1
        plist.append({"comment": rec["comment"].encode("iso-8859-1").decode("utf-8"), "file": rec["file"]})

    return plist
예제 #9
0
def Get_PlayList(url, name):
    print url

    html = get_HTML(url)

    list = []
    # -- parsing web page --------------------------------------------------
    soup = BeautifulSoup(html, fromEncoding="windows-1251")
    #xbmc.log('[NOWFILMS.RU html=]'+str(soup))
    # -- get movie info
    allResults = soup.findAll('param', attrs={'name': 'flashvars'})

    #xbmc.log('[NOWFILMS.RU] found links =%s' %allResults)
    for res in allResults:
        video = ''
        #xbmc.log('[NOWFILMS.RU] processing result=%s' %res)
        for rec in res['value'].split('&'):
            #xbmc.log('[NOWFILMS.RU] processing rec=%s' %rec)
            if rec.split('=', 2)[0] == 'pl':
                video = rec.split('=', 1)[1]
            if rec.split('=', 2)[0] == 'file':
                video = rec.split('=', 1)[1]
            #if rec.split('=',1)[0] == 'st':
            #video = rec.split('=',1)[1]
        if video <> '':
            if video[-3:] == 'txt':
                html = get_HTML(video)
                html = html.replace('\n', '')
                if html[0] <> '[' and html[-1] == ']':
                    html = html[:-1]
                pl = json.loads(html.decode('utf-8'))

                for rec in pl['playlist']:
                    try:
                        for rec1 in rec['playlist']:
                            list.append({
                                'name':
                                rec['comment'].replace('<b>', '').replace(
                                    '</b>', '') + ' - ' + rec1['comment'],
                                'url':
                                rec1['file']
                            })
                    except:
                        list.append({
                            'name': rec['comment'],
                            'url': rec['file']
                        })
            else:
                list.append({'name': name, 'url': video})

    return list
예제 #10
0
def PLAY(params):
    """Build an XBMC playlist from an asbook.net book page and start
    playback at the requested track.

    params keys: 'url' (book page), 'name' (display name), 'img'
    (thumbnail), 'track' (1-based index of the first track to queue).
    """
    # create play list
    pl=xbmc.PlayList(1)
    pl.clear()

    # -- parameters
    url  = urllib.unquote_plus(params['url'])
    name = urllib.unquote_plus(params['name'])
    img = urllib.unquote_plus(params['img'])
    track = int(urllib.unquote_plus(params['track']))

    # HTTP headers appended to every stream URL ("|"-separated Kodi syntax)
    header = {  'Host'                  :urlparse(url).hostname,
                'Referer'               :'http://asbook.net/player/uppod.swf',
                'User-Agent'            :'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)'
             }

    #------------------------------------------------
    html = get_URL(url)
    # -- parsing web page
    soup = BeautifulSoup(html, fromEncoding="windows-1251")

    #(------------- Change 12/03/2015 Evgenii S----------------------------------------
    #for j in soup.findAll('script', {'type':'text/javascript'}):
    #    if 'var flashvars = {' in j.text:
    #        pl = re.compile('var flashvars = {(.+?)}', re.MULTILINE|re.DOTALL).findall(j.text)
    #        b_url = pl[0].split(',')[1].replace('pl:','').replace('"','')
    # SECURITY NOTE(review): the eval() below executes code derived from
    # scraped page content -- a hostile page could run arbitrary Python.
    packed_flash_data = soup.find('div', {'class':'b-fullpost__player_wrapper clearfix'}).contents[1].text
    unpacked_flash_data = eval('unpack' + packed_flash_data[packed_flash_data.find('}(')+1:-1])
    b_url = re.compile("json_url=\\'(.+?)\'", re.MULTILINE|re.DOTALL).findall(unpacked_flash_data)[0]
    #-------------- End Change 12/03/2015 Evgenii S------------------------------------
    #------------------------------------------------
    html = get_URL(b_url)

    n = 0
    playlist = json.loads(html)
    for rec in playlist['playlist']:
        n += 1
        # skip everything before the requested start track
        if track <= n:
            s_name = rec['comment'].encode('utf-8')
            s_url  = rec['file']+'|'+urllib.urlencode(header)

            i = xbmcgui.ListItem(s_name, path = urllib.unquote(s_url), thumbnailImage=img)
            i.setInfo(type='music', infoLabels={    'title' :     s_name,
                                                    'tracknumber':      str(n)})
            pl.add(s_url, i)

    xbmc.Player().play(pl)
예제 #11
0
def Country_List():
    """List docu.im countries as directory items carrying a MOVIE filter."""

    #-- fetch the country list from the autocomplete endpoint
    url = 'http://docu.im/country/autocomplete'
    html = get_HTML(url)

    countries = demjson3.loads(html)  # renamed from "list": don't shadow the builtin

    for rec in countries:
        name = rec['name'].encode('utf-8')
        country_id = rec['id'].encode('utf-8')
        i = xbmcgui.ListItem(name, iconImage=icon, thumbnailImage=icon)
        u = sys.argv[0] + '?mode=MOVIE' + get_Filter(country = country_id, country_name = name, count = 0)
        xbmcplugin.addDirectoryItem(h, u, i, True)

    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_LABEL)
    xbmcplugin.endOfDirectory(h)
예제 #12
0
def Country_List():
    """List docu.im countries as directory items carrying a MOVIE filter."""

    #-- fetch the country list from the autocomplete endpoint
    url = 'http://docu.im/country/autocomplete'
    html = get_HTML(url)

    countries = demjson3.loads(html)  # renamed from "list": don't shadow the builtin

    for rec in countries:
        name = rec['name'].encode('utf-8')
        country_id = rec['id'].encode('utf-8')
        i = xbmcgui.ListItem(name, iconImage=icon, thumbnailImage=icon)
        u = sys.argv[0] + '?mode=MOVIE' + get_Filter(
            country=country_id, country_name=name, count=0)
        xbmcplugin.addDirectoryItem(h, u, i, True)

    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_LABEL)
    xbmcplugin.endOfDirectory(h)
예제 #13
0
def Get_PlayList(url, name):
	print url

	html = get_HTML(url)

	list = []
	# -- parsing web page --------------------------------------------------
	soup = BeautifulSoup(html, fromEncoding="windows-1251")
	#xbmc.log('[NOWFILMS.RU html=]'+str(soup))
	# -- get movie info
	allResults = soup.findAll('param', attrs={'name': 'flashvars'})

	#xbmc.log('[NOWFILMS.RU] found links =%s' %allResults)
	for res in allResults:
		video = ''
		#xbmc.log('[NOWFILMS.RU] processing result=%s' %res)
		for rec in res['value'].split('&'):
			#xbmc.log('[NOWFILMS.RU] processing rec=%s' %rec)
			if rec.split('=',2)[0] == 'pl':
				video = rec.split('=',1)[1]
			if rec.split('=',2)[0] == 'file':
				video = rec.split('=',1)[1]
			#if rec.split('=',1)[0] == 'st':
				#video = rec.split('=',1)[1]
		if video <> '': 
			if video[-3:] == 'txt':
				html = get_HTML(video)
				html = html.replace('\n', '')
				if html[0] <> '[' and html[-1] == ']':
					html = html[:-1]
				pl = json.loads(html.decode('utf-8'))

				for rec in pl['playlist']:
					try:
						for rec1 in rec['playlist']:
							list.append({'name': rec['comment'].replace('<b>','').replace('</b>','')+' - '+rec1['comment'], 'url': rec1['file']})
					except:
						list.append({'name': rec['comment'], 'url': rec['file']})
			else:
				list.append({'name': name, 'url': video})

	return list
예제 #14
0
파일: module1.py 프로젝트: serbra/ru
    else:
        is_Serial = False
except:
    is_Serial = False

if is_Serial == True:
    for rec in soup.find('div', {'id':'season-switch-items'}).findAll('div', {'class':'switch-item'}):
        season_name = rec.find('a').text
        season_id = rec.find('a').text.replace(u'сезон', '').replace(u'Сезон', '').replace(u' ', '')

        url  = 'http://docu.im/movie/player/%s/playlist.txt?season=%s'%(movie_id, season_id)
        html = get_HTML(url)

        info = Decode(html)

        rec = demjson3.loads(info)

        try:
            rec = demjson3.loads(rec['pl'])
        except:
            pass

        for t in rec['playlist']:
            print season_name+'  '+t['comment']
            print t['file']
            print ' '
else:
    url  = 'http://docu.im/movie/player/%s/style.txt'%(movie_id)
    html = get_HTML(url)

    info = Decode(html)
예제 #15
0
def Book_Info(params):
    """Show an asbook audiobook's details and add one playable directory
    item per track.

    Scrapes the book page for metadata, unpacks the packed player JS to
    find the playlist JSON URL, then lists every track with a PLAY link.
    """
    #-- get filter parameters
    par = Get_Parameters(params)
    #== get book details =================================================
    url = par.url
    html = get_URL(url)

    # -- defaults used when a field cannot be scraped ----------------------
    # (unused b_genre/b_bitrate locals removed)
    b_name = ''
    b_score = ''
    b_img = ''
    b_descr = ''
    b_year = 0
    b_autor = ''
    b_actor = ''
    b_publisher = ''
    b_duration = 0

    # -- parsing web page --------------------------------------------------
    soup = BeautifulSoup(html, fromEncoding="windows-1251")

    b_name = urllib.unquote(soup.find('h1', {'class': 'b-maintitle'}).text)
    b_score = soup.find('div', {'class': 'mark'}).text.replace(',', '.')

    #-- cover image: structured markup first, raw <img> scan as fallback
    try:
        b_img = soup.find('div', {'class': 'b-searchpost__cover'}).find('img')['src']
    except Exception:  # was a bare except
        try:
            b_img = re.compile(r'\<img (.+?)\/>').findall(html)
            for e in b_img:
                if 'title=' in e:
                    b_img = re.compile(r'src=\"(.+?)\"').findall(e)[0]
                    break
        except Exception:  # was a bare except
            b_img = icon

    b_descr = urllib.unquote(soup.find('div', {'class': 'b-searchpost__text'}).text)

    #-- metadata rows are identified by their sprite icon class
    for rec in soup.find('div', {'class': 'b-searchpost__data'}).findAll('div', {'class': "row"}):
        if rec.find('i', {'class': "b-sprt icon-10-2"}):
            b_year = int(rec.find('div', {'class': "cell string"}).find('a').text)

        if rec.find('i', {'class': "b-sprt icon-7-2"}):
            b_publisher = rec.find('div', {'class': "cell string"}).find('a').text

        if rec.find('i', {'class': "b-sprt icon-9-1"}):
            #-- duration is "hh:mm:ss" -> seconds
            s = rec.find('div', {'class': "cell string"}).text
            b_duration = int(s.split(':')[0])*60*60 + int(s.split(':')[1])*60 + int(s.split(':')[2])

        if rec.find('i', {'class': "b-sprt icon-5-2"}):
            b_autor = rec.find('div', {'class': "cell string"}).find('a').text

        if rec.find('i', {'class': "b-sprt icon-6-2"}):
            b_actor = rec.find('div', {'class': "cell string"}).find('a').text

    #-- unpack the obfuscated player script to locate the playlist URL.
    # SECURITY NOTE(review): eval() runs code derived from scraped page
    # content -- a hostile page could execute arbitrary Python.
    packed_flash_data = soup.find('div', {'class': 'b-fullpost__player_wrapper clearfix'}).contents[1].text
    unpacked_flash_data = eval('unpack' + packed_flash_data[packed_flash_data.find('}(')+1:-1])
    b_url = re.compile(r"json_url=\'(.+?)\'", re.MULTILINE|re.DOTALL).findall(unpacked_flash_data)[0]

    # -- fetch the playlist and add one directory item per track -----------
    html = get_URL(b_url)

    n = 0

    playlist = json.loads(html)
    for rec in playlist['playlist']:
        n += 1
        s_name = rec['comment'].encode('utf-8')
        s_url = rec['file']

        i = xbmcgui.ListItem(s_name, path=urllib.unquote(s_url), thumbnailImage=b_img)
        u = sys.argv[0] + '?mode=PLAY'
        u += '&url=%s' % urllib.quote_plus(url)  #b_url)
        u += '&name=%s' % urllib.quote_plus(s_name)
        u += '&img=%s' % urllib.quote_plus(b_img)
        u += '&track=%s' % urllib.quote_plus(str(n))
        i.setInfo(type='music', infoLabels={'album': b_name,
                                            'title': s_name,
                                            'year': b_year,
                                            'artist': b_actor,
                                            'comment': b_descr,
                                            'genre': par.genre,
                                            'rating': b_score})
        i.setProperty('fanart_image', b_img)
        xbmcplugin.addDirectoryItem(h, u, i, False)

    xbmcplugin.endOfDirectory(h)
예제 #16
0
def Book_Info(params):
    """Show an asbook audiobook's details and add one playable directory
    item per track (rvlad1987 variant: the player data is no longer
    packed, so the playlist URL is read straight from the page script).
    """
    #-- get filter parameters
    par = Get_Parameters(params)
    #== get book details =================================================
    url = par.url
    html = get_URL(url)

    # -- defaults used when a field cannot be scraped ----------------------
    # (unused b_genre/b_bitrate locals removed)
    b_name = ''
    b_score = ''
    b_img = ''
    b_descr = ''
    b_year = 0
    b_autor = ''
    b_actor = ''
    b_publisher = ''
    b_duration = 0

    # -- parsing web page --------------------------------------------------
    soup = BeautifulSoup(html, fromEncoding="windows-1251")

    b_name = urllib.unquote(soup.find('h1', {'class': 'b-maintitle'}).text)
    b_score = soup.find('div', {'class': 'mark'}).text.replace(',', '.')

    #-- cover image: structured markup first, raw <img> scan as fallback
    try:
        b_img = soup.find('div', {'class': 'b-searchpost__cover'}).find('img')['src']
    except Exception:  # was a bare except
        try:
            b_img = re.compile(r'\<img (.+?)\/>').findall(html)
            for e in b_img:
                if 'title=' in e:
                    b_img = re.compile(r'src=\"(.+?)\"').findall(e)[0]
                    break
        except Exception:  # was a bare except
            b_img = icon

    b_descr = urllib.unquote(soup.find('div', {'class': 'b-searchpost__text'}).text)

    #-- metadata rows are identified by their sprite icon class
    for rec in soup.find('div', {'class': 'b-searchpost__data'}).findAll('div', {'class': "row"}):
        if rec.find('i', {'class': "b-sprt icon-10-2"}):
            b_year = int(rec.find('div', {'class': "cell string"}).find('a').text)

        if rec.find('i', {'class': "b-sprt icon-7-2"}):
            b_publisher = rec.find('div', {'class': "cell string"}).find('a').text

        if rec.find('i', {'class': "b-sprt icon-9-1"}):
            #-- duration is "hh:mm:ss" -> seconds
            s = rec.find('div', {'class': "cell string"}).text
            b_duration = int(s.split(':')[0]) * 60 * 60 + int(s.split(':')[1]) * 60 + int(s.split(':')[2])

        if rec.find('i', {'class': "b-sprt icon-5-2"}):
            b_autor = rec.find('div', {'class': "cell string"}).find('a').text

        if rec.find('i', {'class': "b-sprt icon-6-2"}):
            b_actor = rec.find('div', {'class': "cell string"}).find('a').text

    #(------------- Change 25/10/2017 rvlad1987----------------------------------------
    #-- the playlist URL now sits unobfuscated in the player wrapper script
    unpacked_flash_data = soup.find('div', {'class': 'b-fullpost__player_wrapper clearfix'}).contents[1].text
    b_url = re.compile(r"json_url = \'(.+?)\'").findall(unpacked_flash_data)[0]
    #--------------End Change 25/10/2017 rvlad1987------------------------------------

    # -- fetch the playlist and add one directory item per track -----------
    html = get_URL(b_url)

    n = 0

    playlist = json.loads(html)
    for rec in playlist['playlist']:
        n += 1
        s_name = rec['comment'].encode('utf-8')
        s_url = rec['file']

        i = xbmcgui.ListItem(s_name,
                             path=urllib.unquote(s_url),
                             thumbnailImage=b_img)
        u = sys.argv[0] + '?mode=PLAY'
        u += '&url=%s' % urllib.quote_plus(url)  #b_url)
        u += '&name=%s' % urllib.quote_plus(s_name)
        u += '&img=%s' % urllib.quote_plus(b_img)
        u += '&track=%s' % urllib.quote_plus(str(n))
        i.setInfo(type='music',
                  infoLabels={
                      'album': b_name,
                      'title': s_name,
                      'year': b_year,
                      'artist': b_actor,
                      'comment': b_descr,
                      'genre': par.genre,
                      'rating': b_score
                  })
        i.setProperty('fanart_image', b_img)
        xbmcplugin.addDirectoryItem(h, u, i, False)

    xbmcplugin.endOfDirectory(h)
예제 #17
0
def Movie_Detail_List():
    """Show a docu.im movie's episodes (or the single movie) as playable
    directory items, filling the module-level ``mi`` record from the page.
    """
    #-- get movie detail page
    url = par.url
    #-- inject '|' separators so the description text can be split into fields
    html = get_HTML(url).replace("<span class='heading'>", "|<span class='heading'>").replace('<p>', '|Text:<p>')
    soup = BeautifulSoup(html, fromEncoding="utf-8", convertEntities=BeautifulSoup.HTML_ENTITIES)

    rec = soup.find('div', {'class': 'movie full clearfix'})
    mi.title = rec.find('div', {'class': 'title'}).text.encode('utf-8')
    mi.url = rec.find('div', {'class': 'title'}).find('a')['href']
    mi.alter = rec.find('div', {'class': 'alt-title'}).text.encode('utf-8')
    mi.img = rec.find('img')['src']

    #-- description block: "|Field:value" pairs produced by the replace above
    for sp in rec.find('div', {'class': 'description'}).text.split('|'):
        info = sp.split(':', 1)
        try:
            if info[0].replace(' ', '') == u'Страна':
                mi.country = info[1].replace(',', ', ').encode('utf-8')
            elif info[0].replace(' ', '') == u'Жанры':
                mi.genre = info[1].replace(',', ', ').encode('utf-8')
            elif info[0].replace(' ', '') == u'Режиссеры':
                mi.director = info[1].replace(',', ', ').encode('utf-8')
            elif info[0].replace(' ', '') == u'Text':
                mi.text = hpar.unescape(info[1]).encode('utf-8')
            elif info[0].replace(' ', '') == u'Год':
                mi.year = int(info[1])
        except Exception:  # was a bare except: skip malformed fields
            pass

    movie_id = soup.find('div', {'class': 'player-wrapper'}).find('div', {'id': 'player'})['movie-id'].encode('utf-8')
    video_id = soup.find('div', {'class': 'player-wrapper'}).find('div', {'id': 'player'})['video-id'].encode('utf-8')

    #-- the presence of a season switch marks a serial
    try:
        if len(soup.find('div', {'id': 'season-switch-items'})) > 0:
            is_Serial = True
        else:
            is_Serial = False
    except Exception:  # was a bare except: find() returned None
        is_Serial = False

    def _add_item(name):
        #-- add one playable directory item for the current mi.url
        i = xbmcgui.ListItem(name, iconImage=par.img, thumbnailImage=par.img)
        u = sys.argv[0] + '?mode=PLAY' + get_Filter(fname = mi.title, url = par.url+'|'+mi.url)
        i.setInfo(type='video', infoLabels={'title': mi.title,
                                            'originaltitle': mi.alter,
                                            'year': mi.year,
                                            'director': mi.director,
                                            'plot': mi.text,
                                            'country': mi.country,
                                            'genre': mi.genre})
        xbmcplugin.addDirectoryItem(h, u, i, False)

    if is_Serial == True:
        #-- serial: one decoded playlist per season
        for rec in soup.find('div', {'id': 'season-switch-items'}).findAll('div', {'class': 'switch-item'}):
            season_name = rec.find('a').text
            season_id = rec.find('a').text.replace(u'сезон', '').replace(u'Сезон', '').replace(u' ', '').encode('utf-8')

            url = 'http://docu.im/movie/player/%s/playlist.txt?season=%s'%(movie_id, season_id)
            html = get_HTML(url)
            info = Decode(html)
            rec = demjson3.loads(info)
            try:
                rec = demjson3.loads(rec['pl'])
            except Exception:  # was a bare except: payload already decoded
                pass

            for t in rec['playlist']:
                name = season_name.encode('utf-8')+'  [COLOR FFC3FDB8]'+t['comment'].encode('utf-8')+'[/COLOR]'
                mi.url = t['file']
                _add_item(name)
    else:
        #-- single movie: one playlist from style.txt
        url = 'http://docu.im/movie/player/%s/style.txt'%(movie_id)
        html = get_HTML(url)
        info = Decode(html)

        rec = demjson3.loads(info)
        rec = demjson3.loads(rec['pl'])
        for t in rec['playlist']:
            name = '[COLOR FFC3FDB8]'+t['comment'].encode('utf-8')+'[/COLOR]'
            mi.url = t['file']
            _add_item(name)

    xbmcplugin.endOfDirectory(h)
예제 #18
0
def PLAY(params):
    """Build an XBMC playlist from an asbook.co book page and start
    playback at the requested track.

    params keys: 'url' (book page), 'name' (display name), 'img'
    (thumbnail), 'track' (1-based index of the first track to queue).
    """
    # create play list
    pl = xbmc.PlayList(1)
    pl.clear()

    # -- parameters
    url = urllib.unquote_plus(params['url'])
    name = urllib.unquote_plus(params['name'])
    img = urllib.unquote_plus(params['img'])
    track = int(urllib.unquote_plus(params['track']))

    # HTTP headers appended to every stream URL ("|"-separated Kodi syntax)
    header = {
        'Host':
        urlparse(url).hostname,
        'Referer':
        'http://asbook.co/player/audio.swf',
        'User-Agent':
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)'
    }

    #------------------------------------------------
    html = get_URL(url)
    # -- parsing web page
    soup = BeautifulSoup(html, fromEncoding="windows-1251")

    #(------------- Change 12/03/2015 Evgenii S----------------------------------------
    #for j in soup.findAll('script', {'type':'text/javascript'}):
    #    if 'var flashvars = {' in j.text:
    #        pl = re.compile('var flashvars = {(.+?)}', re.MULTILINE|re.DOTALL).findall(j.text)
    #        b_url = pl[0].split(',')[1].replace('pl:','').replace('"','')
    #packed_flash_data = soup.find('div', {'class':'b-fullpost__player_wrapper clearfix'}).contents[1].text
    #unpacked_flash_data = eval('unpack' + packed_flash_data[packed_flash_data.find('}(')+1:-1])
    #b_url = re.compile("json_url=\\'(.+?)\'", re.MULTILINE|re.DOTALL).findall(unpacked_flash_data)[0]
    #-------------- End Change 12/03/2015 Evgenii S------------------------------------
    #(------------- Change 25/10/2017 rvlad1987----------------------------------------
    # playlist URL now sits unobfuscated in the player wrapper script
    unpacked_flash_data = soup.find(
        'div', {
            'class': 'b-fullpost__player_wrapper clearfix'
        }).contents[1].text
    b_url = re.compile("json_url = \'(.+?)\'").findall(unpacked_flash_data)[0]
    #--------------End Change 25/10/2017 rvlad1987------------------------------------
    #------------------------------------------------
    html = get_URL(b_url)

    n = 0
    playlist = json.loads(html)
    for rec in playlist['playlist']:
        n += 1
        # skip everything before the requested start track
        if track <= n:
            s_name = rec['comment'].encode('utf-8')
            s_url = rec['file'] + '|' + urllib.urlencode(header)

            i = xbmcgui.ListItem(s_name,
                                 path=urllib.unquote(s_url),
                                 thumbnailImage=img)
            i.setInfo(type='music',
                      infoLabels={
                          'title': s_name,
                          'tracknumber': str(n)
                      })
            pl.add(s_url, i)

    xbmc.Player().play(pl)
예제 #19
0
def Movie_Detail_List():
    """Render the detail/playlist view for one docu.im title.

    Downloads the page at ``par.url``, fills the module-level ``mi``
    record (title, original title, year, country, genre, director,
    plot, poster) from the page markup, then adds one playable
    directory item per playlist entry: per-season episodes for
    serials, plain entries for movies.

    Side effects only: mutates ``mi`` and writes list items to the
    plugin directory handle ``h``.  Returns nothing.
    """
    #-- fetch the page; the inserted '|' markers turn the description
    #-- block into splittable "<heading>:<value>" chunks (parsed below)
    url = par.url
    html = get_HTML(url).replace("<span class='heading'>",
                                 "|<span class='heading'>").replace(
                                     '<p>', '|Text:<p>')
    soup = BeautifulSoup(html,
                         fromEncoding="utf-8",
                         convertEntities=BeautifulSoup.HTML_ENTITIES)

    rec = soup.find('div', {'class': 'movie full clearfix'})
    mi.title = rec.find('div', {'class': 'title'}).text.encode('utf-8')
    mi.url = rec.find('div', {'class': 'title'}).find('a')['href']
    mi.alter = rec.find('div', {'class': 'alt-title'}).text.encode('utf-8')
    mi.img = rec.find('img')['src']

    #-- pick movie attributes out of the '|'-separated description;
    #-- the headings are Russian (Country / Genres / Directors / Year)
    for sp in rec.find('div', {'class': 'description'}).text.split('|'):
        info = sp.split(':', 1)
        try:
            if info[0].replace(' ', '') == u'Страна':
                mi.country = info[1].replace(',', ', ').encode('utf-8')
            elif info[0].replace(' ', '') == u'Жанры':
                mi.genre = info[1].replace(',', ', ').encode('utf-8')
            elif info[0].replace(' ', '') == u'Режиссеры':
                mi.director = info[1].replace(',', ', ').encode('utf-8')
            elif info[0].replace(' ', '') == u'Text':
                mi.text = hpar.unescape(info[1]).encode('utf-8')
            elif info[0].replace(' ', '') == u'Год':
                mi.year = int(info[1])
        except Exception:
            #-- chunk without a value part, or a non-numeric year: skip it
            pass

    movie_id = soup.find('div', {
        'class': 'player-wrapper'
    }).find('div', {'id': 'player'})['movie-id'].encode('utf-8')
    # NOTE(review): video_id is extracted but never used below
    video_id = soup.find('div', {
        'class': 'player-wrapper'
    }).find('div', {'id': 'player'})['video-id'].encode('utf-8')

    #-- a season-switch block marks the title as a serial;
    #-- soup.find() returns None when it is absent, so len() raises
    try:
        is_Serial = len(soup.find('div', {'id': 'season-switch-items'})) > 0
    except Exception:
        is_Serial = False

    if is_Serial:
        #-- serial: fetch one decoded JSON playlist per season
        for rec in soup.find('div', {
                'id': 'season-switch-items'
        }).findAll('div', {'class': 'switch-item'}):
            season_name = rec.find('a').text
            #-- the season number is the link caption minus the word "season"
            season_id = rec.find('a').text.replace(u'сезон', '').replace(
                u'Сезон', '').replace(u' ', '').encode('utf-8')

            url = 'http://docu.im/movie/player/%s/playlist.txt?season=%s' % (
                movie_id, season_id)
            html = get_HTML(url)
            info = Decode(html)
            rec = demjson3.loads(info)
            try:
                #-- some responses wrap the real playlist in a 'pl' field
                rec = demjson3.loads(rec['pl'])
            except Exception:
                pass

            for t in rec['playlist']:
                name = season_name.encode('utf-8') + '  [COLOR FFC3FDB8]' + t[
                    'comment'].encode('utf-8') + '[/COLOR]'
                mi.url = t['file']

                #-- add episode to the directory ---------------------------
                i = xbmcgui.ListItem(name,
                                     iconImage=par.img,
                                     thumbnailImage=par.img)
                u = sys.argv[0] + '?mode=PLAY' + get_Filter(
                    fname=mi.title, url=par.url + '|' + mi.url)

                i.setInfo(type='video',
                          infoLabels={
                              'title': mi.title,
                              'originaltitle': mi.alter,
                              'year': mi.year,
                              'director': mi.director,
                              'plot': mi.text,
                              'country': mi.country,
                              'genre': mi.genre
                          })

                xbmcplugin.addDirectoryItem(h, u, i, False)
    else:
        #-- movie: single decoded JSON playlist (always wrapped in 'pl')
        url = 'http://docu.im/movie/player/%s/style.txt' % (movie_id)
        html = get_HTML(url)
        info = Decode(html)

        rec = demjson3.loads(info)
        rec = demjson3.loads(rec['pl'])

        for t in rec['playlist']:
            name = '[COLOR FFC3FDB8]' + t['comment'].encode(
                'utf-8') + '[/COLOR]'
            mi.url = t['file']

            #-- add movie to the directory ---------------------------------
            i = xbmcgui.ListItem(name,
                                 iconImage=par.img,
                                 thumbnailImage=par.img)
            u = sys.argv[0] + '?mode=PLAY' + get_Filter(
                fname=mi.title, url=par.url + '|' + mi.url)

            i.setInfo(type='video',
                      infoLabels={
                          'title': mi.title,
                          'originaltitle': mi.alter,
                          'year': mi.year,
                          'director': mi.director,
                          'plot': mi.text,
                          'country': mi.country,
                          'genre': mi.genre
                      })

            xbmcplugin.addDirectoryItem(h, u, i, False)

    xbmcplugin.endOfDirectory(h)
예제 #20
0
파일: default.py 프로젝트: serbra/ru
def Get_PlayList(url, name):
    """Extract playable items from a nowfilms.ru movie page.

    Playlist references are found in two places: classic
    ``<param name="flashvars" value="...">`` tags, and a packed
    ``var flashvars = {...}`` script inside the ``div#dengger`` block.
    A reference ending in 'txt' is a JSON playlist that is fetched and
    expanded into individual episodes; anything else is treated as a
    direct video URL (``name`` supplies its caption).

    Returns a list of ``{'name': ..., 'url': ...}`` dicts.
    """
    html = get_HTML(url)
    plist = []  # renamed from 'list' to avoid shadowing the builtin
    # -- parsing web page --------------------------------------------------
    soup = BeautifulSoup(html, fromEncoding="windows-1251")

    # -- pass 1: <param name="flashvars" value="pl=...&file=...">
    for res in soup.findAll('param', attrs={'name': 'flashvars'}):
        video = ''
        for part in res['value'].split('&'):
            # 'pl' carries a serial playlist URL, 'file' a single movie URL
            if part.split('=', 1)[0] in ('pl', 'file'):
                video = part.split('=', 1)[1]
        if video != '':
            if video[-3:] == 'txt':
                plist += _parse_playlist_txt(get_HTML(video))
            else:
                plist.append({'name': _strip_bold(name), 'url': video})

    # -- pass 2: packed "var flashvars = {...}" inside div#dengger
    # (spaces stripped first, hence the whitespace-free pattern)
    for res in re.compile('varflashvars={(.+?)}',
                          re.MULTILINE | re.DOTALL).findall(
                              soup.find('div', {
                                  'id': "dengger"
                              }).text.replace(' ', '')):
        video = ''
        for part in res.split('",'):
            part = part.replace('"', '')
            #-- 'file' = movie, 'pl' = serial
            if part.split(':', 1)[0] in ('file', 'pl'):
                video = part.split(':', 1)[1]

        if video != '':
            if video[-3:] == 'txt':
                raw = get_HTML(video)
                # drop any junk before the JSON object starts
                plist += _parse_playlist_txt(raw[raw.index('{'):])
            else:
                # comma-separated quality variants; the second-to-last
                # dot-separated token (e.g. resolution) labels each one
                for v in video.split(','):
                    plist.append({
                        'name': _strip_bold(name) + ' (' + v.split('.')[-2] + ')',
                        'url': v
                    })

    return plist


def _strip_bold(s):
    """Remove <b>/<b> markup from a playlist caption."""
    return s.replace('<b>', '').replace('</b>', '')


def _parse_playlist_txt(raw):
    """Parse a nowfilms JSON playlist body into [{'name', 'url'}, ...].

    Handles both flat playlists and serials whose entries nest their
    own per-episode 'playlist' list.
    """
    raw = raw.replace('\n', '')
    # some playlists end with a stray ']' that would break json.loads
    if raw[0] != '[' and raw[-1] == ']':
        raw = raw[:-1]
    pl = json.loads(raw.decode('utf-8'))

    items = []
    for rec in pl['playlist']:
        try:
            # serial: the entry nests its own per-episode playlist
            for sub in rec['playlist']:
                items.append({
                    'name': _strip_bold(rec['comment'] + ' - ' + sub['comment']),
                    'url': sub['file']
                })
        except Exception:
            # plain movie entry: no nested playlist
            items.append({
                'name': _strip_bold(rec['comment']),
                'url': rec['file']
            })
    return items