Example #1
	def channels(self):
		html = client.request(self.base)
		matches = webutils.bs(html).findAll('div',{'class':'match'})
		match_infos = webutils.bs(html).findAll('div',{'class':'match_info'})
		events = self.__prepare_schedule(matches,match_infos)

		html = client.request('http://www.sports-x.net/index.phplivetv', referer=self.base)
		channels = webutils.bs(html).find('article',{'class':'main'}).findAll('a')
		events += self.__prepare_channels(channels)
		return events
Example #2
	def content(self,url):
		html = client.request(url)
		html = convert.unescape(html.decode('utf-8'))
		text = webutils.bs(html).find('div',{'class':'entry-content'}).getText().encode('utf-8', 'xmlcharrefreplace')

		try:
			video = webutils.bs(html).find('div',{'class':'entry-content'}).find('iframe')['src']
			video = self.resolve(video)
		except:
			video = None

		return text,video
Example #3
def resolve(url):
	try:
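		# Direct MP4: drop to plain HTTP and append request headers after '|'
		# (the Kodi convention for passing headers to the player).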
		if '.mp4' in url:
			url = url.replace('https','http')
			url += '|%s' % urllib.urlencode({'User-agent':client.agent(),'X-requested-with':constants.get_shockwave()})
			return url
		if url.startswith('//'):
			url = 'http:' + url
		result = client.request(url)
		html = result
		result = json.loads(result)
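		# The player config JSON (Playwire-style) usually exposes the f4m
		# manifest directly; otherwise scrape it out of the raw response text.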
		try:
			f4m=result['content']['media']['f4m']
		except:
			reg=re.compile('"src":"http://(.+?).f4m"')
			f4m=re.findall(reg,html)[0]
			f4m='http://'+f4m+'.f4m'

		result = client.request(f4m)
		soup = webutils.bs(result)
		try:
			base=soup.find('baseURL').getText()+'/'
		except:
			base=soup.find('baseurl').getText()+'/'

		linklist = soup.findAll('media')
		link = linklist[0]
		url = base + link['url']
		return url.replace('https','http')
	except:
		return
Example #4
	def categories(self):
		html = client.request(self.url)
		soup = webutils.bs(html)
		tag = soup.find('div',{'class':'video_playlists_content clear_fix'})
		cats = tag.findAll('div')
		cats = self.__prepare_cats(cats)
		return cats
Example #5
def resolve(url):
	try:
		if url.startswith('//'):
			url = 'http:' + url
		result = client.request(url)
		html = result
		result = json.loads(result)
		try:
			f4m=result['content']['media']['f4m']
		except:
			reg=re.compile('"src":"http://(.+?).f4m"')
			f4m=re.findall(reg,html)[0]
			f4m='http://'+f4m+'.f4m'

		result = client.request(f4m)
		soup = webutils.bs(result)
		try:
			base=soup.find('baseURL').getText()+'/'
		except:
			base=soup.find('baseurl').getText()+'/'

		linklist = soup.findAll('media')
		link = linklist[0]
		url = base + link['url']
		return url
	except:
		return
Example #6
 def events(self,url):
     html = client.request(url)
     soup = webutils.bs(html)
     events = soup.find('table',{'class':'streams'}).findAll('tr')
     events.pop(0)
     events = self.__prepare_events(events)
     return events
Example #7
File: nbahd.py Project: Kawakiw/kawakodi
	def links(self,url, img=' '):
		html = client.request(url)
		soup = webutils.bs(html)
		tags=soup.find('div',{'class':'entry-content rich-content'}).findAll('p')
		tags.pop(0)
		out=[]
		tag=tags[0]
		parts=tag.findAll('a')
		i = 1
		for part in parts:
			url = part['href']
			title = 'Part %s'%i
			img = ' '
			i+=1
			out.append((title,url,img))

		if len(out)==0:
			links=re.findall('<p><img src="(.+?)"/>\s*</p>\s*<p>\s*<a href="(.+?)" target="_blank">\s*<img src=".+?"/></a>\s*<a href="(.+?)" target="_blank">\s*<img src=".+?"/></a>\s*<a href="(.+?)" target="_blank">\s*<img src=".+?"/></a>\s*<a href="(.+?)" target="_blank">\s*<img src=".+?"/></a>\s*',html)
			for link in links:
				img = link[0]
				for i in range(4):
					url = link[i+1]
					title = 'Part %s'%(i+1)
					out.append((title,url,img))


		return out
Example #8
	def resolve(self,url):
		html = client.request(url)
		soup = webutils.bs(html)
		url = soup.find('iframe',{'width':'600'})['src']
		if 'nhl' in url:
			url = url.split("playlist=")[-1]
			url = 'http://video.nhl.com/videocenter/servlets/playlist?ids=%s&format=json' % url
			result = client.request(url)
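			# 'publishPoint' in the NHL videocenter playlist JSON holds the stream URL.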
			url = re.compile('"publishPoint":"(.+?)"').findall(result)[0]
			return url
		elif 'rutube' in url:
			url = 'http:' + url
			result = client.request(url)
			m3u8 = re.compile('video_balancer&quot;: {.*?&quot;m3u8&quot;: &quot;(.*?)&quot;}').findall(result)[0]
			result = client.request(m3u8)
			url = re.compile('"\n(.+?)\n').findall(result)
			url = url[::-1]
			return url[0]
		elif 'youtube' in url:
			import liveresolver
			return liveresolver.resolve(url)
		else:
			import urlresolver
			url = urlresolver.resolve(url)
			return url
Example #9
File: nbahd.py Project: Kawakiw/kawakodi
	def resolve(self,url):
		html=client.request(url)
		soup=webutils.bs(html)
		try:
			link=soup.find('iframe',{'frameborder':'0'})['src']
		except:    
			sd = re.findall('<source src="(.+?)" type=\'video/mp4\' data-res="360p">',html)[0]
			try:
				hd = re.findall('<source src="(.+?)" type=\'video/mp4\' data-res="720p">',html)[0]
			except:
				hd = sd
			return hd

		if 'http' not in link:
			link = 'http://nbahd.com' + link
		try:
			html = client.request(link)
			urls = re.findall('src="(.+?)" type="video/mp4"',html)
			try: url = urls[1]
			except: url = urls[0]
			return url
		except:
			try:
				import urlresolver
				resolved = urlresolver.resolve(link)
				return resolved
			except:
				return
Example #10
File: nbahd.py Project: c0ns0le/YCBuilds
	def resolve(self,url):
		html=client.request(url)
		soup=webutils.bs(html)
		try:
			link=soup.find('iframe',{'frameborder':'0'})['src']
		except:    
			sd = re.findall('<source src=["\']([^"\']+)["\'] type=["\']video/mp4["\'] data-res=["\']360p',html)[0]
			try:
				hd = re.findall('<source src=["\']([^"\']+)["\'] type=["\']video/mp4["\'] data-res=["\']720p',html)[0]
			except:
				hd = sd
			return hd

		if 'http' not in link:
			link = 'http://nbahd.com' + link
		try:
			html=client.request(link)
			sd = re.findall('<source src=["\']([^"\']+)["\'] type=["\']video/mp4["\'] data-res=["\']360p',html)[0]
			try:
				hd = re.findall('<source src=["\']([^"\']+)["\'] type=["\']video/mp4["\'] data-res=["\']720p',html)[0]
			except:
				hd = sd
			return hd
		except:
			try:
				import urlresolver
				resolved = urlresolver.resolve(link)
				return resolved
			except:
				return
Example #11
	def channels(self,url):
		self.url = url
		html = client.request(url, referer=self.base)
		soup = webutils.bs(html)
		channels = soup.find('div',{'class':'categoryChannels'}).findAll('li')
		events = self.__prepare_channels(channels)
		return events
Example #12
	def next_page(self):
		html = client.request(self.url)
		soup = webutils.bs(html)
		try:
			next = soup.find('span',{'class':'page-numbers current'}).findNext('a')['href']
		except:
			next = None
		return next
Example #13
	def next_page(self):
		html = client.request(self.url)
		soup = webutils.bs(html)
		try:
			next_page=soup.find('div',{'class':'wp-pagenavi'}).find('a',{'class':'nextpostslink'})['href']
		except:
			next_page=None
		return next_page
Example #14
 def links(self,url):
     html = client.request(url)
     soup = webutils.bs(html)
     links = soup.findAll('tr')
     links.pop(0)
     links.pop(0)
     links = self.__prepare_links(links)
     return links
Example #15
 def links(self,url):
     result = client.request(self.base)
     soup = webutils.bs(result)
     table = soup.find('span',{'class': url})
     links = table.findAll('tr')
     links.pop(0)
     links = self.__prepare_links(links)
     return links
Example #16
 def links(self, url):
     result = client.request(self.base)
     soup = webutils.bs(result)
     table = soup.find("span", {"class": url})
     links = table.findAll("tr")
     links.pop(0)
     links = self.__prepare_links(links)
     return links
Example #17
 def channels(self,url):
     import requests
     html = requests.get(url).text
     soup = webutils.bs(html)
     soup = soup.find('table',{'class':'main'})
     events = soup.findAll('td',{'colspan':'2', 'height':'38'})
     events = self.__prepare_events(events)
     return events
Example #18
 def links(self,url):
     ur = url.split('@')
     url, tag = ur[0], ur[1]
     html = client.request(url)
     soup = webutils.bs(html)
     ls = soup.find('div',{'id':'channel%s'%tag}).findAll('a')
     links=self.__prepare_links(ls)
     return links
Example #19
	def next_page(self):
		html = client.request(self.url)
		soup = webutils.bs(html)
		try:
			next = soup.find('span',{'class':'page-numbers current'}).findNext('a')['href']
			log("{} | Next page: {}".format(info().mode, next), info().debug)
		except:
			log("{} | Next page not found".format(info().mode), info().debug)
			next = None
		return next
Example #20
	def resolve(self,url):
		if 'youtu' in url:
			import urlresolver
			return urlresolver.resolve(url)
		if 'playwire' not in url:
			ref = url
			headers = {'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8','X-Requested-With':'XMLHttpRequest','Referer':ref, 'Origin':self.base, 'Host':'fullmatchesandshows.com'}
			s = requests.Session()

			video_d = re.findall('id=(.+?(\d+))',url)[0]
			video_id = video_d[1]
			url = url.replace('?id=%s'%video_d[0],'')
			html = client.request(url)
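			# Replay the site's AJAX paginator call ('pp_with_ajax' against the
			# admin-ajax endpoint) to get the markup carrying the Playwire config URL.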

			acp_pid = re.findall("id=[\"\']acp_post[\"\'].+?value=[\"\'](\d+)[\"\']",html)[0]
			post_data = {'acp_currpage' : video_id,
						'acp_pid' : acp_pid,
						'acp_shortcode' : 'acp_shortcode',
						'action' : 'pp_with_ajax' 
						}
			result = s.post(self.post_url, data=post_data, headers=headers).content
			url = 'http:' + re.findall('(\/\/config\.playwire\.com\/[^\'\"]+)',result)[0]


		result = client.request(url)
		result = json.loads(result)
		try:
			f4m=result['content']['media']['f4m']
		except:
			reg=re.compile('"src":"(http://.+?.f4m)"')
			f4m=re.findall(reg,html)[0]
			

		result = client.request(f4m)
		soup = webutils.bs(result)
		try:
			base=soup.find('baseURL').getText()+'/'
		except:
			base=soup.find('baseurl').getText()+'/'

		linklist = soup.findAll('media')
		choices,links=[],[]
		for link in linklist:
			url = base + link['url']
			bitrate = link['bitrate']
			choices.append(bitrate)
			links.append(url)
		if len(links)==1:
			return links[0]
		if len(links)>1:
			import xbmcgui
			dialog = xbmcgui.Dialog()
			index = dialog.select('Select bitrate', choices)
			if index>-1:
				return links[index]
Example #21
	def events(self):
		html = client.request('http://www.laola1.tv/en-int/live-schedule', referer=self.base)
		html = convert.unescape(html.decode('utf-8'))
		soup = webutils.bs(html)
		import datetime
		now = datetime.datetime.now()
		cl = 'list list-day day-%s-%02d-%02d'%(now.year,int(now.month),int(now.day))
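		# Today's schedule sits in a <ul> whose class encodes the current date.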
		section = str(soup.find('ul',{'class':cl}))
		events = re.findall('<img.+?src=[\"\']([^\"\']+)[\"\'].+\s*.+\s*.+\s*<a.+?href=[\"\']([^\"\']+)[\"\'].+\s*<h3>([^<]+)<.+\s*<h2>([^<]+)<.+\s*.+\s*.+\s*.+\s*.+\s*.+data-streamstart=[\"\']([^\"\']+)[\"\']',section)
		events = self.__prepare_events(events)
		return events
Example #22
	def next_page(self):
		
		try:
			html = client.request(self.url)
			soup = webutils.bs(html)
			next = soup.find('a',{'class':'last'}).findNext('a')['href']
		except:
			next = None
		return next

#<a href="#" class="td-ajax-next-page" id="next-page-td_uid_1_56cadcda8cbf3" data-td_block_id="td_uid_1_56cadcda8cbf3">
Example #23
 def channels(self):
     html = client.request(self.base)
     soup = webutils.bs(html)
     channels = soup.find('div',{'id':'chanels'}).findAll('li')
     events = []
     for c in channels:
        url = self.base + c.find('a')['href'] 
        title = c.getText()
        events.append((url,title,control.icon_path(info().icon)))
     events.sort(key=lambda x: x[1])
     return events
Example #24
File: hrt.py Project: kevintone/tdbaddon
	def categories(self):
		out=[]
		html = client.request(self.url)
		shows = client.parseDOM(html,'div', attrs={'class':'all_shows'})
		shows = client.parseDOM(shows, 'a',ret='href')
		for show in shows:
			soup = webutils.bs(show)
			url = 'http:' + soup.find('a')['href']
			title = soup.find('a').getText()
			out.append((url,title.encode('utf-8'),icon_path(info().icon)))
		return out
Example #25
	def items(self):
		html = client.request(self.url)
		soup = webutils.bs(html)
		items=soup.findAll('div',{'class':'thumb'})
		out=[]
		for item in items:
			url = item.find('a')['href']
			title=item.find('a')['title'].encode('utf-8')
			title = re.sub('<[^>]*>','',title)
			out+=[[title,url,icon_path(info().icon)]]

		return out
Example #26
 def next_page(self):
     html = client.request(self.url)
     soup = webutils.bs(html)
     try:
         next = soup.find('div', {
             'class': 'navigation'
         }).find('span', {
             'class': 'current'
         }).findNext('a')['href']
     except:
         next = None
     return next
Example #27
	def categories(self):
		html = client.request(self.url)
		soup = webutils.bs(html)
		items = soup.find('select', {'name':'category'}).findAll('option')
		cats = []
		ic = info().icon
		for item in items:
			name = item['value']
			url = self.base + urllib.quote(name)
			if name =='': name = 'All'
			cats.append((url, name, ic))
		return cats
Example #28
	def items(self):
		html = client.request(self.url)
		soup = webutils.bs(html)
		items=soup.findAll('div',{'class':'thumb'})
		out=[]
		for item in items:
			url = item.find('a')['href']
			title=item.find('a')['title'].encode('utf-8')
			title = re.sub('<[^>]*>','',title)
			out+=[[title,url,icon_path(info().icon)]]

		return out
Example #29
	def items(self):
		html = client.request(self.url)
		soup = webutils.bs(html)
		items = soup.find('div',{'id':'cat-container'}).findAll('li')
		out = []
		for item in items:
			url = item.find('a')['href']
			title = item.find('a')['title'].replace('Full Match: ','').encode('utf-8')
			img = item.find('img')['src']
			out.append((title,url,img))

		return out
Example #30
	def channels(self, url):
		if url == '#schedule':
			self.url = url
			return self.schedule()
		elif url == '#favourites':
			self.url = self.favourites_url
			return self.favourites()
		self.url = url.replace(' ','%20')
		html = client.request(self.url,referer=self.base)
		channels = webutils.bs(html).findAll('li')
		events = self.__prepare_channels(channels)
		return events
Example #31
 def next_page(self):
     html = client.request(self.url)
     soup = webutils.bs(html)
     try:
         next = soup.find('span', {
             'class': 'page-numbers current'
         }).findNext('a')['href']
         log("{} | Next page: {}".format(info().mode, next), info().debug)
     except:
         log("{} | Next page not found".format(info().mode), info().debug)
         next = None
     return next
Example #32
 def categories(self):
     html = client.request(self.url)
     soup = webutils.bs(html)
     items = soup.find('select', {'name': 'category'}).findAll('option')
     cats = []
     ic = info().icon
     for item in items:
         name = item['value']
         url = self.base + urllib.quote(name)
         if name == '': name = 'All'
         cats.append((url, name, ic))
     return cats
Example #33
	def categories(self):
		html = self.session.get(self.url).text
		soup = webutils.bs(html)
		items = soup.find('select', {'name':'category'}).findAll('option')
		cats = []
		ic = info().icon
		for item in items:
			name = item['value']
			url = self.base + name
			if name =='': name = 'All'
			cats.append((url, name, ic))
		return cats
Example #34
    def next_page(self):

        try:
            html = client.request(self.url)
            soup = webutils.bs(html)
            next = soup.find('a', {'class': 'last'}).findNext('a')['href']
        except:
            next = None
        return next


#<a href="#" class="td-ajax-next-page" id="next-page-td_uid_1_56cadcda8cbf3" data-td_block_id="td_uid_1_56cadcda8cbf3">
Example #35
File: nbahd.py Project: Kawakiw/kawakodi
	def items(self):
		html = client.request(self.url)
		soup = webutils.bs(html)
		items=soup.findAll('div',{'class':'thumb'})
		out=[]
		for item in items:
			url=self.base + item.find('a')['href']
			title=item.find('a')['title'].encode('utf-8')
			thumb=item.find('img')['src'].encode('utf-8')

			out+=[[title,url,thumb]]

		return out
Example #36
	def items(self):
		html = client.request(self.url)
		html = convert.unescape(html.decode('utf-8'))
		soup = webutils.bs(html)
		items = soup.find('div',{'id':'cat-container'}).findAll('li')
		out = []
		for item in items:
			url = item.find('a')['href']
			title = item.find('a')['title'].replace('Full Match: ','').encode('utf-8', 'xmlcharrefreplace')
			img = item.find('img')['src']
			out.append((title,url,img))

		return out
Example #37
 def items(self):
     out = []
     html = client.request(self.url)
     soup = webutils.bs(html)
     tag = soup.find('div', {'data-ajax-param': 'Epizode'})
     eps = tag.findAll('article')
     out = []
     for ep in eps:
         url = ep.find('a')['href']
         thumb = ep.find('img')['src']
         title = ep.find('h1').getText().encode('utf-8')
         out += [[title, url, thumb]]
     return out
Example #38
    def items(self):
        html = client.request(self.url)
        soup = webutils.bs(html)
        items = soup.findAll('div', {'class': 'thumb'})
        out = []
        for item in items:
            url = item.find('a')['href']
            title = item.find('a')['title'].encode('utf-8')
            thumb = item.find('img')['src'].encode('utf-8')

            out += [[title, url, thumb]]

        return out
Example #39
    def resolve(self, url):
        html = client.request(url)
        soup = webutils.bs(html)
        video = soup.find('iframe')['src']
        if 'youtube' in video:
            yt_id = self.yt_video_id(video)
            l = 'http://www.youtube.com/watch?v=' + yt_id

            import YDStreamExtractor
            YDStreamExtractor.disableDASHVideo(True)
            vid = YDStreamExtractor.getVideoInfo(l, quality=1)
            resolved = vid.streamURL()
            return resolved
Example #40
    def items(self):
        out = []
        urls = []
        img = control.icon_path(info().icon)
        html = client.request(self.url)
        if html is None:
            return []
        links = webutils.bs(html).find('div', {'id': 'primary'}).findAll('a')
        for l in links:
            a = re.findall('201[56]/\d\d/', l['href'])
            if len(a) != 0:
                out.append((l.getText(), l['href'], img))

        return out
Example #41
 def items(self):
     out = []
     html = client.request(self.url)
     soup = webutils.bs(html)
     lis = soup.findAll('li')
     for li in lis:
         try:
             thumb = li.find('img')['src']
             url = self.base + li.findAll('a')[1]['href']
             title = li.find('h2').getText().encode('utf-8')
             out.append([title, url, thumb])
         except:
             pass
     return out
Example #42
	def links(self,url):
		out=[]
		html = client.request(url)
		soup = webutils.bs(html)
		table = soup.find('table',{'align':'center', 'width':'96%', 'cellpadding':'1','cellspacing':'1'})
		links = table.findAll('td',{'width':'33%'})
		for link in links:
			url = self.base + link.find('a')['href']
			title = link.findAll('a')[1].find('b').getText()
			img = link.find('img')['src']
			out.append((title,url,img))


		return out
Example #43
    def events(self):
        out = []
        import requests
        s = requests.session()
        html = s.get(self.url).text
        soup = webutils.bs(html)
        articles = soup.findAll('article', {'class': 'post hentry'})
        for article in articles:
            s = article.findAll('img')[1]
            title = s['alt']
            img = s['src']
            url = article.find('h2').find('a')['href']
            out.append((url, title, img))

        return out
Example #44
    def items(self):
        html = client.request(self.url)
        soup = webutils.bs(html)
        items = soup.find('div', {'id': 'archive'}).findAll('li')
        out = []
        for item in items:
            try:
                title = item.find('div', {'class': 'cover'}).find('a')['title']
                url = item.find('div', {'class': 'cover'}).find('a')['href']
                img = item.find('div', {'class': 'cover'}).find('img')['src']
                out.append((title, url, img))
            except:
                pass

        return out
Example #45
    def items(self):
        html = client.request(self.url)
        soup = webutils.bs(html)
        out = []
        items = soup.find('div', {'id': 'archive-area'})
        items = items.findAll('li')
        for item in items:
            url = item.find('a')['href']
            title = item.find('a')['title']
            title = title.split('/')
            title = '%s (%s)' % (title[-1].strip(), title[-2].strip())
            img = item.find('img')['srcset'].split(',')[0].replace(' 300w', '')
            item = (title, url, img)
            out.append(item)

        return out
Example #46
	def channels(self,index):
		index = int(index)
		img = control.icon_path(info().icon)
		events = []
		html = client.request('http://af-proxy.appspot.com/arconaitv.me')
		lis = webutils.bs(html).findAll('ul',{'id':'mega_main_menu_ul'})[0]
		lis = lis.findNext('li')
		if index > 0:
			lis = lis.findNextSibling('li')
		if index > 1:
			lis = lis.findNextSibling('li')
		lis = lis.findAll('a')
		for li in lis:
			if li.getText()[1] != '-' :
				events.append((li['href'],li.getText(),img))
		events.sort(key=lambda x: x[1])
		return events
Example #47
    def items(self):
        out = []
        html = client.request(self.base)
        items = webutils.bs(html).findAll('p', {'class': 'MsoNormal'})
        words = [
            'thank', 'full', 'chrome', '&nbsp;', 'page', 'contact', 'you must?'
        ]
        for i in items:
            item = i.getText()
            if len(item) > 50 or any(w in item.lower() for w in words):
                continue
            if '(' in item:
                item = '[B][COLOR orange]%s[/COLOR][/B]' % item

            out.append((item, item, control.icon_path(info().icon)))

        return out
Example #48
	def resolve(self,url):
		ref = url
		headers = {'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8','X-Requested-With':'XMLHttpRequest','Referer':ref, 'Origin':'http://thefootballcouch.com', 'Host':'thefootballcouch.com'}
		s = requests.Session()

		video_id = re.findall('id=(\d+)',url)[0]
		url = url.replace('?id=%s'%video_id,'')
		html = client.request(url)
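		# Replay the site's 'playwirevideos' AJAX call against admin-ajax.php
		# to obtain the Playwire config URL for this post.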
		post_id = re.findall("action: 'playwirevideos',\s*postid:[\'\"](.+?)[\'\"]",html)[0]
		post_data = "action=playwirevideos&postid=%s&serialid=%s"%(post_id,video_id)
		result = s.post('http://thefootballcouch.com/wp-admin/admin-ajax.php', data=post_data, headers=headers).content
		url = 'http:' + re.findall('(\/\/config\.playwire\.com\/[^\'\"]+)',result)[0]


		result = client.request(url)
		result = json.loads(result)
		try:
			f4m=result['content']['media']['f4m']
		except:
			reg=re.compile('"src":"(http://.+?.f4m)"')
			f4m=re.findall(reg,html)[0]
			

		result = client.request(f4m)
		soup = webutils.bs(result)
		try:
			base=soup.find('baseURL').getText()+'/'
		except:
			base=soup.find('baseurl').getText()+'/'

		linklist = soup.findAll('media')
		choices,links=[],[]
		for link in linklist:
			url = base + link['url']
			bitrate = link['bitrate']
			choices.append(bitrate)
			links.append(url)
		if len(links)==1:
			return links[0]
		if len(links)>1:
			import xbmcgui
			dialog = xbmcgui.Dialog()
			index = dialog.select('Select bitrate', choices)
			if index>-1:
				return links[index]
Example #49
	def items(self):
		html = client.request(self.url)
		soup = webutils.bs(html)
		items = soup.find('div',{'class':'listhighlights'}).findAll('li')
		out = []
		for item in items:
			date = addon.unescape(item.find('div',{'class':'date_time column'}).getText())
			url = item.find('div',{'class':'play_btn column'}).find('a')['href']
			home = addon.unescape(item.find('div',{'class':'team home column'}).getText())
			away = addon.unescape(item.find('div',{'class':'team column'}).getText())
			result = addon.unescape(item.find('div',{'class':'result column'}).getText())
			league = addon.unescape(item.find('div',{'class':'leaguelogo column'}).find('a')['title'])
			title  = '%s (%s) [B]%s %s %s[/B]'%(date,league,home,result,away)
			import HTMLParser
			title = HTMLParser.HTMLParser().unescape(title).encode('utf-8')
			out.append((title,url,info().icon))

		return out
Example #50
    def events(self):
        out = []

        html = self.s.get(self.url).text
        soup = webutils.bs(html)
        chs = soup.findAll('tr', {'class': 'vevent'})
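        # 'vevent' and 'url summary' are hCalendar microformat classes,
        # which makes the schedule rows straightforward to pick out.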
        for c in chs:
            country = c.find('td', {'class': 'competition'}).getText().strip()
            inf = c.find('a', {'class': 'url summary'})
            link = self.base + inf['href']
            ch = inf['title'].strip()
            img = c.find('td', {'class': 'event'}).find('img')['src']
            if img.startswith('//'):
                img = 'https:' + img

            title = u'(%s) %s' % (country, ch)
            if cache.get(self.links, 2000, link) != []:
                out.append((link, title, img))
        return out
Example #51
    def resolve(self, url):
        try:
            result = client.request(url)
            html = result
            url = 'http:' + re.findall('(\/\/config\.playwire\.com\/[^\'\"]+)',
                                       html)[0]
            result = client.request(url)

            result = json.loads(result)
            try:
                f4m = result['content']['media']['f4m']
            except:
                reg = re.compile('"src":"http://(.+?).f4m"')
                f4m = re.findall(reg, html)[0]
                f4m = 'http://' + f4m + '.f4m'

            result = client.request(f4m)
            soup = webutils.bs(result)
            try:
                base = soup.find('baseURL').getText() + '/'
            except:
                base = soup.find('baseurl').getText() + '/'

            linklist = soup.findAll('media')
            choices, links = [], []
            for link in linklist:
                url = base + link['url']
                bitrate = link['bitrate']
                choices.append(bitrate)
                links.append(url)
            if len(links) == 1:
                return links[0]
            if len(links) > 1:
                import xbmcgui
                dialog = xbmcgui.Dialog()
                index = dialog.select('Select bitrate', choices)
                if index > -1:
                    return links[index]
            return

        except:
            return
Example #52
    def events(self):
        out = []

        html = requests.get(self.base).text
        leagues = webutils.bs(html).findAll('div', {'class': 'competition'})

        out = []

        for l in leagues:
            name = l.find('div', {'class': 'info'}).getText().strip()
            league_name = u'[COLOR blue]► {}[/COLOR]'.format(name)
            out.append(('x', league_name))
            events = l.findAll('div', {'class': 'col-md-6 col-sm-6'})
            for e in events:
                url = e.find('a')['href']
                names = re.sub('-live-stream[^\/$]*\/?', '', url)
                names = names.split('/')[-1].split('vs')
                divs = e.find('div', {
                    'class': 'match'
                }).findAll('div', {'class': 'team-name'})
                live = e.find('div', {'class': 'status live-indicator'})
                homeName = names[0].replace('-', ' ').title()
                awayName = names[1].replace('-', ' ').title()
                if live:
                    scores = e.findAll('div', {'class': 'score'})
                    home = scores[0].getText()
                    away = scores[1].getText()
                    title = u'([COLOR red]LIVE[/COLOR]) {} [B]{}[/B] - [B]{}[/B] {} | {}'.format(
                        homeName, home, away, awayName, live.getText())
                else:
                    time = e.find('div', {'class': 'status'}).getText()
                    try:
                        time = self.convert_time(time)
                    except:
                        time = None
                    if time is None:
                        continue
                    title = u'({}) [B]{} - {}[/B]'.format(
                        time, homeName, awayName)
                out.append((url, title))
        return out
Example #53
    def links(self, url):
        html = requests.get(url).text
        soup = webutils.bs(html)
        try:
            img = soup.find('meta', {'property': 'og:image'})['content']
        except:
            img = control.icon_path(info().icon)
        out = []

        videos = soup.findAll('a', {'class': 'link-iframe'})
        for v in videos:
            url = v['href']
            title = v.getText()
            img = info().icon
            out.append((url, title, img))

        if len(out) == 0:
            ifr = re.findall('iframe.+?src\s*=\s*[\"\']([^\"\']+)', html)[0]
            out.append((ifr, 'Link 1', img))

        return out
Example #54
    def items(self):
        html = client.request(self.url)
        soup = webutils.bs(html)
        items = soup.find('div', {'id': 'video_rows'}).findAll('div')
        out = []
        for item in items:
            try:
                url = self.base + item.find('a')['href']
                title = item.find('div', {
                    'class': 'video_row_info_name'
                }).getText().strip().replace('NFL 2015-2016 /',
                                             '').encode('utf-8')
                img = re.findall("background-image: url\('(.+?)'\);",
                                 str(item))[0]
                item = (title, url, img)
                if item not in out:
                    out.append(item)
            except:
                pass

        return out
Example #55
    def _links(self, url):
        out = []
        out2 = []
        html = requests.get(url, headers={'Referer': self.base}).text
        id = re.findall('streamsmatchid\s*=\s*(\d+)\;', html, flags=re.I)[0]
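        # The page embeds a numeric match id; the actual streams table is
        # fetched separately from sportscentral.io using that id.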

        uri = 'https://sportscentral.io/streams-table/{}/basketball?new-ui=1&origin=nbabite.com'.format(
            id)
        html = requests.get(uri,
                            headers={
                                'user-agent': constants.USER_AGENT,
                                'referer': url
                            }).text
        soup = webutils.bs(html).find('table',
                                      {'class': 'table streams-table-new'})

        try:
            rows = soup.findAll('tr')
        except:
            return []
        rows.pop(0)
        titles = {}
        for r in rows:
            h = r.findAll('td')
            title = '{} {} ({})'.format(h[7].getText().strip(),
                                        h[4].getText().strip(),
                                        h[5].getText().strip())
            url = r['data-stream-link']
            titles[url] = title

            out.append((url, title))

        links = [u[0] for u in out]
        ret = linkSearch.getLinks(links)
        out2 = []
        for u in ret:
            out2.append((u, titles[u]))

        return out2
Example #56
	def channels(self):
		html = client.request('http://www.streamhd.eu/tv/')
		channels = re.findall('<a href=[\"\'](/[^\"\']+)[\"\']>	<img.+?alt=[\"\'](.+?)\s*Live Stream[\"\'].+?src=[\"\']data:image/png;base64',html)
		out = self.__prepare_channels(channels)
		html = client.request(self.base)
		soup = webutils.bs(html)
		rows = soup.find('table',{'class':'table table-hover table-condensed table-striped'}).find('tbody').findAll('tr')
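		# Row layout: tds[0] time (GMT), tds[1] sport, tds[3] subcategory, tds[4] match link.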
		for row in rows:
			tds = row.findAll('td')
			time = self.convert_time(tds[0].getText().replace('GMT','').strip())
			sport = tds[1].getText().strip()
			sub = tds[3].getText().strip()
			match = tds[4].getText().strip()
			url = self.base + tds[4].findAll('a')[0]['href']
			if sport!=sub:
				sport += '-%s'%sub

			title = '[COLOR orange](%s)[/COLOR] (%s) [B]%s[/B]'%(time,sport,match)
			out.append((url,title,control.icon_path(info().icon)))
		
		return out
Example #57
    def resolve(self, url):
        html = client.request(url)
        soup = webutils.bs(html)
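        # Prefer the Brightcove player (playerID / @videoPlayer params);
        # fall back to the embedded YouTube iframe when they are absent.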

        try:
            rreg = '<param name="playerID" value="(.+?)" /><param name="@videoPlayer" value="(.+?)" />'
            ids = re.compile(rreg).findall(html)[0]
            l = 'http://c.brightcove.com/services/viewer/federated_f9?&width=690&height=388&flashID=bcExperienceObj0&bgcolor=%23FFFFFF&ConvivaConfig.events=%20%3CConvivaEventsMap%3E%20%3Cevent%20name%3D%22mediaPlay%22%20module%3D%22videoPlayer%22%20eventType%3D%22mediaPlay%22%3E%20%3C%2Fevent%3E%20%3Cevent%20name%3D%22mediaError%22%20module%3D%22videoPlayer%22%20eventType%3D%22mediaError%22%3E%20%3Cattr%20key%3D%22errorCode%22%20type%3D%22eventField%22%20value%3D%22code%22%2F%3E%20%3C%2Fevent%3E%20%3C%2FConvivaEventsMap%3E%20&playerID=' + ids[
                0] + '&%40videoPlayer=' + ids[
                    1] + '&isVid=true&isUI=true&playerKey=AQ~~%2CAAAA1DWaSzk~%2CCZSkTksiPhQqITLsbi03E4KbEIFdh_zL'

        except:
            link = soup.find('iframe',
                             {'class': 'media-youtube-player'})['src']
            yt_id = self.yt_video_id(link)
            l = 'http://www.youtube.com/watch?v=' + yt_id

        import YDStreamExtractor
        YDStreamExtractor.disableDASHVideo(True)
        vid = YDStreamExtractor.getVideoInfo(l, quality=1)
        resolved = vid.streamURL()
        return resolved
Example #58
	def events(self,url):
		out = []
		self.s.headers.update({'referer': self.base})
		html = self.s.get(url).text
		
		soup = webutils.bs(html)
		table = soup.find('div', {'class':'col-12 col-md-9'})
		events = table.findAll('a')
		for event in events:
			title = event['title']
			url  = self.base + event['href']
			try:
				time = event.findAll('span')[1]['content']
			except:
				time = None
			if time:
				t1, t2 = time.split('T')
				time = t2
				year, month, day = t1.split('-')
				time, stamp = self.convert_time(time, year, month, day)
				title = u"({}) [B]{}[/B]".format(time, title)

			out.append((url, title))
		return out
Example #59
	def resolve(self,url):
		ref = url
		html = client.request(url)
		soup = webutils.bs(html)
		try:
			url = soup.find('iframe',{'width':'600'})['src']
		except:
			try:
				url = 'http:' + re.findall('(\/\/config\.playwire\.com\/[^\'\"]+)',html)[0]
			except:
				return
		if 'nhl' in url:
			url = url.split("playlist=")[-1]
			url = 'http://video.nhl.com/videocenter/servlets/playlist?ids=%s&format=json' % url
			result = client.request(url)
			url = re.compile('"publishPoint":"(.+?)"').findall(result)[0]
			return url
		elif 'rutube' in url:
			url = re.findall('embed/(\d+)',url)[0]
			url = 'http://rutube.ru/api/play/options/'+url+'?format=json'
			result = client.request(url)
			jsx = json.loads(result)
			link = jsx['video_balancer']['m3u8']
			return link
		
		elif 'mail.ru' in url:
			link=url

			link = link.replace('https://videoapi.my.mail.ru/videos/embed/mail/','http://videoapi.my.mail.ru/videos/mail/')
			link = link.replace('http://videoapi.my.mail.ru/videos/embed/mail/','http://videoapi.my.mail.ru/videos/mail/')
			link = link.replace('html','json')
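			# The mail.ru JSON endpoint issues a 'video_key' cookie that must be
			# sent back as a header (after '|') when playing the stream.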
			s = requests.Session()
			f = s.get(link).text

			js = json.loads(f)
			token = s.cookies.get_dict()['video_key']
			url = js['videos'][-1]['url'] + '|%s'%(urllib.urlencode({'Cookie':'video_key=%s'%token, 'User-Agent':client.agent(), 'Referer':ref} ))
			return url

		elif 'youtube' in url:
			import liveresolver
			return liveresolver.resolve(url)
		elif 'playwire' in url:
			try:
				result = client.request(url)
				html = result
				result = json.loads(result)
				try:
					f4m=result['content']['media']['f4m']
				except:
					reg=re.compile('"src":"http://(.+?).f4m"')
					f4m=re.findall(reg,html)[0]
					f4m='http://'+f4m+'.f4m'

				result = client.request(f4m)
				soup = webutils.bs(result)
				try:
					base=soup.find('baseURL').getText()+'/'
				except:
					base=soup.find('baseurl').getText()+'/'

				linklist = soup.findAll('media')
				choices,links=[],[]
				for link in linklist:
					url = base + link['url']
					bitrate = link['bitrate']
					choices.append(bitrate)
					links.append(url)
				if len(links)==1:
					return links[0]
				if len(links)>1:
					import xbmcgui
					dialog = xbmcgui.Dialog()
					index = dialog.select('Select bitrate', choices)
					if index>-1:
						return links[index]
				return
			except:
				return
		else:
			import urlresolver
			url = urlresolver.resolve(url)
			return url