예제 #1
0
	def channels(self):
		"""Scrape channel tables from both listing pages and return the
		merged list sorted by title.

		Returns the list produced by __prepare_channels, i.e. tuples whose
		element 1 is the display title (used as the sort key).
		"""
		# The same 4-column <td> row pattern matches both pages, so
		# compile it once (the original compiled an identical pattern
		# twice). Raw string so \s reaches the regex engine unescaped.
		row_re = re.compile(r'<strong>(.+?)</strong></a></td>\s*<td>(.+?)</td>\s*<td>(.+?)</td>\s*<td>(.+?)</td>')

		html = client.request(self.url, referer=self.base)
		html = convert.unescape(html.decode('utf-8'))
		events = self.__prepare_channels(row_re.findall(html))

		html = client.request('http://www.livefootballol.com/sopcast-channel-list.html', referer=self.base)
		html = convert.unescape(html.decode('utf-8'))
		events = self.__prepare_channels(row_re.findall(html), ev=events)

		events.sort(key=lambda x: x[1])
		return events
	def links(self,url):
		"""Return (label, unescaped href, icon) for each <option> tag on *url*."""
		html = client.request(url)
		pairs = re.findall('<option value=[\"\']([^\"\']+)[\"\']>([^<]+)<',html)
		return [(label, convert.unescape(href), control.icon_path(info().icon))
			for href, label in pairs]
예제 #3
0
 def events(self,url):
     """Fetch *url*, extract the 'lshpanel' divs and return the prepared
     event list."""
     html = client.request(url)
     # Page is UTF-8; decode then unescape HTML entities before parsing.
     html = convert.unescape(html.decode('utf-8'))
     #events = webutils.bs(html).findAll('div',{'class':'lshpanel'})
     events = client.parseDOM(html, 'div',attrs={'class':'lshpanel'})
     events = self.__prepare_events(events)
     return events
예제 #4
0
 def events(self,url):
     """Return prepared events scraped from the 'lshpanel' divs of *url*."""
     markup = convert.unescape(client.request(url).decode('utf-8'))
     panels = client.parseDOM(markup, 'div',attrs={'class':'lshpanel'})
     return self.__prepare_events(panels)
예제 #5
0
 def events(self, url):
     """Scrape the schedule page at *url* and return prepared events.

     Each raw match tuple is (full <h1> block, <a> attributes, css
     class, kick-off time, match title).
     """
     result = client.request(url)
     result = convert.unescape(result.decode('utf-8'))
     # Raw string so \s and \d are passed to the regex engine verbatim
     # (the original relied on Python 2 leaving unknown escapes alone).
     reg = re.compile(r'(<h1 class="leagues".+?align="left"[^>]*>\s*<a([^>]*)>\s*<span class="[^"]+ ([^"]*)">[^>]*><span gday="[^"]*" class="matchtime">([\d:]*)</span>\s*([^<]+)\s*</a>\s*</h1>)')
     events = re.findall(reg, result)
     events = self.__prepare_events(events)
     return events
예제 #6
0
 def events(self):
     """Return the prepared (url, title) event list from the blog index."""
     page = client.request(self.url, referer=self.base)
     page = convert.unescape(page.decode('utf-8'))
     matches = re.findall(
         '<!-- blog headline-->\s*<h2><a href=[\"\']([^\"\']+)[\"\'] rel=[\"\']bookmark[\"\']>([^<]+)<',
         page)
     return self.__prepare_events(matches)
예제 #7
0
 def articles(self):
     """Scrape the front page for (url, title, thumbnail) article triples."""
     markup = convert.unescape(
         client.request(self.base, referer=self.base).decode('utf-8'))
     found = re.findall(
         'div class="widget-cat-img">\s*<a href="(.+?)" rel="bookmark" title="(.+?)"><img.+?lazy-src="(.+?)"',
         markup)
     return self.__prepare_articles(found)
예제 #8
0
 def events(self, url):
     """Fetch *url* and return events prepared from its schedule markup.

     Regex groups: (whole <h1> block, <a> attributes, css class,
     kick-off time, match title).
     """
     result = client.request(url)
     result = convert.unescape(result.decode('utf-8'))
     reg = re.compile(
         '(<h1 class="leagues".+?align="left"[^>]*>\s*<a([^>]*)>\s*<span class="[^"]+ ([^"]*)">[^>]*><span gday="[^"]*" class="matchtime">([\d:]*)</span>\s*([^<]+)\s*</a>\s*</h1>)'
     )
     events = re.findall(reg, result)
     events = self.__prepare_events(events)
     return events
예제 #9
0
	def items(self):
		"""Scrape the highlights list into (title, url, icon) tuples.

		Title format: 'date (league) [B]home result away[/B]', HTML
		entities unescaped and the result UTF-8 encoded.
		"""
		import HTMLParser  # hoisted: was re-imported on every loop pass
		unescape = HTMLParser.HTMLParser().unescape
		html = client.request(self.url)
		soup = webutils.bs(html)
		items = soup.find('div',{'class':'listhighlights'}).findAll('li')
		out = []
		for item in items:
			date = convert.unescape(item.find('div',{'class':'date_time column'}).getText())
			url = item.find('div',{'class':'play_btn column'}).find('a')['href']
			home = convert.unescape(item.find('div',{'class':'team home column'}).getText())
			away = convert.unescape(item.find('div',{'class':'team column'}).getText())
			result = convert.unescape(item.find('div',{'class':'result column'}).getText())
			league = convert.unescape(item.find('div',{'class':'leaguelogo column'}).find('a')['title'])
			title  = '%s (%s) [B]%s %s %s[/B]'%(date,league,home,result,away)
			title = unescape(title).encode('utf-8')
			out.append((title,url,info().icon))

		return out
예제 #10
0
 def links(self, url):
     """Return (label, unescaped href, icon) for each <option> on *url*."""
     out = []
     html = client.request(url)
     links = re.findall('<option value=[\"\']([^\"\']+)[\"\']>([^<]+)<',
                        html)
     for link in links:
         # link == (value, label); the value may contain HTML entities.
         out.append((link[1], convert.unescape(link[0]),
                     control.icon_path(info().icon)))
     return out
예제 #11
0
	def events(self):
		"""Scrape today's live schedule from laola1.tv.

		The <ul> holding today's listings carries a CSS class containing
		the current date ('list list-day day-YYYY-MM-DD'); only that
		section is searched.  Raw tuples are (img, url, h3 text, h2 text,
		data-streamstart) and are handed to __prepare_events.
		"""
		html = client.request('http://www.laola1.tv/en-int/live-schedule', referer=self.base)
		html = convert.unescape(html.decode('utf-8'))
		soup = webutils.bs(html)
		import datetime
		now = datetime.datetime.now()
		# e.g. 'list list-day day-2016-05-03' -- selects today's list only
		cl = 'list list-day day-%s-%02d-%02d'%(now.year,int(now.month),int(now.day))
		section = str(soup.find('ul',{'class':cl}))
		events = re.findall('<img.+?src=[\"\']([^\"\']+)[\"\'].+\s*.+\s*.+\s*<a.+?href=[\"\']([^\"\']+)[\"\'].+\s*<h3>([^<]+)<.+\s*<h2>([^<]+)<.+\s*.+\s*.+\s*.+\s*.+\s*.+data-streamstart=[\"\']([^\"\']+)[\"\']',section)
		events = self.__prepare_events(events)
		return events
예제 #12
0
 def __prepare_channels(self, channels):
     """Turn scraped rows (path first, title second) into
     (url, utf-8 title, icon) tuples; '.ts' streams are wrapped in a
     f4mTester plugin:// URL."""
     icon = control.icon_path(info().icon)
     prepared = []
     # Rows may carry extra trailing fields, so index instead of unpack.
     for c in channels:
         link = self.base + c[0]
         name = convert.unescape(c[1])
         if '.ts' in link:
             link = 'plugin://plugin.video.f4mTester/?streamtype=TSDOWNLOADER&url=%s' % (
                 urllib.quote(link))
         prepared.append((link, name.encode('utf-8'), icon))
     return prepared
예제 #13
0
 def __prepare_channels(self, urls, titles, imgs, epgs):
     """Zip the parallel channel lists into (url, title, img) tuples.

     Each title is decorated with the channel's current EPG entry and
     UTF-8 encoded.
     """
     out = []
     for i, url in enumerate(urls):
         label = '[B]%s[/B] - [I][COLOR green]%s[/COLOR][/I]' % (
             titles[i].decode('utf-8'), self.get_epg(epgs[i]))
         out.append((url, convert.unescape(label).encode('utf-8'), imgs[i]))
     return out
예제 #14
0
	def content(self,url):
		"""Return (text, video) for an article page.

		text  -- plain text of the entry body, UTF-8 encoded.
		video -- resolved URL of the first embedded iframe, or None when
		         the page has no embed or resolving fails.
		"""
		html = client.request(url)
		html = convert.unescape(html.decode('utf-8'))
		# Parse once and reuse; the original built a second soup just to
		# look for the iframe.
		entry = webutils.bs(html).find('div',{'class':'entry-content'})
		text = entry.getText().encode('utf-8', 'xmlcharrefreplace')

		try:
			video = entry.find('iframe')['src']
			video = self.resolve(video)
		except Exception:  # no iframe / unresolvable embed -> no video
			video = None

		return text,video
예제 #15
0
	def items(self):
		"""List replay entries as [title, url, img] triples; boilerplate
		'Full Game Replay' phrases are stripped from titles."""
		page = convert.unescape(client.request(self.url).decode('utf-8'))
		found = re.findall('<a class=[\"\']cvp-ctr-track[\"\'].+?href=[\"\'](.+?)[\"\'] title=[\"\'](.+?)[\"\']\s*>\s*<img src=[\"\'](.+?)[\"\']',page)
		results = []
		for link, raw_title, img in found:
			cleaned = raw_title.replace('-Full Game Replay-','-').replace('Full Game Replay HD Quality','-').replace('Replay HD Quality','-')
			results.append([cleaned.encode('utf-8'), link, img])
		return results
예제 #16
0
	def items(self):
		"""Scrape replay links, returning [title, url, img] lists.

		The site's 'Full Game Replay' boilerplate is stripped from titles.
		"""
		html = client.request(self.url)
		html = convert.unescape(html.decode('utf-8'))
		items= re.findall('<a class=[\"\']cvp-ctr-track[\"\'].+?href=[\"\'](.+?)[\"\'] title=[\"\'](.+?)[\"\']\s*>\s*<img src=[\"\'](.+?)[\"\']',html)
		out=[]
		for item in items:
			url = item[0]
			title=item[1].replace('-Full Game Replay-','-').replace('Full Game Replay HD Quality','-').replace('Replay HD Quality','-')
			img = item[2]
			out+=[[title.encode('utf-8'),url,img]]

		return out
예제 #17
0
 def links(self, url):
     """Return (title, iframe src, icon) for each labelled embed on *url*."""
     out = []
     html = client.request(url)
     links = re.findall(
         '(?:<.+?>)+([A-Z][^:<]+).+?\s*(?:.+?)?<iframe.+?src=[\"\']([^\"\']+)[\"\']',
         html)
     # A loop counter in the original was incremented but never used;
     # it has been removed.
     for link in links:
         out.append((convert.unescape(link[0]).encode('utf-8'), link[1],
                     control.icon_path(info().icon)))
     return out
예제 #18
0
	def items(self):
		"""List category entries as (title, url, img) tuples.

		Titles have the 'Full Match: ' prefix removed and are UTF-8
		encoded with XML character references for non-encodable chars.
		"""
		html = client.request(self.url)
		html = convert.unescape(html.decode('utf-8'))
		soup = webutils.bs(html)
		items = soup.find('div',{'id':'cat-container'}).findAll('li')
		out = []
		for item in items:
			url = item.find('a')['href']
			title = item.find('a')['title'].replace('Full Match: ','').encode('utf-8', 'xmlcharrefreplace')
			img = item.find('img')['src']
			out.append((title,url,img))

		return out
예제 #19
0
    def items(self):
        """Scrape the 2016 full-race category page and return
        (title, url, img) tuples with titles unescaped and UTF-8 encoded.
        """
        # A never-used 'urls' list in the original has been removed.
        out = []
        html = client.request(
            'http://f1fullraces.com/category/full-race/2016/')
        items = re.findall(
            '<div class=[\"\']content-list-thumb[\"\']>\s*<a.+?href=[\"\']([^\"\']+)[\"\'].+?title=[\"\']([^\"\']+)[\"\']>\s*<img.+?src=[\"\']([^\"\']+)[\"\']',
            html)
        for item in items:
            out.append(
                (convert.unescape(item[1]).encode('utf-8'), item[0], item[2]))

        return out
예제 #20
0
    def __prepare_items(self, items, result):
        """Build (title, url, icon) tuples from raw video markup blocks.

        The title is taken from the last <b> element containing a dash
        (or &ndash;); tags are stripped and entities unescaped.
        *result* is accepted for signature compatibility but unused.
        """
        out = []
        for video in items:
            title = re.compile('<b>(.+?)</b>').findall(video)
            # Prefer the last bold chunk that looks like 'Home - Away'.
            title = [i for i in title if '&ndash;' in i or '-' in i][-1]
            title = title.split('<b>')[-1]
            title = title.replace('&ndash;', '-')
            title = convert.unescape(webutils.remove_tags(title))
            title = title.encode('utf-8')
            url = self.base + re.compile('<a.+?href="(.+?)"').findall(video)[0]
            out += [(title, url, info().icon)]

        return out
예제 #21
0
	def items(self):
		"""Return (title, url, img) for every <li> in the category list;
		titles lose their 'Full Match: ' prefix."""
		markup = convert.unescape(client.request(self.url).decode('utf-8'))
		entries = webutils.bs(markup).find('div',{'id':'cat-container'}).findAll('li')
		collected = []
		for entry in entries:
			anchor = entry.find('a')
			collected.append((
				anchor['title'].replace('Full Match: ','').encode('utf-8', 'xmlcharrefreplace'),
				anchor['href'],
				entry.find('img')['src']))
		return collected
예제 #22
0
	def __get_schedule(self,list):
		"""Prepend live-schedule entries from ibrodtv to *list*.

		NOTE(review): the parameter shadows the builtin `list`; kept
		unchanged for caller compatibility.
		"""
		new = []
		new.append(('x','[COLOR yellow]LIVE EVENTS[/COLOR]',control.icon_path(info().icon)))
		html = client.request('http://www.ibrodtv.net/load.php')
		# The page is Windows-1252 encoded, not UTF-8.
		html = convert.unescape(html.decode('cp1252'))
		items = re.findall('class=[\"\']t[\"\']>(.+?)</span></div>\s*<div class=[\"\']name[\"\']>(.+?)</div>\s*<a href=[\"\'](.+?)[\"\']>',html)
		for item in items:
			# item == (raw time, channel/event name, target url)
			title = item[1]
			url = item[2]
			time = self.convert_time(item[0])
			title = '[COLOR orange](%s)[/COLOR] [B]%s[/B]'%(time,title)
			new.append((url,title.encode('utf-8', 'xmlcharrefreplace'),control.icon_path(info().icon)))
		list = new+list
		return list
예제 #23
0
	def items(self):
		"""Scrape video teasers from the page's first 'wpb_wrapper' div.

		Returns (title, url, img) tuples; titles UTF-8 encoded.
		"""
		html = client.request(self.url,referer=self.base)
		html = convert.unescape(html.decode('utf-8'))
		html = client.parseDOM(html,'div', attrs={'class':'wpb_wrapper'})[0]
		items = re.findall('<img width="300" height="160" src="(.+?)" class="entry-thumb.+?">\s*<span class="td-video-play-ico">\s*<img width="40" class="td-retina".+?alt="video"/>\s*</span>\s*</a>\s*</div>\s*</div>\s*<h3 class="entry-title td-module-title" itemprop="name">\s*<a rel="bookmark" href="(.+?)" itemprop="url">(.+?)</a> ',html, flags=re.UNICODE)
		out = []
		for item in items:
			# item == (thumbnail, article url, title text)
			img = item[0]
			title = item[2].encode('utf-8', 'xmlcharrefreplace')
			url = item[1]
			item_x = (title,url,img)
			out.append(item_x)

		return out
	def items(self):
		"""Return (title, url, img) video teasers from the first
		'wpb_wrapper' section of the page."""
		page = client.request(self.url,referer=self.base)
		page = convert.unescape(page.decode('utf-8'))
		wrapper = client.parseDOM(page,'div', attrs={'class':'wpb_wrapper'})[0]
		found = re.findall('<img width="300" height="160" src="(.+?)" class="entry-thumb.+?">\s*<span class="td-video-play-ico">\s*<img width="40" class="td-retina".+?alt="video"/>\s*</span>\s*</a>\s*</div>\s*</div>\s*<h3 class="entry-title td-module-title" itemprop="name">\s*<a rel="bookmark" href="(.+?)" itemprop="url">(.+?)</a> ',wrapper, flags=re.UNICODE)
		return [(name.encode('utf-8', 'xmlcharrefreplace'), link, thumb)
			for thumb, link, name in found]
예제 #25
0
	def __get_schedule(self,list):
		"""Return *list* with the live ibrodtv schedule prepended.

		(The parameter name shadows the builtin; kept for compatibility.)
		"""
		schedule = [('x','[COLOR yellow]LIVE EVENTS[/COLOR]',control.icon_path(info().icon))]
		page = client.request('http://www.ibrodtv.net/load.php')
		page = convert.unescape(page.decode('cp1252'))
		rows = re.findall('class=[\"\']t[\"\']>(.+?)</span></div>\s*<div class=[\"\']name[\"\']>(.+?)</div>\s*<a href=[\"\'](.+?)[\"\']>',page)
		for raw_time, name, link in rows:
			label = '[COLOR orange](%s)[/COLOR] [B]%s[/B]'%(self.convert_time(raw_time),name)
			schedule.append((link,label.encode('utf-8', 'xmlcharrefreplace'),control.icon_path(info().icon)))
		return schedule + list
예제 #26
0
	def __prepare_items(self,items,result):
		"""Build (title, url, icon) tuples from raw video markup.

		The title is the last dash-containing <b> chunk, tag-stripped and
		unescaped.  *result* is unused but kept for compatibility.
		"""
		prepared = []
		bold_re = re.compile('<b>(.+?)</b>')
		href_re = re.compile('<a.+?href="(.+?)"')
		for video in items:
			candidates = [t for t in bold_re.findall(video) if '&ndash;' in t or '-' in t]
			name = candidates[-1].split('<b>')[-1].replace('&ndash;', '-')
			name = convert.unescape(webutils.remove_tags(name)).encode('utf-8')
			link = self.base + href_re.findall(video)[0]
			prepared.append((name, link, info().icon))
		return prepared
예제 #27
0
 def resolve(self,url):
     """Resolve *url* to a playable stream via liveresolver.

     If the page hides its target inside a JavaScript unescape('...')
     blob, the embedded href/src is extracted first; otherwise the
     original URL is passed straight to liveresolver.
     """
     html = client.request(url)
     try:
         html= convert.unescape(html)
     except Exception:  # best effort -- keep the raw page on failure
         pass
     res = url
     try:
         res = urllib.unquote(re.findall('unescape\s*\(\s*[\"\']([^\"\']+)',html)[0])
         res = re.findall('(?:href|src)=[\"\']([^\"\']+)',res)[0]
     except Exception:  # no unescape() blob found -- fall back to url
         pass

     import liveresolver
     return liveresolver.resolve(res)
예제 #28
0
    def items(self):
        """Scrape the highlights list into (title, url, icon) tuples.

        Title format: 'date (league) [B]home result away[/B]', HTML
        entities unescaped and the result UTF-8 encoded.
        """
        import HTMLParser  # hoisted: was imported once per loop pass
        unescape = HTMLParser.HTMLParser().unescape
        html = client.request(self.url)
        soup = webutils.bs(html)
        items = soup.find('div', {'class': 'listhighlights'}).findAll('li')
        out = []
        for item in items:
            date = convert.unescape(
                item.find('div', {
                    'class': 'date_time column'
                }).getText())
            url = item.find('div', {
                'class': 'play_btn column'
            }).find('a')['href']
            home = convert.unescape(
                item.find('div', {
                    'class': 'team home column'
                }).getText())
            away = convert.unescape(
                item.find('div', {
                    'class': 'team column'
                }).getText())
            result = convert.unescape(
                item.find('div', {
                    'class': 'result column'
                }).getText())
            league = convert.unescape(
                item.find('div', {
                    'class': 'leaguelogo column'
                }).find('a')['title'])
            title = '%s (%s) [B]%s %s %s[/B]' % (date, league, home, result,
                                                 away)
            title = unescape(title).encode('utf-8')
            out.append((title, url, info().icon))

        return out
예제 #29
0
    def resolve(self, url):
        """Resolve *url* via liveresolver, first unwrapping any
        JavaScript unescape('...') redirect embedded in the page."""
        html = client.request(url)
        try:
            html = convert.unescape(html)
        except Exception:  # keep raw page if entity decoding fails
            pass
        res = url
        try:
            res = urllib.unquote(
                re.findall('unescape\s*\(\s*[\"\']([^\"\']+)', html)[0])
            res = re.findall('(?:href|src)=[\"\']([^\"\']+)', res)[0]
        except Exception:  # no embedded target -- resolve url directly
            pass

        import liveresolver
        return liveresolver.resolve(res)
예제 #30
0
    def content(self, url):
        """Return (text, video) for the article at *url*.

        text  -- entry body as UTF-8 text.
        video -- resolved iframe URL, or None when absent/unresolvable.
        """
        html = client.request(url)
        html = convert.unescape(html.decode('utf-8'))
        # Parse once; the original built a second soup for the iframe.
        entry = webutils.bs(html).find('div', {'class': 'entry-content'})
        text = entry.getText().encode('utf-8', 'xmlcharrefreplace')

        try:
            video = entry.find('iframe')['src']
            video = self.resolve(video)
        except Exception:  # no playable embed
            video = None

        return text, video
예제 #31
0
	def items(self):
		"""Return de-duplicated (title, url, img) tuples, one per unique
		article URL, in page order."""
		page = convert.unescape(client.request(self.url).decode('utf-8'))
		found = re.findall('<div class="td-module-thumb"><a href="(.+?)" rel="bookmark" title="(.+?)"><img.+?class="entry-thumb" src="(.+?)"',page)
		seen = []
		result = []
		for link, name, thumb in found:
			encoded = name.encode('utf-8', 'xmlcharrefreplace')
			if link in seen:
				continue
			seen.append(link)
			result.append((encoded, link, thumb))
		return result
예제 #32
0
	def items(self):
		"""Scrape module thumbs into (title, url, img) tuples, skipping
		URLs already seen (the page repeats entries)."""
		html = client.request(self.url)
		html = convert.unescape(html.decode('utf-8'))
		items = re.findall('<div class="td-module-thumb"><a href="(.+?)" rel="bookmark" title="(.+?)"><img.+?class="entry-thumb" src="(.+?)"',html)
		out = []
		urls=[]
		for item in items:
			# item == (article url, title, thumbnail)
			url = item[0]
			title = item[1].encode('utf-8', 'xmlcharrefreplace')
			img = item[2]

			item = (title,url,img)
			if url not in urls:
				urls+=[url]
				out.append(item)

		return out
예제 #33
0
    def items(self):
        """List replay entries as [title, url, img]; the site's
        'Full Game Replay' boilerplate is stripped from titles."""
        html = client.request(self.url)
        html = convert.unescape(html.decode("utf-8"))
        items = re.findall(
            "<a class=[\"']cvp-ctr-track[\"'].+?href=[\"'](.+?)[\"'] title=[\"'](.+?)[\"']\s*>\s*<img src=[\"'](.+?)[\"']",
            html,
        )
        out = []
        for item in items:
            # item == (href, title, thumbnail)
            url = item[0]
            title = (
                item[1]
                .replace("-Full Game Replay-", "-")
                .replace("Full Game Replay HD Quality", "-")
                .replace("Replay HD Quality", "-")
            )
            img = item[2]
            out += [[title.encode("utf-8"), url, img]]

        return out
예제 #34
0
    def __prepare_events(self, events):
        """Turn raw schedule table rows into menu entries.

        Emits a yellow date header whenever the date changes, then one
        '(time) (sport - competition) [B]event[/B]' entry per row.
        Rows that fail parsing/time conversion are skipped.
        """
        new = []
        if events:          # guard: the original raised on an empty list
            events.pop(0)   # drop the table header row
        date_old = ''
        for event in events:
            items = client.parseDOM(event, 'td')
            # Cell order: date, time, sport, competition, event, url.
            i = 0

            for item in items:

                if i == 0:
                    date = item
                elif i == 1:
                    time = item.replace('CET', '').strip()
                elif i == 2:
                    sport = item
                elif i == 3:
                    competition = item
                elif i == 4:
                    event = webutils.remove_tags(item)
                elif i == 5:
                    url = item

                i += 1
            try:
                time, date = self.convert_time(time, date)
                if date != date_old:
                    date_old = date
                    new.append(
                        ('x', '[COLOR yellow]%s[/COLOR]' % date, info().icon))

                sport = '%s - %s' % (sport, competition)
                event = re.sub('\s+', ' ', event)
                title = '[COLOR orange](%s)[/COLOR] (%s) [B]%s[/B]' % (
                    time, sport, convert.unescape(event))
                title = title.encode('utf-8')
                new.append((url, title, info().icon))
            except Exception:  # malformed row -- skip it
                pass

        return new
예제 #35
0
    def __prepare_events(self,events):
        """Convert raw schedule rows to menu tuples.

        The first row (table header) is discarded.  A yellow date header
        is inserted whenever the date changes; rows that raise during
        conversion are silently skipped.
        """
        new = []
        events.pop(0)
        date_old = ''
        for event in events:
            # Cells arrive in the fixed order:
            # date, time, sport, competition, event name, url.
            items = client.parseDOM(event,'td')
            i = 0

            for item in items:

                if i==0:
                    date = item
                elif i==1:
                    time = item.replace('CET','').strip()
                elif i==2:
                    sport = item
                elif i==3:
                    competition = item
                elif i==4:
                    event = webutils.remove_tags(item)
                elif i==5:
                    url = item

                i += 1
            try:
                time, date = self.convert_time(time,date)
                if date != date_old:
                    date_old = date
                    new.append(('x','[COLOR yellow]%s[/COLOR]'%date, info().icon))

                sport = '%s - %s'%(sport,competition)
                event = re.sub('\s+',' ',event)
                title = '[COLOR orange](%s)[/COLOR] (%s) [B]%s[/B]'%(time,sport,convert.unescape(event))
                title = title.encode('utf-8')
                new.append((url,title, info().icon))
            except:
                # NOTE(review): bare except intentionally drops rows that
                # fail conversion, but it also hides real errors.
                pass


        return new
예제 #36
0
    def links(self, url, img=' '):
        """Collect playable links from an article page.

        Scrapes every hoster this add-on knows (Dailymotion, VK, Google
        video 720p/360p, Mail.ru, Openload, Uptostream, Veevr, Playwire,
        Speedvideo, Videowood, Wstream) and returns (title, url, icon)
        tuples, de-duplicated by URL.  *img* is accepted but unused.
        """
        if self.base not in url:
            url = self.base + url
        ref = url  # original page URL; used as referer by some hosts
        out = []
        html = client.request(url)
        html = convert.unescape(html.decode('utf-8'))
        soup = webutils.bs(html)

        # One regex per supported hoster.
        dailys = re.findall(
            'src=[\"\'](//(?:www.)?dailymotion.com/embed/video/[^\"\']+)[\"\']',
            html)
        vks = re.findall(
            'src=[\"\'](//(?:www.)?vk.com/video_ext.php[^\"\']+)[\"\']', html)
        gvid720 = re.findall(
            'src=[\"\'](https?://.+?google.+?/[^\"\']+)" type=[\"\']video/mp4[\"\'] data-res=[\"\']720p[\"\']',
            html)
        gvid360 = re.findall(
            'src=[\"\'](https?://.+?google.+?[^\"\']+)" type=[\"\']video/mp4[\"\'] data-res=[\"\']360p[\"\']',
            html)
        mailru = re.findall(
            '(https?://(?:www.)?videoapi.my.mail.ru/videos/[^\"\']+)[\"\']',
            html)
        opnld = re.findall('(https?://(?:www.)?openload.co/[^\"\']+)[\"\']',
                           html)
        uptstrm = re.findall(
            '(https?://(?:www(?:[\d+])?.)?uptostream.com[^\"\']+)[\"\']', html)
        veevr = re.findall('(https?://(?:www.)?veevr.com[^\"\']+)[\"\']', html)
        plywr = re.findall('(//config.playwire.com/[^\"\']+)[\"\']', html)
        speedvideo = re.findall(
            '(https?://(?:www.)?speedvideo.net/[^\"\']+)[\"\']', html)
        videowood = re.findall(
            '(https?://(?:www.)?videowood.tv/video/[^\"\']+)[\"\']', html)
        wstream = re.findall(
            '(https?://(?:www.)?wstream.video/[^\"\']+)[\"\']', html)
        urls = []  # URLs already emitted, for de-duplication

        i = 0
        for v in plywr:
            i += 1
            title = 'Playwire video %s' % i
            url = v
            if url not in urls:
                out.append((title, url, icon_path(info().icon)))
                urls.append(url)

        i = 0
        for v in veevr:
            i += 1

            url = v
            # NOTE(review): this import shadows the 'veevr' list scraped
            # above; harmless only because the list is bound by the loop.
            from resources.lib.resolvers import veevr
            urlx = veevr.resolve(url)
            log(urlx)
            for url in urlx:
                if url[0] not in urls:
                    title = 'Veevr video %s' % url[1].replace(
                        '<sup>HD</sup>', '')
                    out.append((title, url[0], icon_path(info().icon)))
                    urls.append(url[0])

        i = 0
        for v in uptstrm:
            from resources.lib.resolvers import uptostream
            urlx = uptostream.resolve(v)
            log(urlx)
            i += 1
            for u in urlx:
                q = u[1]  # quality label
                title = 'Uptostream video n.%s %s' % (i, q)
                url = u[0]
                if url not in urls:
                    out.append((title, url, icon_path(info().icon)))
                    urls.append(url)

        i = 0
        for v in dailys:
            i += 1
            title = 'Dailymotion video %s' % i
            url = v
            if url not in urls:
                out.append((title, url, icon_path(info().icon)))
                urls.append(url)

        i = 0
        for v in vks:
            i += 1
            title = 'VK.com video %s' % i
            url = v
            if url not in urls:
                out.append((title, url, icon_path(info().icon)))
                urls.append(url)

        i = 0
        for v in gvid720:
            i += 1
            title = 'GVIDEO link %s 720p' % i
            url = v
            if url not in urls:
                out.append((title, url, icon_path(info().icon)))
                urls.append(url)

        i = 0
        for v in gvid360:
            i += 1
            title = 'GVIDEO link %s 360p' % i
            url = v
            if url not in urls:
                out.append((title, url, icon_path(info().icon)))
                urls.append(url)

        i = 0
        for v in opnld:
            i += 1
            title = 'Openload link %s' % i
            url = v
            if url not in urls:
                out.append((title, url, icon_path(info().icon)))
                urls.append(url)

        i = 0
        for v in speedvideo:
            i += 1
            title = 'Speedvideo link %s' % i
            url = v
            if url not in urls:
                out.append((title, url, icon_path(info().icon)))
                urls.append(url)
        i = 0
        for v in videowood:
            i += 1
            title = 'Videowood link %s' % i
            url = v
            if url not in urls:
                out.append((title, url, icon_path(info().icon)))
                urls.append(url)
        i = 0
        for v in wstream:
            i += 1
            title = 'Wstream link %s' % i
            # Wstream needs the referer appended to the stream URL.
            url = v + '?referer=' + ref
            if url not in urls:

                out.append((title, url, icon_path(info().icon)))
                urls.append(url)

        i = 0
        for v in mailru:
            link = v
            i += 1
            title = 'Mail.ru video %s' % i
            # Rewrite the embed URL into the JSON API endpoint.
            link = link.replace(
                'https://videoapi.my.mail.ru/videos/embed/mail/',
                'http://videoapi.my.mail.ru/videos/mail/')
            link = link.replace('html', 'json')
            # The video_key cookie set by the API call must accompany the
            # stream request, so it is folded into the URL headers below.
            cookieJar = cookielib.CookieJar()
            opener = urllib2.build_opener(
                urllib2.HTTPCookieProcessor(cookieJar), urllib2.HTTPHandler())
            conn = urllib2.Request(link)
            connection = opener.open(conn)
            f = connection.read()
            connection.close()
            js = json.loads(f)
            for cookie in cookieJar:
                token = cookie.value
            js = js['videos']
            for x in js:
                url = x['url'] + '|%s' % (urllib.urlencode(
                    {
                        'Cookie': 'video_key=%s' % token,
                        'User-Agent': client.agent(),
                        'Referer': ref
                    }))
                title = 'Mail.ru video ' + x['key']
                if url not in urls:
                    out.append((title, url, icon_path(info().icon)))
                    urls.append(url)
        return out
예제 #37
0
	def articles(self):
		"""Scrape the front page for (url, title, thumbnail) articles."""
		html = client.request(self.base, referer=self.base)
		html = convert.unescape(html.decode('utf-8'))
		items = re.findall('div class="widget-cat-img">\s*<a href="(.+?)" rel="bookmark" title="(.+?)"><img.+?lazy-src="(.+?)"', html)
		items = self.__prepare_articles(items)
		return items
예제 #38
0
    def __prepare_events(self, events):
        """Turn raw schedule rows into filtered, decorated menu entries.

        Rows are kept only when their sport/competition matches an
        enabled category from the bundled 'data_py' file; date headers
        are inserted when the date changes.  Team-name highlighting
        ('atm1'/'atm2' settings, Spanish localisation) is applied to
        titles.
        """
        new = []
        events.pop(0)  # drop the table header row
        date_old = ''
        time = ''
        sport = ''
        competition = ''
        for event in events:
            # Cell order: date, time, sport, competition, event, url.
            items = client.parseDOM(event, 'td')
            i = 0
            for item in items:
                if i == 0:
                    date = item
                elif i == 1:
                    time = item.replace('CET', '').strip()
                elif i == 2:
                    sport = item
                elif i == 3:
                    competition = item
                elif i == 4:
                    event = webutils.remove_tags(item)
                elif i == 5:
                    url = item
                i += 1

            try:
                #if time != '' and date !='' and 'Last update' not in date:
                time, date = self.convert_time(time, date)
            except:
                # NOTE(review): bare except -- unparseable times are kept
                # verbatim rather than dropped.
                pass

            sport = '(%s - %s)' % (sport, competition)
            event = re.sub('\s+', ' ', event)
            title = '[COLOR orange]%s[/COLOR]  [B]%s[/B]' % (
                time, convert.unescape(event))
            # User-configured team-name substitution (settings atm1/atm2).
            atm1 = addon.get_setting('atm1')
            atm2 = addon.get_setting('atm2')
            if atm1 in title:
                title = title.replace(atm1, atm2)
            # 'data_py' ships (search string, icon, setting id) triples
            # describing the selectable sport categories.
            data_py = os.path.join(addon.get_path().decode('utf-8'), 'data_py')
            f = open(data_py, 'r')
            datos = f.read()
            f.close()
            src = re.findall("bus:'(.*?)',ico:'(.*?)',set:'(.*?)'", datos)

            # f = open('C:/Users/Javier/AppData/Roaming/Kodi/addons/plugin.video.laliga/ztab.txt','w+')
            # f.write(str(len(src)))
            # f.close()

            # Ensure at least one category is enabled; default to the first.
            hay = False
            first = ''
            for bus, ico, stn in src:
                if first == '':
                    first = stn
                if addon.get_setting(stn) == 'true':
                    hay = True
                    break
            if not hay:
                addon.set_setting(first, 'true')
            for bus, ico, stn in src:
                if bus in sport and addon.get_setting(stn) == 'true':
                    if date != date_old:
                        date_old = date
                        new.append(('x', '[COLOR yellow]%s[/COLOR]' % date,
                                    info().icon))
                    if mylang().spa:
                        # Spanish UI tweaks: highlight the substituted team
                        # and colourise 'SPAIN'.
                        if atm2 in title:
                            title = title.replace('[B]', '[B][COLOR tomato]')
                            title = title.replace('[/B]', '[/COLOR][/B]')
                            ico = 'atm'
                        if title.find('SPAIN') != -1:
                            title = title.replace(
                                'SPAIN',
                                '[COLOR red]ES[COLOR yellow]PA[/COLOR]ÑA[/COLOR]'
                                .decode('utf-8'))
                    title = title.encode('utf-8')
                    new.append((url, title, info(ico).icon))
                    break
        return new
예제 #39
0
	def links(self,url, img=' '):
		if self.base not in url:
			url = self.base + url
		ref = url
		out = []
		html = client.request(url)
		html = convert.unescape(html.decode('utf-8'))
		soup = webutils.bs(html)

		dailys = re.findall('src=[\"\'](//(?:www.)?dailymotion.com/embed/video/[^\"\']+)[\"\']',html)
		vks = re.findall('src=[\"\'](//(?:www.)?vk.com/video_ext.php[^\"\']+)[\"\']',html)
		gvid720 = re.findall('src=[\"\'](https?://.+?google.+?/[^\"\']+)" type=[\"\']video/mp4[\"\'] data-res=[\"\']720p[\"\']',html)
		gvid360 = re.findall('src=[\"\'](https?://.+?google.+?[^\"\']+)" type=[\"\']video/mp4[\"\'] data-res=[\"\']360p[\"\']',html)
		mailru = re.findall('(https?://(?:www.)?videoapi.my.mail.ru/videos/[^\"\']+)[\"\']',html)
		opnld = re.findall('(https?://(?:www.)?openload.co/[^\"\']+)[\"\']',html)
		uptstrm = re.findall('(https?://(?:www(?:[\d+])?.)?uptostream.com[^\"\']+)[\"\']',html)
		veevr = re.findall('(https?://(?:www.)?veevr.com[^\"\']+)[\"\']',html)
		plywr = re.findall('(//config.playwire.com/[^\"\']+)[\"\']',html)
		speedvideo = re.findall('(https?://(?:www.)?speedvideo.net/[^\"\']+)[\"\']',html)
		videowood = re.findall('(https?://(?:www.)?videowood.tv/video/[^\"\']+)[\"\']',html)
		wstream = re.findall('(https?://(?:www.)?wstream.video/[^\"\']+)[\"\']',html)
		urls = []

		i = 0
		for v in plywr:
			i+=1
			title = 'Playwire video %s'%i
			url = v 
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)

		i = 0
		for v in veevr:
			i+=1
			
			url = v
			from resources.lib.resolvers import veevr
			urlx = veevr.resolve(url)
			log(urlx)
			for url in urlx:
				if url[0] not in urls:
					title = 'Veevr video %s'%url[1].replace('<sup>HD</sup>','')
					out.append((title,url[0],icon_path(info().icon)))
					urls.append(url[0])

		i = 0
		for v in uptstrm:
			from resources.lib.resolvers import uptostream
			urlx =  uptostream.resolve(v)
			log(urlx)
			i+=1
			for u in urlx:
				q = u[1]
				title = 'Uptostream video n.%s %s'%(i,q)
				url = u[0] 
				if url not in urls:
					out.append((title,url,icon_path(info().icon)))
					urls.append(url)

		i = 0
		for v in dailys:
			i+=1
			title = 'Dailymotion video %s'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)

		i = 0
		for v in vks:
			i+=1
			title = 'VK.com video %s'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)

		i = 0
		for v in gvid720:
			i+=1
			title = 'GVIDEO link %s 720p'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)

		i = 0
		for v in gvid360:
			i+=1
			title = 'GVIDEO link %s 360p'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)

		i = 0
		for v in opnld:
			i+=1
			title = 'Openload link %s'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)

		i = 0
		for v in speedvideo:
			i+=1
			title = 'Speedvideo link %s'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)
		i = 0
		for v in videowood:
			i+=1
			title = 'Videowood link %s'%i
			url = v
			if url not in urls:
				out.append((title,url,icon_path(info().icon)))
				urls.append(url)
		i = 0
		for v in wstream:
			i+=1
			title = 'Wstream link %s'%i
			url = v + '?referer=' + ref
			if url not in urls:

				out.append((title,url,icon_path(info().icon)))
				urls.append(url)


		i = 0
		for v in mailru:
			link = v
			i+=1
			title = 'Mail.ru video %s'%i
			link = link.replace('https://videoapi.my.mail.ru/videos/embed/mail/','http://videoapi.my.mail.ru/videos/mail/')
			link = link.replace('html','json')
			cookieJar = cookielib.CookieJar()
			opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar), urllib2.HTTPHandler())
			conn = urllib2.Request(link)
			connection = opener.open(conn)
			f = connection.read()
			connection.close()
			js = json.loads(f)
			for cookie in cookieJar:
				token = cookie.value
			js = js['videos']
			for x in js:
				url = x['url'] + '|%s'%(urllib.urlencode({'Cookie':'video_key=%s'%token, 'User-Agent':client.agent(), 'Referer':ref} ))
				title = 'Mail.ru video ' + x['key']
				if url not in urls:
					out.append((title,url,icon_path(info().icon)))
					urls.append(url)
		return out
예제 #40
0
	def channels(self):
		"""Fetch the schedule page and return the parsed event list.

		Downloads ``self.url`` (with ``self.base`` as referer), HTML-unescapes
		the response, extracts every (link, headline) pair from the blog
		headline markup and hands them to ``__prepare_events`` for formatting.
		"""
		page = client.request(self.url, referer=self.base)
		page = convert.unescape(page.decode('utf-8'))
		pairs = re.findall('<!-- blog headline-->\s*<h2><a href=[\"\']([^\"\']+)[\"\'] rel=[\"\']bookmark[\"\']>([^<]+)<', page)
		return self.__prepare_events(pairs)
예제 #41
0
 def parse_m3u(self, content):
     """Normalise an M3U payload and prepend it to the playlist buffer.

     HTML-unescapes *content*, strips everything before the first
     ``#EXTINF`` tag, then pushes the result onto the front of
     ``self.lll`` separated by a newline.
     """
     cleaned = convert.unescape(content)
     cleaned = re.sub('(^.+?)#EXTINF', '#EXTINF', cleaned, flags=re.DOTALL)
     self.lll = '\n'.join([cleaned, self.lll])
예제 #42
0
    def __prepare_events(self,events):
        """Turn raw schedule table rows into (url, title, icon) entries.

        Each row's <td> cells are read positionally (date, time, sport,
        competition, event name, url).  An entry is emitted only when its
        title matches one of the hard-coded league markers below AND the
        corresponding addon setting is enabled.  A yellow date header row
        (url 'x') is inserted whenever the date changes.

        NOTE(review): the whole per-row body sits in a bare try/except, so
        any parse failure silently drops that row.
        """
        new = []
        # Drop the table header row.
        # NOTE(review): raises IndexError when `events` is empty — confirm callers.
        events.pop(0)
        date_old = ''
        time = ''
        sport = ''
        competition = ''
        for event in events:
            items = client.parseDOM(event,'td')
            i = 0
            # Positional cell parsing: 0=date, 1=time, 2=sport,
            # 3=competition, 4=event name (tags stripped), 5=url.
            for item in items:

                if i==0:
                    date = item
                elif i==1:
                    time = item.replace('CET','').strip()
                elif i==2:
                    sport = item
                elif i==3:
                    competition = item
                elif i==4:
                    event = webutils.remove_tags(item)
                elif i==5:
                    url = item

                i += 1
            try:
                # Localise the schedule time/date before building the labels.
                time, date = self.convert_time(time,date)
                sport = '%s - %s'%(sport,competition)
                event = re.sub('\s+',' ',event)
                # `title` is used only for league matching below; `title2` is
                # the label actually shown in the list.
                title = '[COLOR orange](%s)[/COLOR] (%s) [B]%s[/B]'%(time,sport,convert.unescape(event))
                title2 = '[COLOR orange]%s[/COLOR]  [B]%s[/B]'%(time,convert.unescape(event))
                # League/competition marker strings searched for in `title`.
                atm = 'ATLETICO MADRID'
                atmb = 'ATLETICO DE MADRID'
                # NOTE(review): trailing ')' looks intended to keep this from
                # also matching 'SPANISH LALIGA2', but `title` wraps sport as
                # '(... - competition)', so 'SPANISH LALIGA)' may never match
                # — confirm against live page data.
                lig = 'SPANISH LALIGA)'
                lg2 = 'SPANISH LALIGA2'
                prm = 'PREMIER LEAGUE'
                fra = 'FRENCH LIGUE1'
                ale = 'BUNDESLIGA'
                ita = 'ITALIA SERIE A'
                ucl = 'UEFA CHAMPIONS LEAGUE'
                uel = 'UEFA EUROPA LEAGUE'
                cop = 'COPA DEL REY'
                fwc = 'FIFA WORLD CUP'
                title2 = title2.replace(atm,atmb)
                # Per-league enable flags from the addon settings ('true'/'false' strings).
                primera = addon.get_setting('primera')
                segunda = addon.get_setting('segunda')
                premier = addon.get_setting('premier')
                francia = addon.get_setting('francia')
                italia = addon.get_setting('italia')
                alemania = addon.get_setting('alemania')
                champions = addon.get_setting('champions')
                eurleague = addon.get_setting('eurleague')
                copa = addon.get_setting('copa')
                fwcup = addon.get_setting('fwcup')
                # Fallback: if no league is enabled, force 'primera' on so the
                # listing is never completely empty.
                # NOTE(review): `fwcup` is missing from this check — if only the
                # World Cup is enabled, primera gets force-enabled too; confirm
                # whether that is intentional.
                if segunda=='false' and premier=='false' and francia=='false' and italia=='false' and alemania=='false' and champions=='false' and eurleague=='false' and copa=='false':
                    primera='true'
                # One branch per league: insert a date header when the date
                # changes, then append the entry with that league's icon.
                # Atletico Madrid matches get a highlighted (tomato) label.
                if (title.find(atm)!=-1 or title.find(atmb)!=-1) and (primera=='true'):
                    if date != date_old:
                        date_old = date
                        new.append(('x','[COLOR yellow]%s[/COLOR]'%date, info().icon))
                    title = title.encode('utf-8')
                    title2 = title2.replace('[B]','[B][COLOR tomato]')
                    title2 = title2.replace('[/B]','[/COLOR][/B]')
                    new.append((url,title2, info().ico2))
                elif title.find(lig)!=-1 and primera=='true':
                    if date != date_old:
                        date_old = date
                        new.append(('x','[COLOR yellow]%s[/COLOR]'%date, info().icon))
                    title = title.encode('utf-8')
                    new.append((url,title2, info().icon))
                elif title.find(lg2)!=-1 and segunda=='true':
                    if date != date_old:
                        date_old = date
                        new.append(('x','[COLOR yellow]%s[/COLOR]'%date, info().icon))
                    title = title.encode('utf-8')
                    new.append((url,title2, info().icsg))
                elif title.find(prm)!=-1 and premier=='true':
                    if date != date_old:
                        date_old = date
                        new.append(('x','[COLOR yellow]%s[/COLOR]'%date, info().icon))
                    title = title.encode('utf-8')
                    new.append((url,title2, info().icpr))
                elif title.find(fra)!=-1 and francia=='true':
                    if date != date_old:
                        date_old = date
                        new.append(('x','[COLOR yellow]%s[/COLOR]'%date, info().icon))
                    title = title.encode('utf-8')
                    new.append((url,title2, info().icfr))
                elif title.find(ita)!=-1 and italia=='true':
                    if date != date_old:
                        date_old = date
                        new.append(('x','[COLOR yellow]%s[/COLOR]'%date, info().icon))
                    title = title.encode('utf-8')
                    new.append((url,title2, info().icit))
                elif title.find(ale)!=-1 and alemania=='true':
                    if date != date_old:
                        date_old = date
                        new.append(('x','[COLOR yellow]%s[/COLOR]'%date, info().icon))
                    title = title.encode('utf-8')
                    new.append((url,title2, info().ical))
                elif title.find(ucl)!=-1 and champions=='true':
                    if date != date_old:
                        date_old = date
                        new.append(('x','[COLOR yellow]%s[/COLOR]'%date, info().icon))
                    title = title.encode('utf-8')
                    new.append((url,title2, info().iccl))
                elif title.find(uel)!=-1 and eurleague=='true':
                    if date != date_old:
                        date_old = date
                        new.append(('x','[COLOR yellow]%s[/COLOR]'%date, info().icon))
                    title = title.encode('utf-8')
                    new.append((url,title2, info().icel))
                elif title.find(cop)!=-1 and copa=='true':
                    if date != date_old:
                        date_old = date
                        new.append(('x','[COLOR yellow]%s[/COLOR]'%date, info().icon))
                    title = title.encode('utf-8')
                    new.append((url,title2, info().icop))
                elif title.find(fwc)!=-1 and fwcup=='true':
                    if date != date_old:
                        date_old = date
                        new.append(('x','[COLOR yellow]%s[/COLOR]'%date, info().icon))
                    title = title.encode('utf-8')
                    # Decorative recolouring of Spain matches (Py2: literal is
                    # a byte string, hence the .decode('utf-8')).
                    if title2.find('SPAIN')!=-1:
                        title2 = title2.replace('SPAIN','[COLOR red]ES[COLOR yellow]PA[/COLOR]ÑA[/COLOR]'.decode('utf-8'))
                    new.append((url,title2, info().icfw))

            except:
                pass

        return new
예제 #43
0
    def __prepare_events(self,events):
        """Turn raw schedule table rows into (url, title, icon) entries.

        Each row's <td> cells are read positionally (date, time, sport,
        competition, event name, url).  The row is kept only when its
        sport/competition matches one of the categories declared in the
        addon's ``data_py`` file (``bus:'…',ico:'…',set:'…'`` records)
        whose setting is enabled.  A yellow date header row (url 'x') is
        inserted whenever the date changes.

        Fixes vs. previous revision (behaviour otherwise unchanged):
        - ``data_py`` is read once with a context manager instead of being
          re-opened, re-read and re-parsed for every event row (this also
          closes the file even if reading raises).
        - The "enable the first category when none is enabled" fallback
          runs once up front instead of once per row.
        - An empty ``events`` list returns [] instead of raising IndexError
          from ``pop(0)``.
        """
        new = []
        if not events:
            return new
        events.pop(0)  # drop the table header row
        date_old = ''
        time = ''
        sport = ''
        competition = ''

        # Optional user-configured team-name replacement (loop-invariant).
        atm1 = addon.get_setting('atm1')
        atm2 = addon.get_setting('atm2')

        # Category table: search string -> icon name -> setting id.
        # Loop-invariant, so load and parse it once.
        data_py = os.path.join(addon.get_path().decode('utf-8'), 'data_py')
        with open(data_py, 'r') as f:
            datos = f.read()
        src = re.findall("bus:'(.*?)',ico:'(.*?)',set:'(.*?)'", datos)

        # Fallback: if no category is enabled, force the first one on so
        # the listing is never completely empty.
        hay = False
        first = ''
        for bus, ico, stn in src:
            if first == '':
                first = stn
            if addon.get_setting(stn) == 'true':
                hay = True
                break
        if not hay:
            addon.set_setting(first, 'true')

        for event in events:
            items = client.parseDOM(event, 'td')
            i = 0
            # Positional cell parsing: 0=date, 1=time, 2=sport,
            # 3=competition, 4=event name (tags stripped), 5=url.
            for item in items:
                if i == 0:
                    date = item
                elif i == 1:
                    time = item.replace('CET', '').strip()
                elif i == 2:
                    sport = item
                elif i == 3:
                    competition = item
                elif i == 4:
                    event = webutils.remove_tags(item)
                elif i == 5:
                    url = item
                i += 1

            # Best effort: keep the raw time/date when localisation fails.
            try:
                time, date = self.convert_time(time, date)
            except:
                pass

            sport = '(%s - %s)' % (sport, competition)
            event = re.sub('\s+', ' ', event)
            title = '[COLOR orange]%s[/COLOR]  [B]%s[/B]' % (time, convert.unescape(event))
            if atm1 in title:
                title = title.replace(atm1, atm2)

            for bus, ico, stn in src:
                if bus in sport and addon.get_setting(stn) == 'true':
                    if date != date_old:
                        date_old = date
                        new.append(('x', '[COLOR yellow]%s[/COLOR]' % date, info().icon))
                    if mylang().spa:
                        # Spanish-language cosmetics: highlight the replaced
                        # team and recolour Spain matches (Py2 byte literal,
                        # hence the .decode('utf-8')).
                        if atm2 in title:
                            title = title.replace('[B]', '[B][COLOR tomato]')
                            title = title.replace('[/B]', '[/COLOR][/B]')
                            ico = 'atm'
                        if title.find('SPAIN') != -1:
                            title = title.replace('SPAIN', '[COLOR red]ES[COLOR yellow]PA[/COLOR]ÑA[/COLOR]'.decode('utf-8'))
                    title = title.encode('utf-8')
                    new.append((url, title, info(ico).icon))
                    break
        return new