def __get_schedule(self, channel_list):
    """Prepend the live-event schedule from ibrodtv.net to *channel_list*.

    Returns a new list: a yellow 'LIVE EVENTS' header row, then one
    (url, decorated title, icon) tuple per scheduled event, then the
    original entries.
    """
    # NOTE: parameter renamed from `list` (it shadowed the builtin); the
    # only call site passes it positionally, so this is backward-compatible.
    new = [('x', '[COLOR yellow]LIVE EVENTS[/COLOR]', control.icon_path(info().icon))]
    html = client.request('http://www.ibrodtv.net/load.php')
    # Site serves cp1252; decode first, then unescape HTML entities.
    html = convert.unescape(html.decode('cp1252'))
    items = re.findall(
        'class=[\"\']t[\"\']>(.+?)</span></div>\s*<div class=[\"\']name[\"\']>(.+?)</div>\s*<a href=[\"\'](.+?)[\"\']>',
        html)
    for raw_time, title, url in items:
        time = self.convert_time(raw_time)
        title = '[COLOR orange](%s)[/COLOR] [B]%s[/B]' % (time, title)
        # xmlcharrefreplace keeps exotic characters representable in the UI.
        new.append((url, title.encode('utf-8', 'xmlcharrefreplace'),
                    control.icon_path(info().icon)))
    return new + channel_list
def links(self, url):
    """Scrape playable links from an event page.

    Returns a list of (title, url, img) tuples. Handles three layouts:
    tab-style anchors (id + '#N' href), 'acp_title' anchors, and a
    single-video page (playwire config, else a YouTube iframe).
    """
    out = []
    html = client.request(url)
    # Page thumbnail; fall back to the add-on icon when absent.
    try:
        img = re.findall(
            'class="wpb_wrapper">\s*<a href=[\"\']([^\"\']+)[\"\']', html)[0]
    except:
        img = control.icon_path(info().icon)
    links = re.findall(
        'id=[\"\']([^\"\']+)[\"\']><a href=[\"\']#\d+[\"\']><div.+?>([^<]+)<', html)
    if len(links) < 2:
        links = re.findall(
            'href=[\"\']([^\"\']+)[\"\']><div class="acp_title">([^<]+)<', html)
        if len(links) < 2:
            # Single-video page: prefer the playwire config URL, else a
            # YouTube iframe; title comes from the og:title meta tag.
            try:
                urlx = 'http:' + re.findall(
                    '(\/\/config\.playwire\.com\/[^\'\"]+)', html)[0]
            except:
                urlx = re.findall('iframe.+?src=[\"\'](.+?youtu[^\"\']+)', html)[0]
            title = self.clean(
                re.findall('og:title.+?content=[\"\']([^\"\']+)[\"\']', html)[0])
            out.append((title, urlx, img))
            return out
        else:
            for urlx, title in links:
                out.append((title, urlx, img))
            return out
    # Tab-style links: the element id becomes a query parameter on the page
    # URL. `img` was already resolved above -- the original re-ran the same
    # thumbnail regex on every iteration for no benefit.
    for link_id, raw_title in links:
        out.append((self.clean(raw_title.upper()), url + '?id=' + link_id, img))
    return out
def __prepare_schedule(self, matches, match_infos):
    """Pair each match row with its info links into (url, title, icon) rows.

    Fixes the original's reuse of the single name `urls` for both the
    per-match anchor tags and the global seen-URL list, which broke
    de-duplication across matches.
    """
    new = []
    seen = []  # URLs already emitted, across all matches
    for match, info_block in zip(matches, match_infos):
        time = match.find('div', {'style': 'float:left;padding-top:1px;'}).getText()
        time = self.convert_time(time)
        title = match.find(
            'div', {'style': 'float:left;padding-top:1px;padding-left:10px;'}).getText()
        infs = info_block.findAll('div', {'style': 'margin-top: -2px;'})
        anchors = info_block.findAll('a')
        for inf_div, anchor in zip(infs, anchors):
            inf = inf_div.getText()
            url = self.base + anchor['href']
            titl = '[COLOR orange](%s)[/COLOR] %s - %s' % (time, title, inf)
            if url not in seen:
                new.append((url, titl.encode('utf-8'), control.icon_path(info().icon)))
                seen.append(url)
    return new
def __init__(self):
    """Source descriptor for livefootballvideo.com full-match listings."""
    self.mode = 'livefootballvideo_fm'
    self.name = 'livefootballvideo.com (full matches)'
    self.icon = control.icon_path('livefootballvideo.png')
    # Capabilities: paged results, several links per item, no category tree.
    self.paginated = True
    self.multilink = True
    self.categorized = False
def __prepare_channels(self, headers, items):
    """Flatten category headers plus their channel <li> lists into menu rows.

    A header titled 'LIVE EVENTS' carries no channel list of its own: the
    live schedule is spliced in instead and its slot in *items* is skipped.
    """
    new = []
    # enumerate() replaces the original hand-maintained counter, which was
    # incremented on every path and therefore always equalled the loop index.
    for i, header in enumerate(headers):
        if header.getText() == 'LIVE EVENTS':
            new = self.__get_schedule(new)
            continue
        new.append(('x', '[COLOR yellow]%s[/COLOR]' % header.getText(),
                    control.icon_path(info().icon)))
        for channel in items[i].findAll('li'):
            url = self.base + '/' + channel.find('a')['href']
            new.append((url, channel.getText(), control.icon_path(info().icon)))
    return new
def __init__(self):
    """Source descriptor for ourmatch.net."""
    self.mode = 'ourmatch'
    self.name = 'ourmatch.net'
    self.icon = control.icon_path('ourmatch.png')
    # Capabilities: paged, categorized, multiple links per item.
    self.multilink = True
    self.categorized = True
    self.paginated = True
def channels(self):
    """Return streamhd.eu channel rows followed by today's event schedule."""
    html = client.request('http://www.streamhd.eu/tv/')
    channels = re.findall(
        '<a href=[\"\'](/[^\"\']+)[\"\']> <img.+?alt=[\"\'](.+?)\s*Live Stream[\"\'].+?src=[\"\']data:image/png;base64',
        html)
    out = self.__prepare_channels(channels)
    # The original fetched self.base twice back to back; once is enough.
    html = client.request(self.base)
    soup = webutils.bs(html)
    rows = soup.find(
        'table',
        {'class': 'table table-hover table-condensed table-striped'}
    ).find('tbody').findAll('tr')
    for row in rows:
        tds = row.findAll('td')
        time = self.convert_time(tds[0].getText().replace('GMT', '').strip())
        sport = tds[1].getText().strip()
        sub = tds[3].getText().strip()
        match = tds[4].getText().strip()
        url = self.base + tds[4].findAll('a')[0]['href']
        # Append the sub-category only when it differs from the sport.
        if sport != sub:
            sport += '-%s' % sub
        title = '[COLOR orange](%s)[/COLOR] (%s) [B]%s[/B]' % (time, sport, match)
        out.append((url, title, control.icon_path(info().icon)))
    return out
def links(self, url):
    """Collect resolvable iframe embeds from *url* as (link, title, img)."""
    out = []
    html = requests.get(url).text
    # og:image thumbnail; fall back to the add-on icon.
    try:
        img = re.findall('og\:image.+?content\s*=\s*[\"\']([^\"\']+)', html)[0]
    except:
        img = control.icon_path(info().icon)
    try:
        import resolveurl
    except:
        # Resolver add-on missing: nothing can be played.
        return []
    counter = 1
    for frame in re.findall('iframe.+?src\s*=\s*[\"\']([^\"\']+)', html):
        if frame.startswith('//'):
            frame = 'http:' + frame
        # Only keep sources resolveurl knows how to handle.
        if resolveurl.HostedMediaFile(url=frame).valid_url():
            out.append((frame, 'Link %s' % counter, img))
            counter += 1
    return out
def __init__(self):
    """Source descriptor for the livetv.sx NBA archive."""
    self.mode = 'livetv_nba'
    self.name = 'livetv.sx archive'
    self.icon = control.icon_path('nbastream.png')
    # Flat single-page listing with multiple links per item.
    self.multilink = True
    self.paginated = False
    self.categorized = False
def __init__(self):
    """Source descriptor for fullmatchesandshows.com."""
    self.mode = 'fullmatchesandshows'
    self.name = 'fullmatchesandshows.com'
    self.icon = control.icon_path('fms.png')
    # Paged, categorized listing; items carry several links.
    self.multilink = True
    self.categorized = True
    self.paginated = True
def __init__(self):
    """Source descriptor for livetv.sx NHL replays and highlights."""
    self.mode = 'livetv_nhl'
    self.name = 'livetv.sx (NHL full replays & highlights)'
    self.icon = control.icon_path('nhlstream.jpg')
    # Flat single-page listing with multiple links per item.
    self.multilink = True
    self.paginated = False
    self.categorized = False
def links(self, url):
    """Pair embed iframes with their headings; keep only resolvable sources."""
    out = []
    html = requests.get(url).text
    try:
        img = re.findall(
            'class="wpb_wrapper">\s*<a href=[\"\']([^\"\']+)[\"\']', html)[0]
    except:
        img = control.icon_path(info().icon)
    # Each hit is (heading text, iframe src).
    links = re.findall(
        '(?:acp_title[\"\']>|h.>)([^<]+)<\/.+?\s*.+?iframe.+?src\s*=\s*[\"\']([^\"\']+)',
        html)
    # Map iframe src -> heading so titles survive the filtering step.
    link_dict = dict((src, heading) for heading, src in links)
    try:
        import resolveurl
    except:
        return []
    for src in resolveurl.filter_source_list([pair[1] for pair in links]):
        out.append((src, link_dict[src], img))
    return out
def links(self, url):
    """List <option> entries on the page as (label, unescaped value, icon)."""
    html = client.request(url)
    return [(label, convert.unescape(value), control.icon_path(info().icon))
            for value, label in re.findall(
                '<option value=[\"\']([^\"\']+)[\"\']>([^<]+)<', html)]
def schedule(self):
    """Build schedule rows from sch_dict(); embed URLs become /view URLs."""
    out = []
    # NOTE(review): this fetch result looks unused, but client.request may
    # prime cookies/cache that sch_dict() relies on -- kept to preserve
    # behavior; confirm before removing.
    html = client.request(self.base)
    for entry in self.sch_dict():
        out.append((entry[0].replace('embed', 'view'), entry[0],
                    control.icon_path(info().icon)))
    return out
def categories(self):
    """Scrape td_filter category ids and names from the landing page."""
    html = client.request(self.base)
    # The original also extracted a `block_td_uid` script variable and kept a
    # commented-out narrowing regex; both were unused and are dropped here.
    items = re.findall(
        'data-td_filter_value=[\"\'](\d+)[\"\'][^<]+>([^<]+)<', html)
    icon = control.icon_path(info().icon)
    return [(value, label, icon) for value, label in items]
def links(self, url):
    """List <option> entries on the page as (label, unescaped value, icon)."""
    out = []
    html = client.request(url)
    for value, label in re.findall(
            '<option value=[\"\']([^\"\']+)[\"\']>([^<]+)<', html):
        out.append((label, convert.unescape(value), control.icon_path(info().icon)))
    return out
def categories(self):
    """List the 2016 racing categories from racing4everyone.eu."""
    html = client.request('http://racing4everyone.eu/yr2016/')
    img = control.icon_path(info().icon)
    return [(link, label, img)
            for link, label in re.findall(
                'color: #008000;[\"\'] href=[\"\']([^\"\']+)[\"\']>([^<]+)</a></span></li>',
                html)]
def categories(self):
    """Static NBA search-feed categories as [query url, label, icon] rows."""
    # Hoist the icon lookup instead of calling control.icon_path() per row,
    # matching the other static categories() implementations in this file.
    img = control.icon_path(info().icon)
    base = 'http://searchapp2.nba.com/nba-search/query.jsp?section='
    tail = '&sort=recent&hide=true&type=advvideo&npp=15&start=1'
    return [
        [base + 'channels%2F*%7Cgames%2F*%7Cflip_video_diaries%7Cfiba' + tail,
         'NBA Video (All feeds)', img],
        [base + 'channels%2Ftop_plays' + tail, 'Top Plays', img],
        [base + 'games%2F*%7Cchannels%2Fplayoffs' + tail, 'Highlights', img],
    ]
def categories(self):
    """Scrape td_filter category ids and names from the landing page."""
    html = client.request(self.base)
    # The original also extracted a `block_td_uid` script variable and kept a
    # commented-out narrowing regex; both were unused and are dropped here.
    items = re.findall(
        'data-td_filter_value=[\"\'](\d+)[\"\'][^<]+>([^<]+)<', html)
    icon = control.icon_path(info().icon)
    return [(value, label, icon) for value, label in items]
def __prepare_channels(self, channels):
    """De-duplicate channel anchor tags into (href, title, icon) rows.

    The commented-out `new.pop(-1)` line from the original was dead code
    and has been removed.
    """
    new = []
    seen = []
    for channel in channels:
        url = channel['href']
        if url not in seen:
            seen.append(url)
            new.append((url, channel.getText(), control.icon_path(info().icon)))
    return new
def channels(self):
    """Channels from the '#chanels' list, sorted alphabetically by title."""
    soup = webutils.bs(client.request(self.base))
    events = [(self.base + li.find('a')['href'], li.getText(),
               control.icon_path(info().icon))
              for li in soup.find('div', {'id': 'chanels'}).findAll('li')]
    events.sort(key=lambda row: row[1])
    return events
def categories(self):
    """List the 2016 racing categories from racing4everyone.eu."""
    out = []
    html = client.request('http://racing4everyone.eu/yr2016/')
    img = control.icon_path(info().icon)
    pairs = re.findall(
        'color: #008000;[\"\'] href=[\"\']([^\"\']+)[\"\']>([^<]+)</a></span></li>',
        html)
    for link, label in pairs:
        out.append((link, label, img))
    return out
def links(self, url):
    """Find the section headed by *url*'s title and list its anchors."""
    # Escape parentheses and loosen whitespace so the title is regex-safe.
    pattern = url.replace('(', '\(').replace(')', '\)').replace(' ', '\s*')
    html = client.request('http://www.tribina.net/index.html')
    # Grab everything between this heading and the next <h3>.
    section = re.findall('%s</h3>(.+?)<h3' % pattern, html, flags=re.DOTALL)[0]
    out = []
    for href, label in re.findall(
            '<a href=[\"\']([^\"\']+)[\"\'].+?>([^<]+)</a>', section):
        out.append((href, label, control.icon_path(info().icon)))
    return out
def __prepare_channels(self, channels):
    """Turn (path, title) pairs into unique (url, title, icon) rows."""
    img = control.icon_path(info().icon)
    rows = []
    seen = set()  # set membership is equivalent to the original list check
    for path, title in channels:
        url = self.base + path
        if url not in seen:
            rows.append((url, title, img))
            seen.add(url)
    return rows
def categories(self):
    """Country categories from streamgaroo, stopping after 'United States'."""
    out = []
    img = control.icon_path(info().icon)
    session = requests.Session()
    html = session.get('http://www.streamgaroo.com/live-television/').text
    for href, name in re.findall(
            'href=[\"\']([^\"\']+)[\"\']><img.+?alt=[\"\']([^\"\']+)[\"\']>', html):
        out.append((href, name, img))
        # Everything after the US entry is not wanted.
        if name == 'United States':
            break
    return out
def __prepare_channels(self, channels):
    """Turn (path, title) pairs into unique (url, title, icon) rows."""
    img = control.icon_path(info().icon)
    rows = []
    seen = []
    for path, title in channels:
        full_url = self.base + path
        if full_url in seen:
            continue  # skip duplicates, keep first occurrence
        rows.append((full_url, title, img))
        seen.append(full_url)
    return rows
def __prepare_channels(self, channels):
    """Build channel rows; raw .ts streams are wrapped for f4mTester playback."""
    img = control.icon_path(info().icon)
    rows = []
    for path, raw_title in channels:
        url = self.base + path
        title = convert.unescape(raw_title)
        if '.ts' in url:
            # MPEG-TS streams need the f4mTester downloader wrapper to play.
            url = 'plugin://plugin.video.f4mTester/?streamtype=TSDOWNLOADER&url=%s' % (
                urllib.quote(url))
        rows.append((url, title.encode('utf-8'), img))
    return rows
def categories(self):
    """Fixed list of stream categories as (id, label, icon) tuples."""
    img = control.icon_path(info().icon)
    labels = [('following', 'Following'), ('all', 'All'), ('people', 'People'),
              ('nature', 'Nature'), ('creative', 'Creative'),
              ('music_cafe', 'Music Cafe'), ('news_tech', 'News & Tech'),
              ('lifestyles', 'Lifestyles'), ('misc', 'Misc'),
              ('espanol', 'Espanol'), ('vapers', 'Vapers'),
              ('breakers', 'Breakers'), ('gamers', 'Gamers')]
    return [(cid, label, img) for cid, label in labels]
def channels(self):
    """Channels from the '#chanels' list, sorted alphabetically by title."""
    html = client.request(self.base)
    soup = webutils.bs(html)
    events = []
    for entry in soup.find('div', {'id': 'chanels'}).findAll('li'):
        link = self.base + entry.find('a')['href']
        events.append((link, entry.getText(), control.icon_path(info().icon)))
    events.sort(key=lambda row: row[1])
    return events
def links(self, url):
    """Extract (title, iframe src, icon) rows from headed embed sections."""
    html = client.request(url)
    matches = re.findall(
        '(?:<.+?>)+([A-Z][^:<]+).+?\s*(?:.+?)?<iframe.+?src=[\"\']([^\"\']+)[\"\']',
        html)
    out = []
    # The original kept a counter `i` that was incremented but never used;
    # it has been removed.
    for heading, src in matches:
        out.append((convert.unescape(heading).encode('utf-8'), src,
                    control.icon_path(info().icon)))
    return out
def items(self):
    """List video thumbs on self.url as [title, href, icon] triples."""
    soup = webutils.bs(client.request(self.url))
    out = []
    for thumb in soup.findAll('div', {'class': 'thumb'}):
        anchor = thumb.find('a')
        # Strip any markup left inside the title attribute.
        title = re.sub('<[^>]*>', '', anchor['title'].encode('utf-8'))
        out.append([title, anchor['href'], control.icon_path(info().icon)])
    return out
def categories(self):
    """Forum pages, newest first: 21 pages counting down from the last."""
    html = client.request(self.base)
    # The highest page number sits in the last pagination ('switchDigit') link.
    last_page = int(
        re.findall('class=[\"\']switchDigit[\"\'] href=[\"\']([^\"\']+)',
                   html)[-1].split('-')[-1])
    img = control.icon_path(info().icon)
    out = []
    for page in range(last_page, last_page - 21, -1):
        out.append(('http://www.sultanovic.net/forum/12-1580-' + str(page),
                    'Stranica %s' % page, img))
    return out
def links(self, url):
    """Scrape playable links from an event page.

    Returns a list of (title, url, img) tuples. Handles three layouts:
    tab-style anchors (id + '#N' href), 'acp_title' anchors, and a
    single-video page (playwire config, else a YouTube iframe).
    """
    out = []
    html = client.request(url)
    # Page thumbnail; fall back to the add-on icon when absent.
    try:
        img = re.findall(
            'class="wpb_wrapper">\s*<a href=[\"\']([^\"\']+)[\"\']', html)[0]
    except:
        img = control.icon_path(info().icon)
    links = re.findall(
        'id=[\"\']([^\"\']+)[\"\']><a href=[\"\']#\d+[\"\']><div.+?>([^<]+)<', html)
    if len(links) < 2:
        links = re.findall(
            'href=[\"\']([^\"\']+)[\"\']><div class="acp_title">([^<]+)<', html)
        if len(links) < 2:
            # Single-video page: prefer the playwire config URL, else a
            # YouTube iframe; title comes from the og:title meta tag.
            try:
                urlx = 'http:' + re.findall(
                    '(\/\/config\.playwire\.com\/[^\'\"]+)', html)[0]
            except:
                urlx = re.findall('iframe.+?src=[\"\'](.+?youtu[^\"\']+)', html)[0]
            title = self.clean(
                re.findall('og:title.+?content=[\"\']([^\"\']+)[\"\']', html)[0])
            out.append((title, urlx, img))
            return out
        else:
            for urlx, title in links:
                out.append((title, urlx, img))
            return out
    # Tab-style links: the element id becomes a query parameter on the page
    # URL. `img` was already resolved above -- the original re-ran the same
    # thumbnail regex on every iteration for no benefit.
    for link_id, raw_title in links:
        out.append((self.clean(raw_title.upper()), url + '?id=' + link_id, img))
    return out
def categories(self):
    """Competition categories plus a fixed 'Latest matches' entry."""
    img = control.icon_path(info().icon)
    out = [('http://ourmatch.net', 'Latest matches', img)]
    seen = []
    html = client.request(self.base)
    for href, raw_title in re.findall(
            '<li class=[\"\']hover-tg[\"\']><a href=[\"\'](.+?)[\"\']>(.+?)</a></li>',
            html):
        # Strip the inline country-flag span from the label.
        title = re.sub(' <span class="icon country.+?></span> ', '', raw_title)
        if href not in seen:
            out.append((href, title, img))
            seen.append(href)
    return out
def items(self):
    """Collect 2015/2016 match links from the '#primary' content area."""
    img = control.icon_path(info().icon)
    html = client.request(self.url)
    # Bail out cleanly when the page could not be fetched.
    if html is None:
        return []
    out = []
    # The original also initialized an unused `urls` list; it is dropped.
    for link in webutils.bs(html).find('div', {'id': 'primary'}).findAll('a'):
        if re.findall('201[56]/\d\d/', link['href']):
            out.append((link.getText(), link['href'], img))
    return out
def items(self):
    """Collect 2015/2016 match links from the '#primary' content area."""
    img = control.icon_path(info().icon)
    html = client.request(self.url)
    # Bail out cleanly when the page could not be fetched.
    if html is None:
        return []
    out = []
    # The original also initialized an unused `urls` list; it is dropped.
    for link in webutils.bs(html).find('div', {'id': 'primary'}).findAll('a'):
        if re.findall('201[56]/\d\d/', link['href']):
            out.append((link.getText(), link['href'], img))
    return out
def items(self):
    """Channel names from MsoNormal paragraphs, skipping boilerplate text."""
    html = client.request(self.base)
    paragraphs = webutils.bs(html).findAll('p', {'class': 'MsoNormal'})
    # Paragraphs containing any of these fragments are instructional
    # boilerplate, not channel names.
    noise = ['thank', 'full', 'chrome', ' ', 'page', 'contact', 'you must?']
    out = []
    for paragraph in paragraphs:
        text = paragraph.getText()
        if len(text) > 50 or any(word in text.lower() for word in noise):
            continue
        # Parenthesized entries are section headers; highlight them.
        if '(' in text:
            text = '[B][COLOR orange]%s[/COLOR][/B]' % text
        out.append((text, text, control.icon_path(info().icon)))
    return out
def categories(self):
    """Competition categories plus a fixed 'Latest matches' entry."""
    img = control.icon_path(info().icon)
    out = [('http://ourmatch.net', 'Latest matches', img)]
    seen = []
    html = client.request(self.base)
    cats = re.findall(
        '<li class=[\"\']hover-tg[\"\']><a href=[\"\'](.+?)[\"\']>(.+?)</a></li>',
        html)
    for href, label in cats:
        # Remove the inline country-flag markup from the label.
        label = re.sub(' <span class="icon country.+?></span> ', '', label)
        if href in seen:
            continue
        out.append((href, label, img))
        seen.append(href)
    return out
def channels(self, index):
    """Channels from the arconai mega-menu; *index* picks one of 3 submenus."""
    index = int(index)
    img = control.icon_path(info().icon)
    html = client.request('http://af-proxy.appspot.com/arconaitv.me')
    menu = webutils.bs(html).findAll('ul', {'id': 'mega_main_menu_ul'})[0]
    node = menu.findNext('li')
    # Walk to the requested submenu (the original stepped at most twice).
    for _ in range(min(index, 2)):
        node = node.findNextSibling('li')
    events = []
    for anchor in node.findAll('a'):
        # Entries whose second character is '-' are separators, not channels.
        if anchor.getText()[1] != '-':
            events.append((anchor['href'], anchor.getText(), img))
    events.sort(key=lambda row: row[1])
    return events
def channels(self):
    """Return streamhd.eu channel rows followed by today's event schedule."""
    html = client.request('http://www.streamhd.eu/tv/')
    channels = re.findall(
        '<a href=[\"\'](/[^\"\']+)[\"\']> <img.+?alt=[\"\'](.+?)\s*Live Stream[\"\'].+?src=[\"\']data:image/png;base64',
        html)
    out = self.__prepare_channels(channels)
    # The original fetched self.base twice back to back; once is enough.
    html = client.request(self.base)
    soup = webutils.bs(html)
    rows = soup.find(
        'table',
        {'class': 'table table-hover table-condensed table-striped'}
    ).find('tbody').findAll('tr')
    for row in rows:
        tds = row.findAll('td')
        time = self.convert_time(tds[0].getText().replace('GMT', '').strip())
        sport = tds[1].getText().strip()
        sub = tds[3].getText().strip()
        match = tds[4].getText().strip()
        url = self.base + tds[4].findAll('a')[0]['href']
        # Append the sub-category only when it differs from the sport.
        if sport != sub:
            sport += '-%s' % sub
        title = '[COLOR orange](%s)[/COLOR] (%s) [B]%s[/B]' % (time, sport, match)
        out.append((url, title, control.icon_path(info().icon)))
    return out
def categories(self):
    """Fixed list of stream categories as (id, label, icon) tuples."""
    img = control.icon_path(info().icon)
    pairs = (('following', 'Following'), ('all', 'All'), ('people', 'People'),
             ('nature', 'Nature'), ('creative', 'Creative'),
             ('music_cafe', 'Music Cafe'), ('news_tech', 'News & Tech'),
             ('lifestyles', 'Lifestyles'), ('misc', 'Misc'),
             ('espanol', 'Espanol'), ('vapers', 'Vapers'),
             ('breakers', 'Breakers'), ('gamers', 'Gamers'))
    categories = []
    for cid, label in pairs:
        categories.append((cid, label, img))
    return categories
def categories(self):
    """Three fixed top-level sections: Shows, Live TV, Movies."""
    img = control.icon_path(info().icon)
    labels = ('Shows', 'Live TV', 'Movies')
    # Ids are the stringified positions, matching the original tuples.
    return tuple((str(idx), label, img) for idx, label in enumerate(labels))
def categories(self):
    """Static NBA search-feed categories as [query url, label, icon] rows."""
    # Hoist the icon lookup instead of calling control.icon_path() per row,
    # matching the other static categories() implementations in this file.
    img = control.icon_path(info().icon)
    base = 'http://searchapp2.nba.com/nba-search/query.jsp?section='
    tail = '&sort=recent&hide=true&type=advvideo&npp=15&start=1'
    return [
        [base + 'channels%2F*%7Cgames%2F*%7Cflip_video_diaries%7Cfiba' + tail,
         'NBA Video (All feeds)', img],
        [base + 'channels%2Ftop_plays' + tail, 'Top Plays', img],
        [base + 'games%2F*%7Cchannels%2Fplayoffs' + tail, 'Highlights', img],
    ]