def get_channels(self, url):
    """
    Get the playable channels of a Movistar+ channel list.

    :param url: The channel list URL
    :type: url: str
    :return: The list of channel dicts (name/video/icon/fanart)
    :rtype: list
    """
    cache = Cache(self.__settings['path'], minutes=180)
    epg = EPG(self.__settings)

    # Serve from cache when possible; only the EPG metadata is refreshed
    channels = cache.load(url)
    if channels:
        epg.update_metadata(channels)
        return channels

    # Cache miss: download and parse the list page
    page = tools.get_web_page(url)

    # Each entry is a (name, url) pair taken from the #EXTINF lines
    found = re.findall(r'#EXTINF:.*,(.*)<br\s/>\s(http[s]?://.*)<', page, re.U)
    if not found:
        raise WebSiteError(
            u'Problema en Movistar+',
            u'No se han encontrado canales en la lista seleccionada',
            time=self.__settings['notify_secs'])

    channels = []
    for raw_name, raw_link in found:
        ch_name = tools.str_sanitize(raw_name)
        ch_link = tools.str_sanitize(raw_link)
        # Plain .m3u/.m3u8 playlist links are skipped; only direct streams are kept
        if ch_link.endswith(('.m3u8', '.m3u')):
            continue
        channels.append({
            'name': ch_name,
            'video': ch_link,
            'icon': tools.build_path(
                self.__settings['path'],
                'bein.png' if self.__sports else 'movistar.png'),
            'fanart': tools.build_path(
                self.__settings['path'],
                'bein_art.jpg' if self.__sports else 'movistar_art.jpg')
        })

    if not channels:
        raise WebSiteError(
            u'No hay canales',
            u'La lista no contiene canales que se puedan reproducir',
            time=self.__settings['notify_secs'])

    # Sports lists carry no EPG information
    if not self.__sports:
        epg.add_metadata(channels)

    # Store in cache and return
    cache.save(url, channels)
    return channels
def get_menu(self):
    """
    Get MovistarTV channel lists

    :return: The list of MovistarTV channel lists
    :rtype: list
    """
    source_url = self.__sports_url if self.__sports else self.__channels_url
    cache = Cache(self.__settings['path'], minutes=60)

    # Serve the channel lists from cache when possible
    ch_lists = cache.load(source_url)
    if ch_lists:
        return ch_lists

    # Cache miss: GET http://iptv.filmover.com/category/spain/
    page = tools.get_web_page(source_url)

    # Every list URL is one day's list; the first one is the most recent
    urls = re.findall(
        r'<h2\s*class="entry-tit.*le">\s*<a href="(.*)"\s*rel="bookmark">(.*)</a></h2>',
        page, re.U)
    if not urls:
        raise WebSiteError(u'Lista de canales no encontrada',
                           u'Los de Movistar+ han hecho cambios en la Web',
                           time=self.__settings['notify_secs'])

    ch_lists = [{
        'name': tools.str_sanitize(title),
        'channel_url': tools.str_sanitize(link),
        'icon': tools.build_path(
            self.__settings['path'],
            'bein.png' if self.__sports else 'movistar.png'),
        'fanart': tools.build_path(
            self.__settings['path'],
            'bein_art.jpg' if self.__sports else 'movistar_art.jpg')
    } for link, title in urls]

    if not ch_lists:
        raise WebSiteError(
            u'Problema en Movistar+',
            u'No se han encontrado listas de canales en la Web',
            time=self.__settings['notify_secs'])

    cache.save(source_url, ch_lists)
    return ch_lists
def __get_competition_art(self, competition):
    """Return the icon/fanart pair for a given competition name."""
    path = self.__settings['path']
    return {
        'icon': art.get_competition_icon(competition, path, default='futbol.png'),
        'fanart': tools.build_path(path, 'futbol_art.jpg')
    }
def get_menu(self):
    """
    Get the list of LiveFootbalLOL categories: agenda and competitions

    :return: The list of LiveFootbalLOL categories
    :rtype: list
    """
    path = self.__settings['path']
    fanart = tools.build_path(path, 'lfol_art.jpg')
    # (entry name, icon file) pairs, in menu order
    entries = (
        ('Hoy y mañana', 'hoy_manana.png'),
        ('Agenda 7 días', 'siete_dias.png'),
        ('Competiciones', 'competiciones.png'),
    )
    return [{
        'name': name,
        'icon': tools.build_path(path, icon),
        'fanart': fanart
    } for name, icon in entries]
def save(self, url, data):
    """
    Persist *data* for *url* in the JSON file cache.

    The entry is stored alongside the current timestamp so that load()
    can expire it later. Write failures are logged, not raised.
    """
    try:
        payload = {
            'timestamp': time.time(),
            'data': data,
        }
        # Cache file name is the sha224 of the URL
        cache_file = tools.build_path(
            self.__path, '%s.json' % hashlib.sha224(url).hexdigest(), 'cache')
        with open(cache_file, 'w') as fp:
            json.dump(payload, fp, indent=4)
        tools.write_log("Cache: new '%s'" % url)
    except IOError:
        tools.write_log("Cache: can't write '%s'" % url, xbmc.LOGERROR)
def __get_links(self, channels, urls):
    """
    Get link list of channels URL's for a given string

    :param channels: The channels string
    :type: channels: str
    :return: The list of Arenavision events
    :rtype: list
    """
    groups = re.findall(r'[\d-]+\s*\[[\w]+\]', channels, re.U)
    if not groups:
        tools.write_log('No se pueden extraer los enlaces: %s' % groups, xbmc.LOGERROR)
        return None

    result = []
    for group in groups:
        numbers = re.findall(r'[\d]+', group, re.U)
        langs = re.findall(r'\[[\w]+\]', group, re.U)
        # Fall back to a placeholder when no language tag was found
        lang = langs[0] if langs else '[---]'
        for number in numbers:
            result.append({
                'name': 'AV%s %s' % (number, lang),
                'icon': tools.build_path(self.__settings['path'], 'arenavision.jpg'),
                'fanart': tools.build_path(self.__settings['path'], 'arenavision_art.jpg'),
                # Channel numbers are 1-based indexes into urls
                'link': urls[int(number) - 1]
            })
    return result
def load(self, url, log_read_ok=True):
    """
    Load the cached data for *url*, if present and not expired.

    :param url: The cache key; its sha224 digest names the cache file
    :type: url: str
    :param log_read_ok: Whether to log successful cache reads
    :type: log_read_ok: bool
    :return: The cached data, or None on miss, expiry or corruption
    """
    try:
        cache_file = tools.build_path(
            self.__path, '%s.json' % hashlib.sha224(url).hexdigest(), 'cache')
        with open(cache_file, 'r') as fp:
            content = json.load(fp)
        # Entry is valid while now <= timestamp + self.__minutes
        expiry = datetime.datetime.fromtimestamp(content['timestamp']) + \
            datetime.timedelta(minutes=self.__minutes)
        if datetime.datetime.now() <= expiry:
            if log_read_ok:
                tools.write_log("Cache: '%s' read ok" % url)
            return content['data']
        tools.write_log("Cache: '%s' expired" % url)
        return None
    except IOError:
        # No cache file (or unreadable): cache miss
        return None
    except (ValueError, KeyError):
        # Corrupt/truncated JSON or missing keys: treat as a miss
        # instead of crashing (the entry will be rewritten by save())
        return None
def get_menu(self):
    """
    Get the list of Arenavision categories: agenda, sports and competition

    :return: The list of Arenavision categories
    :rtype: list
    """
    path = self.__settings['path']
    fanart = tools.build_path(path, 'arenavision_art.jpg')
    menu = []
    # (entry name, icon file) pairs, in menu order
    for name, icon in (
            ('Hoy y mañana', 'hoy_manana.png'),
            ('Agenda 7 días', 'siete_dias.png'),
            ('Deportes', 'deportes.png'),
            ('Competiciones', 'competiciones.png')):
        menu.append({
            'name': name,
            'icon': tools.build_path(path, icon),
            'fanart': fanart
        })
    return menu
def get_channels(self, event_url):
    """
    Get LiveFootbalLOL channels by a given event URL

    :param event_url: The event URL
    :type: event_url: str
    :return: The list of LiveFootbalLOL event links
    :rtype: list
    """
    cache = Cache(self.__settings['path'], minutes=10)

    # Build the absolute event URL (event_url may be site-relative)
    e_url = '%s%s' % (self.__web_url[:-1] if event_url.startswith('/')
                      else self.__web_url, event_url)

    # Serve the event's channels from cache when possible
    channels = cache.load(e_url, True)
    if channels:
        return channels

    # Cache miss: download the event page and re-parse it
    channels = []

    # GET e_url
    page = tools.get_web_page(e_url)

    # Match week lookup (currently disabled)
    # match_week = re.findall(r'[Mm][Aa][Tt][Cc][Hh]\s[Ww][Ee]{2}[Kk]</td>\s*<td>([0-9]+)</td>', page, re.U)

    # Locate the channel data table
    soup = BeautifulSoup(page, 'html5lib')
    table = soup.find(
        'table',
        attrs={'class': 'uk-table uk-table-hover uk-table-striped'})

    # Walk the channel rows (first two rows are headers)
    prev_lang = None
    for row in table.findAll("tr")[2:]:
        cells = row.findAll("td")

        # General channel data: name and language columns
        ch_name = tools.str_sanitize(cells[1].get_text())
        ch_lang = tools.str_sanitize(cells[0].get_text())

        # Links published yet? A 'will be here' placeholder marks the end of
        # the real links: stop if some were already collected, otherwise fail
        if 'will be here' in ch_name:
            match = re.findall(
                r'[Mm][Aa][Tt][Cc][Hh]</td>\s*<td><strong>(.*)</strong></td>',
                page, re.U)
            if len(channels) > 0:
                break
            else:
                raise WebSiteError(
                    match[0] if match else u'LiveFootbalLOL',
                    u'Todavía no se han publicado los enlaces del partido',
                    time=self.__settings['notify_secs'])

        # Skip rows that are not acestream links
        ch_link = tools.str_sanitize(cells[1].find('a').get('href'))
        if not ch_link or 'acestream' not in ch_name.lower():
            continue

        # Language: reuse the previous row's language when this row has no
        # valid [XX] tag (rows omit the language when it repeats)
        if not ch_lang or not re.findall(r'(\[[A-Z]{2}\])', ch_lang, re.U):
            ch_lang = prev_lang if prev_lang else '[--]'
        prev_lang = ch_lang if ch_lang else '[--]'

        # Extended data and hashlinks for the channel page
        channel_data = self.__get_channel_data(cache, ch_link)
        if channel_data:
            for link in channel_data['links']:
                channels.append({
                    'name': self.__get_channel_name(
                        channel_data['name'],
                        channel_data['bitrate'],
                        link['hd'],
                        ch_lang),
                    'icon': art.get_channel_icon(
                        channel_data['name'], self.__settings['path']),
                    'fanart': tools.build_path(
                        self.__settings['path'], 'lfol_art.jpg'),
                    'hash': link['hash']
                })

    # Links existed but none were acestream
    if len(channels) == 0:
        match = re.findall(
            r'[Mm][Aa][Tt][Cc][Hh]</td>\s*<td><strong>(.*)</strong></td>',
            page, re.U)
        raise WebSiteError(
            u'%s' % (match[0]) if match else u'LiveFootbalLOL.me',
            u'Hay enlaces del partido pero no son de acestream. Inténtalo más tarde...',
            time=self.__settings['notify_secs'])

    # Store in cache and return
    cache.save(e_url, channels)
    return channels
def get_channels(self, event_url):
    """
    Get LiveFootballVideo channels by a given event URL

    :param event_url: The event URL
    :type: event_url: str
    :return: The list of LiveFootballVideo event links
    :rtype: list
    :raises WebSiteError: when the page layout changed or no acestream
        links are available for the event
    """
    cache = Cache(self.__settings['path'], minutes=10)

    # Serve the event's channels from cache when possible
    channels = cache.load(event_url, True)
    if channels:
        return channels

    # Cache miss: download the event page and re-parse it
    channels = []

    # GET event_url
    page = tools.get_web_page(event_url)

    # Locate the block containing the acestream links table
    soup = BeautifulSoup(page, 'html5lib')
    div = soup.find('div', attrs={'id': 'livelist'})
    if not div:
        raise WebSiteError(
            u'No hay enlaces',
            u'Los de LiveFootballVideo han hecho cambios en la Web',
            time=self.__settings['notify_secs'])

    # Channel data table; missing table means no links (yet or anymore)
    table = div.find('table', attrs={'class': 'streamtable'})
    if not table:
        # FIX: original code did match[0]/match[1] and raised IndexError
        # when the regex found fewer than two team names
        match = re.findall(r'class="thick">(.*)</h3>', page, re.U)
        raise WebSiteError(
            u' - '.join(match[:2]) if match else u'LiveFootballVideo.com',
            u'El partido ya ha terminado, no hay enlaces' if re.findall(
                r'game was ended', page, re.U) else
            u'Todavía no se han publicado los enlaces del partido',
            time=self.__settings['notify_secs'])

    # Walk the channel rows (skip header row and footer row)
    for row in table.findAll("tr")[1:-1]:
        cells = row.findAll("td")

        # Channel data columns: type, name, language, bitrate, link
        ch_name = tools.str_sanitize(cells[1].get_text())
        ch_type = tools.str_sanitize(cells[0].find('a').get('title'))
        ch_lang = tools.str_sanitize(cells[2].get_text())
        ch_rate = tools.str_sanitize(cells[3].get_text())
        ch_link = tools.str_sanitize(cells[4].find('a').get('href'))

        # Rewrite Arenavision links to point at Arenavision's channel pages
        if ch_link.startswith('http://arenavision.'):
            av_day = re.findall(r'http://.*/([0-3][0-9]).*', ch_link, re.U)
            # FIX: original indexed [0] unconditionally and raised IndexError
            # on unexpected URL shapes; keep the original link in that case
            if av_day:
                ch_link = '%s%s' % (Arenavision.web_url, av_day[0])

        # Skip rows that are not acestream links
        if not tools.str_sanitize(ch_type).lower() == 'acestream' \
                and 'acestream' not in ch_link:
            continue

        # Normalize language and bitrate placeholders
        ch_lang = '--' if not ch_lang or '-' in ch_lang else ch_lang
        ch_rate = 'desconocido' if not ch_rate or '-' in ch_rate else ch_rate

        channels.append({
            'name': self.__get_channel_name(ch_name, ch_rate, ch_lang),
            'icon': art.get_channel_icon(ch_name, self.__settings['path']),
            'fanart': tools.build_path(self.__settings['path'], 'lfv_art.jpg'),
            'link': ch_link
        })

    # Links existed but none were acestream
    if len(channels) == 0:
        match = re.findall(r'class="thick">(.*)</h3>', page, re.U)
        raise WebSiteError(
            u' - '.join(match[:2]) if match else u'LiveFootballVideo.com',
            u'Hay enlaces del partido pero no son de acestream. Inténtalo más tarde...',
            time=self.__settings['notify_secs'])

    # Store in cache and return
    cache.save(event_url, channels)
    return channels