def get_channels(self, url):
    """
    Get the Movistar+ channels from a given channel-list URL.

    Cached results are returned (with refreshed EPG metadata); otherwise
    the list page is fetched and parsed.

    :param url: The channel list URL
    :type: url: str
    :return: The list of Movistar+ channels
    :rtype: list
    """
    cache = Cache(self.__settings['path'], minutes=180)
    epg = EPG(self.__settings)

    # Serve from cache when possible
    channels = cache.load(url)
    if channels:
        # Refresh the channels' EPG metadata before returning
        epg.update_metadata(channels)
        return channels

    # Cache miss: fetch the page
    page = tools.get_web_page(url)

    # Channel (name, stream URL) pairs from the M3U-like markup
    chs = re.findall(r'#EXTINF:.*,(.*)<br\s/>\s(http[s]?://.*)<', page,
                     re.U)
    if not chs:
        raise WebSiteError(
            u'Problema en Movistar+',
            u'No se han encontrado canales en la lista seleccionada',
            time=self.__settings['notify_secs'])

    # Build the channel entries, skipping playlist (.m3u/.m3u8) links
    channels = []
    for raw_name, raw_link in chs:
        ch_name = tools.str_sanitize(raw_name)
        ch_link = tools.str_sanitize(raw_link)
        if ch_link.endswith(('.m3u8', '.m3u')):
            continue
        channels.append({
            'name': ch_name,
            'video': ch_link,
            'icon': tools.build_path(
                self.__settings['path'],
                'bein.png' if self.__sports else 'movistar.png'),
            'fanart': tools.build_path(
                self.__settings['path'],
                'bein_art.jpg' if self.__sports else 'movistar_art.jpg')
        })

    if not channels:
        raise WebSiteError(
            u'No hay canales',
            u'La lista no contiene canales que se puedan reproducir',
            time=self.__settings['notify_secs'])

    # Add EPG data (sports lists have none)
    if not self.__sports:
        epg.add_metadata(channels)

    # Cache the channels and return them
    cache.save(url, channels)
    return channels
    def get_menu(self):
        """
        Get MovistarTV channel lists

        :return: The list of MovistarTV channel lists
        :rtype: list
        """
        cache = Cache(self.__settings['path'], minutes=60)
        # Hoist the repeated sports/channels ternary into one local
        list_url = self.__sports_url if self.__sports else self.__channels_url

        # Look for the channel lists in the cache
        ch_lists = cache.load(list_url)
        if ch_lists:
            return ch_lists

        # Not cached: fetch them again
        ch_lists = []

        # GET http://iptv.filmover.com/category/spain/
        page = tools.get_web_page(list_url)

        # Find every channel-list URL
        # One per day; the first one is the most recent list
        urls = re.findall(
            r'<h2\s*class="entry-tit.*le">\s*<a href="(.*)"\s*rel="bookmark">(.*)</a></h2>',
            page, re.U)
        # re.findall always returns a list, so a plain truthiness check
        # replaces the old 'urls and type(urls) == list and len(urls) > 0'
        if not urls:
            raise WebSiteError(u'Lista de canales no encontrada',
                               u'Los de Movistar+ han hecho cambios en la Web',
                               time=self.__settings['notify_secs'])

        for url in urls:
            ch_lists.append({
                'name': tools.str_sanitize(url[1]),
                'channel_url': tools.str_sanitize(url[0]),
                'icon': tools.build_path(
                    self.__settings['path'],
                    'bein.png' if self.__sports else 'movistar.png'),
                'fanart': tools.build_path(
                    self.__settings['path'],
                    'bein_art.jpg' if self.__sports else 'movistar_art.jpg')
            })

        if not ch_lists:
            raise WebSiteError(
                u'Problema en Movistar+',
                u'No se han encontrado listas de canales en la Web',
                time=self.__settings['notify_secs'])

        # Cache the lists and return them
        cache.save(list_url, ch_lists)
        return ch_lists
# Beispiel #3
    def __get_all_events(self):
        """
        Get all Torrent-TV.ru events

        :return: The list of Torrent-TV.ru events
        :rtype: list
        """
        cache = Cache(self.__settings['path'])

        # Look for the agenda in the cache
        events = cache.load(self.__agenda_url)
        if events:
            return events

        # Not cached: fetch it again
        events = []

        # GET http://super-pomoyka.us.to/trash/ttv-list/ttv.json
        channels = tools.get_web_page(self.__agenda_url)

        # Grab every name/url/cat tag as ('tag', 'value') tuples
        data = re.findall(r'(name|url|cat)":"([^"]*)"', channels, re.U)
        # re.findall always returns a list, so truthiness is enough
        if not data:
            raise WebSiteError(
                u'Lista de canales no encontrada',
                u'Los de TorrentTV.ru han hecho cambios en la Web',
                time=self.__settings['notify_secs'])

        # Walk the tuples in groups of three: (name, url, cat).
        # Floor division (//) is required: on Python 3 'len(data) / 3' is a
        # float and range() would raise TypeError.
        for x in range(len(data) // 3):
            events.append({
                'name': data[x * 3][1],
                'url': data[x * 3 + 1][1],
                'cat': data[x * 3 + 2][1]
            })

        if not events:
            raise WebSiteError(u'Problema en TorrentTV',
                               u'No hay canales o no hay enlaces en la Web',
                               time=self.__settings['notify_secs'])

        # Cache the events and return them
        cache.save(self.__agenda_url, events)
        return events
# Beispiel #4
    def __get_epg_data(self):
        """
        Get a list containing the EPG data

        :return: The list containing the EPG data (empty on any failure)
        :rtype: list
        """
        cache = Cache(self.__settings['path'], minutes=180)

        # Serve the EPG from cache when possible
        cached_epg = cache.load(self.__channels_epg, log_read_ok=False)
        if cached_epg:
            return cached_epg

        epg = []

        # Fetch the EPG page; on failure, log and return the empty list
        try:
            page = tools.get_web_page(self.__channels_epg)
        except WebSiteError as e:
            tools.write_log('%s: %s' % (e.title, e.message))
            return epg

        # The EPG is embedded as a JSON blob in a data-json attribute
        data = re.findall(r'data-json="(.*)"', page, re.U)
        if not data:
            tools.write_log('data-json= not found in %s' % self.__channels_epg)
            return epg

        # Parse the JSON; cache only on success, otherwise just log
        try:
            epg = json.loads(tools.str_sanitize(data[0]))['channels']
            cache.save(self.__channels_epg, epg)
        except (ValueError, IndexError, KeyError):
            tools.write_log('Malformed data-json= in %s' % self.__channels_epg)

        return epg
# Beispiel #5
    def get_all_events(self):
        """
        Get all Arenavision events

        :return: The list of Arenavision events
        :rtype: list
        """
        cache = Cache(self.__settings['path'])

        # Look for the agenda URI and the channel links in the cache
        page = cache.load(self.web_url, False)
        if page:
            # The agenda URI is cached; look for the events too
            events = cache.load(page['agenda'])
            if events:
                # NOTE(review): names are rebuilt on every cache hit —
                # presumably __get_event_name depends on the current
                # date/time; confirm
                for event in events:
                    event['name'] = self.__get_event_name(
                        event['event'], event['date'], event['time'],
                        event['competition'])
                return events

        # The agenda URI and the links are not cached:
        # fetch the agenda, the channel links and the events again
        events = []

        # GET arenavision.in
        page = tools.get_web_page(self.web_url)

        # Find the agenda URI and the channel links by scanning every URL
        # on the main page:
        # 'av*1' for channels AV1, AV2...
        # 'sc' for the agenda
        urls = self.__get_urls(page)
        if not urls:
            raise WebSiteError(
                u'Agenda no encontrada',
                u'Los de Arenavision han hecho cambios en la Web',
                time=self.__settings['notify_secs'])

        # Cache the agenda URI and the channel links
        cache.save(self.web_url, urls)

        # GET agenda
        agenda = tools.get_web_page(urls['agenda'])

        # Parse the events table (class 'auto-style1')
        soup = BeautifulSoup(agenda, 'html5lib')
        table = soup.find('table', attrs={'class': 'auto-style1'})

        # Skip the header row and the last two (non-event) rows
        for row in table.findAll("tr")[1:-2]:
            cells = row.findAll("td")
            # Cell layout assumed: 0=date, 1=time, 2=sport, 3=competition,
            # 4=event, 5=channel numbers — TODO confirm against the site
            links = self.__get_links(tools.str_sanitize(cells[5].get_text()),
                                     urls['channels'])
            if links and len(links) > 0:
                # Extract an HH:MM time; default to '00:00' when missing
                time_e = re.findall(r'([0-2][0-9]:[0-5][0-9])',
                                    cells[1].get_text(), re.U)
                time_e = time_e[0] if time_e else '00:00'
                competition_art = self.__get_competition_art(
                    cells[2].get_text(), cells[3].get_text())
                events.append({
                    'date':
                    tools.str_sanitize(cells[0].get_text()),
                    'time':
                    tools.str_sanitize(time_e),
                    'sport':
                    tools.str_sanitize(cells[2].get_text()),
                    'competition':
                    tools.str_sanitize(cells[3].get_text()),
                    'event':
                    tools.str_sanitize(cells[4].get_text()),
                    'channels':
                    links,
                    'name':
                    self.__get_event_name(
                        tools.str_sanitize(cells[4].get_text()),
                        tools.str_sanitize(cells[0].get_text()),
                        tools.str_sanitize(time_e),
                        tools.str_sanitize(cells[3].get_text())),
                    'icon':
                    competition_art['icon'],
                    'fanart':
                    competition_art['fanart']
                })

        if len(events) == 0:
            raise WebSiteError(u'Problema en la agenda',
                               u'No hay eventos, ve a la Web y compruébalo',
                               time=self.__settings['notify_secs'])

        # Cache the events
        cache.save(urls['agenda'], events)

        return events
# Beispiel #6
    def __get_channel_data(cache, url):
        """
        Get channel data for an URL

        NOTE(review): no 'self' parameter — presumably declared as a
        @staticmethod outside this view; confirm.

        :param cache: The cache used to load/store the channel data
        :type: cache: Cache
        :param url: The channel URL
        :type: url: str
        :return: The Acestream channel data, or None when the page has no
            acestream links
        :rtype: dict
        """
        # Look for the channel data in the cache
        channel_data = cache.load(url, True)
        if channel_data:
            return channel_data

        # The channel data is not cached
        # Fetch it again

        # GET url
        page = tools.get_web_page(url)

        # Parse the channel table (class 'uk-table')
        soup = BeautifulSoup(page, 'html5lib')
        table = soup.find('table', attrs={'class': 'uk-table'})

        # Channel data accumulators
        ch_name = ''
        ch_sign = ''
        ch_rate = ''
        ch_links = []

        # Extract the channel data
        for row in table.findAll("tr"):
            cells = row.findAll("td")
            cell_0 = tools.str_sanitize(cells[0].get_text())
            if len(cells) == 2:
                # Two-column rows hold the channel metadata
                if 'Name' in cell_0:
                    ch_name = tools.str_sanitize(cells[1].get_text())
                elif 'Bitrate' in cell_0:
                    ch_rate = tools.str_sanitize(cells[1].get_text())
                elif 'Signal' in cell_0:
                    ch_sign = tools.str_sanitize(cells[1].get_text())
            elif 'acestream://' in cell_0:
                # Pull the 40-hex-digit acestream hash out of the link href
                hashes = re.findall(
                    r'[acestrm:/]*([0-9a-f]{40})',
                    tools.str_sanitize(cells[0].find('a').get('href')), re.U)
                if hashes:
                    ch_links.append({
                        'hash': hashes[0],
                        'hd': '(HD)' in cell_0
                    })

        # No acestream links found: signal failure to the caller
        if len(ch_links) == 0:
            return None

        channel_data = {
            'name': ch_name,
            'bitrate': ch_rate,
            'signal': ch_sign,
            'links': ch_links
        }

        # Cache the channel data
        cache.save(url, channel_data)
        return channel_data
# Beispiel #7
    def get_channels(self, event_url):
        """
        Get LiveFootbalLOL channels by a given event URL

        :param event_url: The event URL
        :type: event_url: str
        :return: The list of LiveFootbalLOL event links
        :rtype: list
        """
        cache = Cache(self.__settings['path'], minutes=10)

        # Build the absolute event URL (trim the trailing slash of the base
        # URL when the event URL already starts with '/')
        e_url = '%s%s' % (self.__web_url[:-1] if event_url.startswith('/') else
                          self.__web_url, event_url)

        # Look for the event channels in the cache
        channels = cache.load(e_url, True)
        if channels:
            return channels

        # The channel data is not cached
        # Fetch it again
        channels = []

        # GET e_url
        page = tools.get_web_page(e_url)

        # Match week (kept for reference; currently unused)
        # match_week = re.findall(r'[Mm][Aa][Tt][Cc][Hh]\s[Ww][Ee]{2}[Kk]</td>\s*<td>([0-9]+)</td>', page, re.U)

        # Parse the channel data table
        soup = BeautifulSoup(page, 'html5lib')
        table = soup.find(
            'table',
            attrs={'class': 'uk-table uk-table-hover uk-table-striped'})

        # Extract the channel data (the first two rows are headers)
        prev_lang = None
        for row in table.findAll("tr")[2:]:
            cells = row.findAll("td")

            # General channel data
            ch_name = tools.str_sanitize(cells[1].get_text())
            ch_lang = tools.str_sanitize(cells[0].get_text())

            # Are there links yet? A 'will be here' placeholder means no
            # more links follow: stop if some were found, fail otherwise
            if 'will be here' in ch_name:
                match = re.findall(
                    r'[Mm][Aa][Tt][Cc][Hh]</td>\s*<td><strong>(.*)</strong></td>',
                    page, re.U)
                if len(channels) > 0:
                    break
                else:
                    raise WebSiteError(
                        match[0] if match else u'LiveFootbalLOL',
                        u'Todavía no se han publicado los enlaces del partido',
                        time=self.__settings['notify_secs'])

            # Skip anything that is not an acestream link
            ch_link = tools.str_sanitize(cells[1].find('a').get('href'))
            if not ch_link or 'acestream' not in ch_name.lower():
                continue

            # Language: rows without a '[XX]' tag inherit the previous
            # row's language, defaulting to '[--]'
            if not ch_lang or not re.findall(r'(\[[A-Z]{2}\])', ch_lang, re.U):
                ch_lang = prev_lang if prev_lang else '[--]'
            prev_lang = ch_lang if ch_lang else '[--]'

            # Extended channel data and hashlinks; one entry per hashlink
            channel_data = self.__get_channel_data(cache, ch_link)
            if channel_data:
                for link in channel_data['links']:
                    channels.append({
                        'name':
                        self.__get_channel_name(channel_data['name'],
                                                channel_data['bitrate'],
                                                link['hd'], ch_lang),
                        'icon':
                        art.get_channel_icon(channel_data['name'],
                                             self.__settings['path']),
                        'fanart':
                        tools.build_path(self.__settings['path'],
                                         'lfol_art.jpg'),
                        'hash':
                        link['hash']
                    })

        if len(channels) == 0:
            match = re.findall(
                r'[Mm][Aa][Tt][Cc][Hh]</td>\s*<td><strong>(.*)</strong></td>',
                page, re.U)
            raise WebSiteError(
                u'%s' % (match[0]) if match else u'LiveFootbalLOL.me',
                u'Hay enlaces del partido pero no son de acestream. Inténtalo más tarde...',
                time=self.__settings['notify_secs'])

        # Cache the channels
        cache.save(e_url, channels)

        return channels
# Beispiel #8
    def get_all_events(self):
        """
        Get all LiveFootbalLOL events

        :return: The list of LiveFootbalLOL events
        :rtype: list
        """
        cache = Cache(self.__settings['path'])

        # Look for the agenda URI in the cache
        page = cache.load(self.__web_url, False)
        if page:
            # The agenda URI is cached; look for the events too
            events = cache.load(page['agenda'])
            if events:
                # NOTE(review): names are rebuilt on every cache hit —
                # presumably __get_event_name depends on the current
                # date/time; confirm
                for event in events:
                    event['name'] = self.__get_event_name(
                        event['event'], event['date'], event['time'],
                        event['competition'])
                return events

        # The agenda URI is not cached
        # Fetch the agenda and the events again
        events = []

        # GET livefootballol.in
        page = tools.get_web_page(self.__web_url)

        # Find the agenda URI
        urls = self.__get_urls(page)
        if not urls:
            raise WebSiteError(
                u'Agenda no encontrada',
                u'Los de LiveFootbalLOL han hecho cambios en la Web',
                time=self.__settings['notify_secs'])

        # Cache the agenda URI
        cache.save(self.__web_url, urls)

        # GET agenda
        agenda = tools.get_web_page(urls['agenda'])

        # Events table: tuples of (time, event URL, slug, event name)
        a_events = re.findall(
            r'([0-9]{1,2}:[0-9]{2})\s*<a href=[\'"]?(/streaming/(.*)/[0-9]{2}-[0-9]{2}-[0-9]{4}-.*)[\'"]>(.*)</a>',
            agenda, re.U)

        # Leagues: tuples of (league name, slug of the first event that
        # follows the league header)
        a_leagues = re.findall(
            r'<b>(.*)</b></li>\s*<li>[0-9]{1,2}:[0-9]{2}\s*'
            r'<a href=[\'"]?/streaming/(.*)/[0-9]{2}-[0-9]{2}-[0-9]{4}-.*[\'"]>',
            agenda, re.U)

        for a_event in a_events:
            # Resolve the league by matching the event slug against the
            # league list
            league = self.__get_competition_name(a_event[2], a_leagues)
            competition_art = self.__get_competition_art(league)
            # The event date is embedded in the URL as dd-mm-yyyy
            c_date = re.findall(r'([0-9]{2}-[0-9]{2}-[0-9]{4})-',
                                tools.str_sanitize(a_event[1]), re.U)
            if c_date:
                events.append({
                    'date':
                    c_date[0],
                    'time':
                    tools.str_sanitize(a_event[0]),
                    'competition':
                    tools.str_sanitize(league),
                    'event':
                    tools.str_sanitize(a_event[3]),
                    'channel_url':
                    a_event[1],
                    'name':
                    self.__get_event_name(tools.str_sanitize(a_event[3]),
                                          c_date[0],
                                          tools.str_sanitize(a_event[0]),
                                          tools.str_sanitize(league)),
                    'icon':
                    competition_art['icon'],
                    'fanart':
                    competition_art['fanart']
                })

        if len(events) == 0:
            raise WebSiteError(
                u'Problema en la agenda',
                u'Está vacía o no hay enlaces, puedes comprobarlo en la Web',
                time=self.__settings['notify_secs'])

        # Cache the events
        cache.save(urls['agenda'], events)

        return events
# Beispiel #9
    def get_all_events(self):
        """
        Get all LiveFootballVideo events

        :return: The list of LiveFootballVideo events
        :rtype: list
        """
        cache = Cache(self.__settings['path'])
        agenda_url = '%sstreaming' % self.__web_url

        # Look for the events in the cache
        events = cache.load(agenda_url)
        if events:
            # NOTE(review): names are rebuilt on every cache hit —
            # presumably __get_event_name depends on the current time;
            # confirm
            for event in events:
                event['name'] = self.__get_event_name(event)
            return events

        # The events are not cached; fetch them again
        web_events = []
        events = []

        # GET livefootballvideo.com/streaming
        page = tools.get_web_page(agenda_url)

        # Number of agenda pages; default to 1 when it cannot be determined
        total_pages = self.__get_number_of_pages(page)
        if not total_pages:
            total_pages = 1

        # Walk the events table, one agenda page at a time
        for page_number in range(0, total_pages):
            # GET livefootballvideo.com/streaming/page/{page_number}
            # (the first page was already fetched above)
            page = tools.get_web_page(
                '%s/page/%i' %
                (agenda_url, page_number + 1)) if page_number > 0 else page
            # Regex groups: 0=league, 1=start timestamp, 2=end timestamp,
            # 3=home team, 4=away team, 5=optional 'class="online"' flag
            # (unused below), 6=event URL
            e = re.findall(
                '<li\s*(?:class="odd")?>\s*<div\s*class="leaguelogo\s*column">\s*<img.+?src=".+?"\s*alt=".+?"/>'
                +
                '\s*</div>\s*<div\s*class="league\s*column">\s*<a\s*href=".+?"\s*title=".+?">(.+?)</a>\s*</div>'
                +
                '\s*<div\s*class="date_time\s*column"><span\s*class="starttime\s*time"\s*rel="(.+?)">.+?</span>'
                +
                '\s*-\s*<span\s*class="endtime\s*time"\s*rel="(.+?)">.+?</span></div>\s*<div\s*class="team'
                +
                '\s*column"><img.+?alt="(.+?)"\s*src=".+?"><span>.+?</span></div>\s*<div\s*class="versus'
                +
                '\s*column">vs.</div>\s*<div\s*class="team\s*away\s*column"><span>(.+?)</span><img.+?alt=".+?"'
                +
                '\s*src=".+?"></div>\s*<div\s*class="live_btn\s*column">\s*<a\s*(class="online")?\s*href="(.+?)">',
                page)
            if e:
                web_events.extend(map(list, e))

        if len(web_events) == 0:
            raise WebSiteError(u'Problema en la agenda',
                               u'No hay eventos, puedes comprobarlo en la Web',
                               time=self.__settings['notify_secs'])

        for event in web_events:
            competition = tools.str_sanitize(event[0])
            competition_art = self.__get_competition_art(event[0])
            # Epoch timestamps taken from the 'rel' attributes
            start = float(tools.str_sanitize(event[1]))
            end = float(tools.str_sanitize(event[2]))
            team1 = tools.str_sanitize(event[3])
            team2 = tools.str_sanitize(event[4])
            # NOTE(review): fromtimestamp renders in the local timezone —
            # confirm the site's timestamps are UTC epoch values
            events.append({
                'start':
                start,
                'end':
                end,
                'date':
                datetime.datetime.fromtimestamp(start).strftime('%d/%m/%y'),
                'time':
                datetime.datetime.fromtimestamp(start).strftime('%H:%M'),
                'competition':
                competition,
                'team1':
                team1,
                'team2':
                team2,
                'channel_url':
                tools.str_sanitize(event[6]),
                'name':
                self.__get_event_name({
                    'start': start,
                    'end': end,
                    'competition': competition,
                    'team1': team1,
                    'team2': team2
                }),
                'icon':
                competition_art['icon'],
                'fanart':
                competition_art['fanart']
            })

        if len(events) == 0:
            raise WebSiteError(
                u'Problema en la agenda',
                u'Está vacía o no hay enlaces, puedes comprobarlo en la Web',
                time=self.__settings['notify_secs'])

        # Cache the events
        cache.save(agenda_url, events)

        return events
# Beispiel #10
    def get_channels(self, event_url):
        """
        Get LiveFootballVideo channels by a given event URL

        :param event_url: The event URL
        :type: event_url: str
        :return: The list of LiveFootballVideo event links
        :rtype: list
        """
        cache = Cache(self.__settings['path'], minutes=10)

        # Look for the event channels in the cache
        channels = cache.load(event_url, True)
        if channels:
            return channels

        # The channel data is not cached; fetch it again
        channels = []

        # GET event_url
        page = tools.get_web_page(event_url)

        def page_title():
            # Title used in error messages; the event page normally has two
            # 'thick' headers (the two team names). Guard the indexing: the
            # old 'match[0], match[1]' raised IndexError when only one
            # header matched.
            match = re.findall(r'class="thick">(.*)</h3>', page, re.U)
            if len(match) >= 2:
                return u'%s - %s' % (match[0], match[1])
            return match[0] if match else u'LiveFootballVideo.com'

        # Block containing the acestream links table
        soup = BeautifulSoup(page, 'html5lib')
        div = soup.find('div', attrs={'id': 'livelist'})
        if not div:
            raise WebSiteError(
                u'No hay enlaces',
                u'Los de LiveFootballVideo han hecho cambios en la Web',
                time=self.__settings['notify_secs'])

        # Channel data table
        table = div.find('table', attrs={'class': 'streamtable'})
        if not table:
            # No links yet, or the game already ended
            raise WebSiteError(
                page_title(),
                u'El partido ya ha terminado, no hay enlaces' if re.findall(
                    r'game was ended', page, re.U) else
                u'Todavía no se han publicado los enlaces del partido',
                time=self.__settings['notify_secs'])

        # Extract the channel data (skip the header and trailing rows)
        for row in table.findAll("tr")[1:-1]:
            cells = row.findAll("td")

            # Channel data
            ch_name = tools.str_sanitize(cells[1].get_text())
            ch_type = tools.str_sanitize(cells[0].find('a').get('title'))
            ch_lang = tools.str_sanitize(cells[2].get_text())
            ch_rate = tools.str_sanitize(cells[3].get_text())
            ch_link = tools.str_sanitize(cells[4].find('a').get('href'))

            # Rewrite arenavision links to Arenavision.web_url using the
            # channel number embedded in the original URL. Guard the
            # findall result: the old unconditional [0] raised IndexError
            # when the URL carried no channel number.
            if ch_link.startswith('http://arenavision.'):
                av_ch = re.findall(r'http://.*/([0-3][0-9]).*', ch_link, re.U)
                if av_ch:
                    ch_link = '%s%s' % (Arenavision.web_url, av_ch[0])

            # Skip anything that is not an acestream link
            if not tools.str_sanitize(ch_type).lower(
            ) == 'acestream' and 'acestream' not in ch_link:
                continue

            # Normalize the language
            ch_lang = '--' if not ch_lang or '-' in ch_lang else ch_lang

            # Normalize the bitrate
            ch_rate = 'desconocido' if not ch_rate or '-' in ch_rate else ch_rate

            channels.append({
                'name': self.__get_channel_name(ch_name, ch_rate, ch_lang),
                'icon': art.get_channel_icon(ch_name, self.__settings['path']),
                'fanart': tools.build_path(self.__settings['path'],
                                           'lfv_art.jpg'),
                'link': ch_link
            })

        if not channels:
            raise WebSiteError(
                page_title(),
                u'Hay enlaces del partido pero no son de acestream. Inténtalo más tarde...',
                time=self.__settings['notify_secs'])

        # Cache the channels
        cache.save(event_url, channels)

        return channels