Example #1
0
    def getheadlines(self, arg=None, menuw=None):
        """
        Build a menu of headline MenuItems and push it onto menuw.

        arg and menuw follow the standard freevo action signature; menuw
        must not be None because it is used to display the result (or to
        go back when no headlines were found).
        """
        headlines = []
        # the original initialized rawheadlines = [] and immediately
        # overwrote it; iterate the site headlines directly instead
        for title, link, description in self.getsiteheadlines():
            mi = menu.MenuItem('%s' % title, self.show_details, 0)
            # NOTE(review): arg is set to the MenuItem itself — show_details
            # presumably expects the item; confirm against its signature
            mi.arg = mi
            mi.link = link

            # keep paragraph breaks ('\n\n') while collapsing single
            # newlines, then restore them as real newlines
            description = description.replace('\n\n', '&#xxx;').replace('\n', ' ').\
                          replace('&#xxx;', '\n')
            # convert HTML line-break tags to newlines (the original
            # replaced '<p>' twice; the second pass was a no-op)
            description = description.replace('<p>', '\n')
            description = description.replace('<br>', '\n').replace('<br/>', '\n')
            description = description + '\n \n \nLink: ' + link
            # decode HTML entities into readable unicode text
            description = unicode(
                BeautifulStoneSoup(
                    description,
                    convertEntities=BeautifulStoneSoup.HTML_ENTITIES))
            description = util.htmlenties2txt(description, 'unicode')

            # strip any remaining markup tags
            mi.description = re.sub('<.*?>', '', description)

            headlines.append(mi)

        if not headlines:
            headlines.append(
                menu.MenuItem(_('No Headlines found'), menuw.back_one_menu, 0))

        headlines_menu = menu.Menu(_('Headlines'), headlines)
        menuw.pushmenu(headlines_menu)
        menuw.refresh()
Example #2
0
    def getheadlines(self, arg=None, menuw=None):
        """
        this returns a menu with VideoItems for each headline

        arg and menuw follow the standard freevo action signature; menuw
        must not be None because it is used to display the result.
        """
        headlines = []
        # the original initialized rawheadlines = [] and immediately
        # overwrote it; iterate the site headlines directly instead
        for title, link, description in self.getsiteheadlines():
            # create a VideoItem for each headline
            mi = VideoItem(link, self.parent)
            mi.name = title

            # keep paragraph breaks ('\n\n') while collapsing single
            # newlines, then restore them as real newlines
            description = description.replace('\n\n', '&#xxx;').replace('\n', ' ').\
                          replace('&#xxx;', '\n')
            # convert HTML line-break tags to newlines (the original
            # replaced '<p>' twice; the second pass was a no-op)
            description = description.replace('<p>', '\n')
            description = description.replace('<br>', '\n').replace('<br/>', '\n')
            description = description + '\n \n \nLink: ' + link
            description = util.htmlenties2txt(description)

            # strip any remaining markup tags
            mi.plot = re.sub('<.*?>', '', description)

            headlines.append(mi)

        # create the menu
        if not headlines:
            headlines.append(menu.MenuItem(_('No Headlines found'), menuw.back_one_menu, 0))

        headlines_menu = menu.Menu(_('Headlines'), headlines)
        menuw.pushmenu(headlines_menu)
        menuw.refresh()
Example #3
0
    def getheadlines(self, arg=None, menuw=None):
        """
        Build a menu of headline MenuItems and push it onto menuw.

        arg and menuw follow the standard freevo action signature; menuw
        must not be None because it is used to display the result.
        """
        headlines = []
        # the original initialized rawheadlines = [] and immediately
        # overwrote it; iterate the site headlines directly instead
        for title, link, description in self.getsiteheadlines():
            mi = menu.MenuItem('%s' % title, self.show_details, 0)
            # NOTE(review): arg is set to the MenuItem itself — show_details
            # presumably expects the item; confirm against its signature
            mi.arg = mi
            mi.link = link

            # keep paragraph breaks ('\n\n') while collapsing single
            # newlines, then restore them as real newlines
            description = description.replace('\n\n', '&#xxx;').replace('\n', ' ').\
                          replace('&#xxx;', '\n')
            # convert HTML line-break tags to newlines (the original
            # replaced '<p>' twice; the second pass was a no-op)
            description = description.replace('<p>', '\n')
            description = description.replace('<br>', '\n').replace('<br/>', '\n')
            description = description + '\n \n \nLink: ' + link
            # decode HTML entities into readable unicode text
            description = unicode(BeautifulStoneSoup(description, convertEntities=BeautifulStoneSoup.HTML_ENTITIES))
            description = util.htmlenties2txt(description, 'unicode')

            # strip any remaining markup tags
            mi.description = re.sub('<.*?>', '', description)

            headlines.append(mi)

        if not headlines:
            headlines.append(menu.MenuItem(_('No Headlines found'), menuw.back_one_menu, 0))

        headlines_menu = menu.Menu(_('Headlines'), headlines)
        menuw.pushmenu(headlines_menu)
        menuw.refresh()
Example #4
0
 def video_list(parent, title, user):
     """Get the video list for a specific user.

     Fetches the user's uploads feed from the YouTube GData API and
     returns a list of MenuItems, one per video that has a 'watch?v='
     link.  Thumbnails are cached under config.YOUTUBE_DIR.
     """
     _debug_('video_list(parent=%r, title=%r, user=%r)' % (parent, title, user), 2)
     items = []
     feed = 'http://gdata.youtube.com/feeds/users/' + user + '/uploads?orderby=updated'
     service = gdata.service.GDataService(server='gdata.youtube.com')
     box = PopupBox(text=_('Loading video list'))
     box.show()
     try:
         for video in service.GetFeed(feed).entry:
             date = video.published.text.split('T')
             # find the 'watch?v=' link; entries without one are skipped
             # (renamed from 'id'/'file' which shadowed builtins)
             if video.link[1].href.find('watch?v=') >= 0:
                 video_id = video.link[1].href.split('watch?v=')
             elif video.link[0].href.find('watch?v=') >= 0:
                 video_id = video.link[0].href.split('watch?v=')
             else:
                 continue
             mi = menu.MenuItem(date[0] + ' ' + video.title.text, parent.downloadvideo, video_id[1])
             mi.arg = (video.title.text, video_id[1])
             text = util.htmlenties2txt(video.content)
             mi.description = re.search('<span>([^\<]*)<', text).group(1)
             thumb_url = re.search('src="([^\"]*)"', text).group(1)
             thumb = config.YOUTUBE_DIR + '/' + video_id[1].replace('-', '_') + '.jpg'
             if not os.path.exists(thumb):
                 # cache the thumbnail locally (return value not needed)
                 urllib.urlretrieve(thumb_url, thumb)
             mi.image = thumb
             items.append(mi)
     finally:
         # make sure the popup goes away even if the feed fetch fails
         box.destroy()
     return items
Example #5
0
    def getheadlines(self, arg=None, menuw=None):
        """
        Build a menu of headline MenuItems and push it onto menuw.

        arg and menuw follow the standard freevo action signature; menuw
        must not be None because it is used to display the result.
        """
        headlines = []
        # the original initialized rawheadlines = [] and immediately
        # overwrote it; iterate the site headlines directly instead
        for title, link, description in self.getsiteheadlines():
            mi = menu.MenuItem('%s' % title, self.show_details, 0)
            mi.arg = (mi, menuw)
            mi.link = link

            # keep paragraph breaks ('\n\n') while collapsing single
            # newlines, then restore them as real newlines
            description = description.replace('\n\n', '&#xxx;').replace('\n', ' ').\
                          replace('&#xxx;', '\n')
            # convert HTML line-break tags to newlines (the original
            # replaced '<p>' twice; the second pass was a no-op)
            description = description.replace('<p>', '\n')
            description = description.replace('<br>', '\n').replace('<br/>', '\n')
            description = description + '\n \n \nLink: ' + link
            description = util.htmlenties2txt(description)

            # strip any remaining markup tags
            mi.description = re.sub('<.*?>', '', description)

            headlines.append(mi)

        if not headlines:
            headlines.append(
                menu.MenuItem(_('No Headlines found'), menuw.goto_prev_page, 0))

        headlines_menu = menu.Menu(_('Headlines'), headlines)
        rc.app(None)
        menuw.pushmenu(headlines_menu)
        menuw.refresh()
Example #6
0
    def filmaffinity_search(self, arg=None, menuw=None):
        """
        search filmaffinity for this item

        Shows a popup while querying, then either auto-accepts a single
        hit, pushes a menu of matches, or reports that nothing was found.
        """
        box = PopupBox(text=_('Searching in FilmAffinity...'))
        box.show()

        items = []

        try:
            duplicates = []
            if self.disc_set:
                self.searchstring = self.item.media.label
            else:
                self.searchstring = self.item.name

            for id, name, year in self.guessFilmAffinity(
                    self.searchstring, self.disc_set):
                try:
                    uname = Unicode(name)
                    for i in self.item.parent.play_items:
                        if i.name == uname:
                            if not i in duplicates:
                                duplicates.append(i)
                except Exception:
                    # best effort only: the parent may not have play_items
                    pass
                items.append(
                    menu.MenuItem('%s (%s)' % (htmlenties2txt(name), year),
                                  self.filmaffinity_create_fxd, (id, year)))
        except Exception:
            # was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit
            box.destroy()
            box = PopupBox(text=_(
                'Connection error: Probably connection timeout, try again'))
            box.show()
            time.sleep(2)
            box.destroy()
            traceback.print_exc()
            return

        box.destroy()
        # a single hit can be accepted without asking the user
        if config.FILMAFFINITY_AUTOACCEPT_SINGLE_HIT and len(items) == 1:
            self.filmaffinity_create_fxd(arg=items[0].arg, menuw=menuw)
            return

        if items:
            moviemenu = menu.Menu(_('FILMAFFINITY Query'), items)
            menuw.pushmenu(moviemenu)
            return

        box = PopupBox(text=_('No info available'))
        box.show()
        time.sleep(2)
        box.destroy()
        return
Example #7
0
    def moviecovers_search(self, arg=None, menuw=None):
        """
        search moviecovers for this item

        Shows a popup while querying, then either auto-accepts a single
        hit, pushes a menu of matches, or reports that nothing was found.
        """
        box = PopupBox(text=_('Searching Moviecovers.com...'))
        box.show()

        items = []

        try:
            duplicates = []
            if self.disc_set:
                self.searchstring = self.item.media.label
            else:
                self.searchstring = self.item.name

            # 'media_type' renamed from 'type', which shadowed the builtin
            for id, name, year, media_type, origin in self.guessMoviecovers(
                    self.searchstring, self.disc_set):
                try:
                    for i in self.item.parent.play_items:
                        if i.name == name:
                            if not i in duplicates:
                                duplicates.append(i)
                except Exception:
                    # best effort only: the parent may not have play_items
                    pass
                items.append(
                    menu.MenuItem(
                        '%s (%s, %s)' % (htmlenties2txt(name), year, media_type),
                        self.moviecovers_create_fxd, (id, origin)))
        except Exception:
            # was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit
            box.destroy()
            box = PopupBox(text=_(
                'Connection error : Probably connection timeout, try again'))
            box.show()
            time.sleep(2)
            box.destroy()
            traceback.print_exc()
            return

        box.destroy()
        # a single hit can be accepted without asking the user
        if config.MOVIECOVERS_AUTOACCEPT_SINGLE_HIT and len(items) == 1:
            self.moviecovers_create_fxd(arg=items[0].arg, menuw=menuw)
            return

        if items:
            moviemenu = menu.Menu(_('MOVIECOVERS Query'), items)
            menuw.pushmenu(moviemenu)
            return

        box = PopupBox(text=_('No information available from Moviecovers.com'))
        box.show()
        time.sleep(2)
        box.destroy()
        return
Example #8
0
    def filmaffinity_search(self, arg=None, menuw=None):
        """
        search filmaffinity for this item

        Shows a popup while querying, then either auto-accepts a single
        hit, pushes a menu of matches, or reports that nothing was found.
        """
        box = PopupBox(text=_('Searching in FilmAffinity...'))
        box.show()

        items = []

        try:
            duplicates = []
            if self.disc_set:
                self.searchstring = self.item.media.label
            else:
                self.searchstring = self.item['title']

            for id, name, year in self.guessFilmAffinity(self.searchstring, self.disc_set):
                try:
                    uname = Unicode(name)
                    for i in self.item.parent.play_items:
                        if i.name == uname:
                            if not i in duplicates:
                                duplicates.append(i)
                except Exception:
                    # best effort only: the parent may not have play_items
                    pass
                items.append(menu.MenuItem('%s (%s)' % (htmlenties2txt(name), year),
                    self.filmaffinity_create_fxd, (id, year)))
        except Exception:
            # was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit
            box.destroy()
            box = PopupBox(text=_('Connection error: Probably connection timeout, try again'))
            box.show()
            time.sleep(2)
            box.destroy()
            traceback.print_exc()
            return

        box.destroy()
        # a single hit can be accepted without asking the user
        if config.FILMAFFINITY_AUTOACCEPT_SINGLE_HIT and len(items) == 1:
            self.filmaffinity_create_fxd(arg=items[0].arg, menuw=menuw)
            return

        if items:
            moviemenu = menu.Menu(_('FILMAFFINITY Query'), items)
            menuw.pushmenu(moviemenu)
            return

        box = PopupBox(text=_('No info available'))
        box.show()
        time.sleep(2)
        box.destroy()
        return
Example #9
0
    def moviecovers_search(self, arg=None, menuw=None):
        """
        search moviecovers for this item

        Shows a popup while querying, then either auto-accepts a single
        hit, pushes a menu of matches, or reports that nothing was found.
        """
        box = PopupBox(text=_('Searching Moviecovers.com...'))
        box.show()

        items = []

        try:
            duplicates = []
            if self.disc_set:
                self.searchstring = self.item.media.label
            else:
                self.searchstring = self.item.name

            # 'media_type' renamed from 'type', which shadowed the builtin
            for id, name, year, media_type, origin in self.guessMoviecovers(self.searchstring, self.disc_set):
                try:
                    for i in self.item.parent.play_items:
                        if i.name == name:
                            if not i in duplicates:
                                duplicates.append(i)
                except Exception:
                    # best effort only: the parent may not have play_items
                    pass
                items.append(menu.MenuItem('%s (%s, %s)' % (htmlenties2txt(name), year, media_type),
                    self.moviecovers_create_fxd, (id, origin)))
        except Exception:
            # was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit
            box.destroy()
            box = PopupBox(text=_('Connection error : Probably connection timeout, try again'))
            box.show()
            time.sleep(2)
            box.destroy()
            traceback.print_exc()
            return

        box.destroy()
        # a single hit can be accepted without asking the user
        if config.MOVIECOVERS_AUTOACCEPT_SINGLE_HIT and len(items) == 1:
            self.moviecovers_create_fxd(arg=items[0].arg, menuw=menuw)
            return

        if items:
            moviemenu = menu.Menu(_('MOVIECOVERS Query'), items)
            menuw.pushmenu(moviemenu)
            return

        box = PopupBox(text=_('No information available from Moviecovers.com'))
        box.show()
        time.sleep(2)
        box.destroy()
        return
Example #10
0
    def imdb_search(self, arg=None, menuw=None):
        """
        search imdb for this item

        Collects IMDB matches for the item into MenuItems.
        NOTE(review): this block appears truncated — the success path
        (destroying the popup and presenting 'items') is not visible here;
        confirm against the full source before relying on it.
        """
        fxd = FxdImdb()

        box = PopupBox(text=_('searching IMDB...'))
        box.show()

        items = []

        try:
            duplicates = []
            # disc sets are searched by media label, everything else by name
            if self.disc_set:
                self.searchstring = self.item.media.label
            else:
                self.searchstring = self.item.name

            for id, name, year, type in fxd.guessImdb(self.searchstring,
                                                      self.disc_set):
                try:
                    # collect items that already carry this name
                    # (best effort; 'duplicates' is not used further in
                    # the visible code)
                    for i in self.item.parent.play_items:
                        if i.name == name:
                            if not i in duplicates:
                                duplicates.append(i)
                except:
                    pass
                try:
                    items.append(
                        menu.MenuItem(
                            '%s (%s, %s)' % (htmlenties2txt(name), year, type),
                            self.imdb_create_fxd, (id, year)))
                except UnicodeError, e:
                    print e
        except Exception, e:
            # any failure during the query is reported to the user
            print 'imdb_search:', e
            box.destroy()
            box = PopupBox(text=_('Unknown error while connecting to IMDB'))
            box.show()
            time.sleep(2)
            box.destroy()
            return
Example #11
0
 def __init__(self, video, id, parent):
     """Create a VideoItem for a YouTube entry served via the local proxy."""
     VideoItem.__init__(self, local_server.get_url('/youtube/%s' % id), parent)
     self.name = unicode(video.title.text)
     content = video.content
     if content.type == "text" and content.text:
         # plain text content is used directly
         self.description = unicode(content.text)
     elif content.type == "html":
         # pull the description and thumbnail out of the HTML snippet
         html = util.htmlenties2txt(unicode(content.text), 'unicode')
         span = re.search('<span>([^\<]*)<', html)
         if span:
             self.description = decodeAcute(span.group(1))
         else:
             self.description = html
         img = re.search('src="([^\"]*)"', html)
         if img:
             self.image = img.group(1)
     else:
         self.description = ""
     # append the uploader and the publication date
     self.description += '\n' + _('User') + ': ' + video.author[0].name.text
     published = video.published.text.split('T')
     self.description += '. ' + published[0]
     self.plot = self.description
Example #12
0
 def __init__(self, video, id, parent):
     """Wrap a YouTube feed entry as a playable VideoItem."""
     url = local_server.get_url('/youtube/%s' % id)
     VideoItem.__init__(self, url, parent)
     self.name = unicode(video.title.text)
     ctype = video.content.type
     if ctype == "text" and video.content.text:
         self.description = unicode(video.content.text)
     elif ctype == "html":
         # extract the description text and the thumbnail URL
         markup = util.htmlenties2txt(unicode(video.content.text), 'unicode')
         m = re.search('<span>([^\<]*)<', markup)
         if m:
             self.description = decodeAcute(m.group(1))
         else:
             self.description = markup
         m = re.search('src="([^\"]*)"', markup)
         if m:
             self.image = m.group(1)
     else:
         self.description = ""
     # append author and publication date in one step
     author = video.author[0].name.text
     day = video.published.text.split('T')[0]
     self.description = self.description + '\n' + _('User') + ': ' + author + '. ' + day
     self.plot = self.description
    def cwd(self, arg=None, menuw=None):
        """
        Download the url and create a menu with more links

        Fetches self.url, extracts anchor and movie links, filters them
        against the blacklist, and pushes a menu of Link and VideoItem
        entries.  With arg == 'autoplay'/'autoplay_max' a single movie
        result is played directly instead.
        """
        txdata = None
        txheaders = {
            'User-Agent': 'freevo %s (%s)' % (config.VERSION, sys.platform),
            'Accept-Language': 'en-us',
        }

        popup = PopupBox(text=_('Downloading link list...'))
        popup.show()
        # fetch the page; abort with an alert on any failure
        # NOTE(review): bare except hides the real error
        try:
            req = urllib2.Request(self.url, txdata, txheaders)
            response = urllib2.urlopen(req)
        except:
            popup.destroy()
            box = AlertBox(text=_('Failed to download %s') % self.url)
            box.show()
            return

        # base for this url
        self.base = response.geturl()[:response.geturl().rfind('/') + 1]

        # normalize the text so that it can be searched
        # ('all' shadows the builtin — pre-existing in this py2 code)
        all = ''
        for line in response.read().split('\n'):
            all += line + ' '
        all = all.replace('\r', '').replace('\t', ' ')

        # find names for links (text between <a>)
        name_map = {}
        m = re.compile('href="([^"]*)">([^<]*)</a>', re.I).findall(all)
        if m:
            for url, title in m:
                # collapse runs of spaces in the link title
                while title.find('  ') > 0:
                    title = title.replace('  ', ' ')
                title = util.htmlenties2txt(title.lstrip().rstrip())
                name_map[url] = title

        # now search for links, normal links and movie links together
        all_urls = []
        movie_regexp = re.compile('.*(mov|avi|mpg|asf)$', re.I)
        for m in (re.compile('href="(.*?)"', re.I).findall(all),
                  re.compile('"(http.[^"]*.(mov|avi|mpg|asf))"',
                             re.I).findall(all)):
            if m:
                for url in m:
                    # the second regexp yields (url, extension) tuples
                    if isinstance(url, tuple):
                        url = url[0]
                    all_urls.append(url)

        # now split all_urls into link_urls (more links) and
        # movie_urls (video)
        link_urls = []
        movie_urls = []

        if all_urls:
            for url in all_urls:
                long_url = self.make_complete_url(response.geturl(), url)

                # bad url?
                if not long_url:
                    continue

                # find a title
                title = url
                if name_map.has_key(url):
                    title = name_map[url]
                else:
                    title = title.replace('.html', '').replace('.php', '')

                # remove blacklisted urls
                for b in self.blacklist_regexp:
                    if b(long_url):
                        break
                else:
                    # movie or link?
                    if movie_regexp.match(long_url):
                        movie_urls.append((long_url, url, title))
                    else:
                        link_urls.append((long_url, url, title))

        items = []

        # add all link urls
        # ('long' shadows the builtin — pre-existing in this py2 code)
        if link_urls:
            for long, short, title in link_urls:
                # should all links be displayed?
                if (not self.all_links) and long.find(self.base) != 0:
                    continue
                # don't display self
                if long == self.url:
                    continue
                # search for duplicate links
                for l in items:
                    if l.url == long:
                        # increase counter, this link seems to be
                        # important
                        l.counter += 1
                        break

                else:
                    # add link as new new
                    l = Link(title, long, self.blacklist_regexp, self.autoplay,
                             self.all_links, self)
                    l.url_name = short
                    l.image = None
                    items.append(l)

        # sort all items
        items.sort(lambda l, o: cmp(l.sort().upper(), o.sort().upper()))

        # add part of the url to the name in case a title is used for
        # more than one item
        for l in items:
            for o in items:
                if l.name == o.name and l.name.find('(') == -1 and not l == o:
                    # duplicate found, get last part of the url
                    url = l.url[l.url.rfind('/') + 1:]
                    if not url:
                        url = l.url[l.url[:-1].rfind('/') + 1:]
                    if url:
                        l.name = '%s (%s)' % (l.name, url)
                    # same for the other
                    url = o.url[o.url.rfind('/') + 1:]
                    if not url:
                        url = o.url[o.url[:-1].rfind('/') + 1:]
                    if url:
                        o.name = '%s (%s)' % (o.name, url)

        # now search for movies
        movies = []
        if movie_urls:
            for long, short, title in movie_urls:
                # search for duplicate links
                for l in movies:
                    if l.filename == long:
                        break
                else:
                    movies.append(VideoItem(long, self, parse=False))
                    if title.find('/') != -1:
                        title = 'Video: ' + long[long.rfind('/') + 1:]
                    movies[-1].name = title

        # all done
        popup.destroy()
        if len(movies) == 1 and arg == 'autoplay':
            movies[0].play(menuw=menuw)
        elif len(movies) == 1 and arg == 'autoplay_max':
            movies[0].play_max_cache(menuw=menuw)
        elif items or movies:
            menuw.pushmenu(menu.Menu(self.name, movies + items))
Example #14
0
    def cwd(self, arg=None, menuw=None):
        """
        Download the url and create a menu with more links

        Fetches self.url, extracts anchor and movie links, filters them
        against the blacklist, and pushes a menu of Link and VideoItem
        entries.  With arg == 'autoplay'/'autoplay_max' a single movie
        result is played directly instead.
        """
        txdata = None
        txheaders = {
            'User-Agent': 'freevo %s (%s)' % (config.VERSION, sys.platform),
            'Accept-Language': 'en-us',
            }

        popup = PopupBox(text=_('Downloading link list...'))
        popup.show()
        # fetch the page; abort with an alert on any failure
        # NOTE(review): bare except hides the real error
        try:
            req = urllib2.Request(self.url, txdata, txheaders)
            response = urllib2.urlopen(req)
        except:
            popup.destroy()
            box = AlertBox(text=_('Failed to download %s') % self.url)
            box.show()
            return

        # base for this url
        self.base = response.geturl()[:response.geturl().rfind('/')+1]

        # normalize the text so that it can be searched
        # ('all' shadows the builtin — pre-existing in this py2 code)
        all = ''
        for line in response.read().split('\n'):
            all += line + ' '
        all = all.replace('\r', '').replace('\t', ' ')

        # find names for links (text between <a>)
        name_map = {}
        m = re.compile('href="([^"]*)">([^<]*)</a>', re.I).findall(all)
        if m:
            for url, title in m:
                # collapse runs of spaces in the link title
                while title.find('  ') > 0:
                    title = title.replace('  ', ' ')
                title = util.htmlenties2txt(title.lstrip().rstrip())
                name_map[url] = title


        # now search for links, normal links and movie links together
        all_urls = []
        movie_regexp = re.compile('.*(mov|avi|mpg|asf)$', re.I)
        for m in (re.compile('href="(.*?)"', re.I).findall(all),
                  re.compile('"(http.[^"]*.(mov|avi|mpg|asf))"', re.I).findall(all)):
            if m:
                for url in m:
                    # the second regexp yields (url, extension) tuples
                    if isinstance(url, tuple):
                        url = url[0]
                    all_urls.append(url)


        # now split all_urls into link_urls (more links) and
        # movie_urls (video)
        link_urls  = []
        movie_urls = []

        if all_urls:
            for url in all_urls:
                long_url  = self.make_complete_url(response.geturl(), url)

                # bad url?
                if not long_url:
                    continue

                # find a title
                title = url
                if name_map.has_key(url):
                    title = name_map[url]
                else:
                    title = title.replace('.html', '').replace('.php', '')

                # remove blacklisted urls
                for b in self.blacklist_regexp:
                    if b(long_url):
                        break
                else:
                    # movie or link?
                    if movie_regexp.match(long_url):
                        movie_urls.append((long_url, url, title))
                    else:
                        link_urls.append((long_url, url, title))



        items  = []

        # add all link urls
        # ('long' shadows the builtin — pre-existing in this py2 code)
        if link_urls:
            for long, short, title in link_urls:
                # should all links be displayed?
                if (not self.all_links) and long.find(self.base) != 0:
                    continue
                # don't display self
                if long == self.url:
                    continue
                # search for duplicate links
                for l in items:
                    if l.url == long:
                        # increase counter, this link seems to be
                        # important
                        l.counter += 1
                        break

                else:
                    # add link as new new
                    l = Link(title, long, self.blacklist_regexp, self.autoplay,
                             self.all_links, self)
                    l.url_name = short
                    l.image = None
                    items.append(l)


        # sort all items
        items.sort(lambda l, o: cmp(l.sort().upper(),
                                    o.sort().upper()))

        # add part of the url to the name in case a title is used for
        # more than one item
        for l in items:
            for o in items:
                if l.name == o.name and l.name.find('(') == -1 and not l == o:
                    # duplicate found, get last part of the url
                    url = l.url[l.url.rfind('/')+1:]
                    if not url:
                        url = l.url[l.url[:-1].rfind('/')+1:]
                    if url:
                        l.name = '%s (%s)' % (l.name, url)
                    # same for the other
                    url = o.url[o.url.rfind('/')+1:]
                    if not url:
                        url = o.url[o.url[:-1].rfind('/')+1:]
                    if url:
                        o.name = '%s (%s)' % (o.name, url)

        # now search for movies
        movies = []
        if movie_urls:
            for long, short, title in movie_urls:
                # search for duplicate links
                for l in movies:
                    if l.filename == long:
                        break
                else:
                    movies.append(VideoItem(long, self, parse=False))
                    if title.find('/') != -1:
                        title = 'Video: ' + long[long.rfind('/')+1:]
                    movies[-1].name = title

        # all done
        popup.destroy()
        if len(movies) == 1 and arg=='autoplay':
            movies[0].play(menuw=menuw)
        elif len(movies) == 1 and arg=='autoplay_max':
            movies[0].play_max_cache(menuw=menuw)
        elif items or movies:
            menuw.pushmenu(menu.Menu(self.name, movies + items))