def download_play(self, arg=None, menuw=None):
    """
    Download the program referenced by ``arg`` and start playback.

    Parameters:
      arg   -- mapping with at least the keys 'flv' (video source url)
               and 'image' (cover/preview image url)
      menuw -- menu widget playback should be attached to

    A "downloading" popup is shown while the video item is prepared.
    The popup is destroyed even if preparation fails, so the UI is
    never left blocked behind a stale popup (the original code leaked
    the popup on any exception raised by VideoItem/_fetch_image).
    """
    pop = PopupBox("Descargando programa")
    pop.show()
    try:
        # NOTE(review): the video source is passed through _fetch_image()
        # exactly like the cover image below -- confirm this helper really
        # resolves the 'flv' url and is not an accidental copy/paste.
        video = VideoItem(_fetch_image(arg['flv']), self)
    finally:
        # Always remove the popup, even when VideoItem() raises.
        pop.destroy()
    video.image = _fetch_image(arg['image'])
    video.menuw = menuw
    video.play()
def cwd(self, arg=None, menuw=None):
    """
    Download the url and create a menu with more links.

    Fetches self.url, scrapes it for <a href> targets, splits the
    results into further link pages and direct movie files
    (mov/avi/mpg/asf), and either autoplays a single found movie
    (arg == 'autoplay' / 'autoplay_max') or pushes a new menu with
    all links and movies onto menuw.
    """
    txdata = None
    txheaders = {
        'User-Agent': 'freevo %s (%s)' % (config.VERSION, sys.platform),
        'Accept-Language': 'en-us',
    }
    popup = PopupBox(text=_('Downloading link list...'))
    popup.show()
    try:
        req = urllib2.Request(self.url, txdata, txheaders)
        response = urllib2.urlopen(req)
    except:
        # Best-effort: any network/url failure just reports and returns.
        popup.destroy()
        box = AlertBox(text=_('Failed to download %s') % self.url)
        box.show()
        return

    # base for this url (everything up to and including the last '/'),
    # used later to decide whether a link stays on this site
    self.base = response.geturl()[:response.geturl().rfind('/') + 1]

    # normalize the text so that it can be searched:
    # flatten the page to one whitespace-separated line
    all = ''
    for line in response.read().split('\n'):
        all += line + ' '
    all = all.replace('\r', '').replace('\t', ' ')

    # find names for links (text between <a> and </a>)
    name_map = {}
    m = re.compile('href="([^"]*)">([^<]*)</a>', re.I).findall(all)
    if m:
        for url, title in m:
            # collapse runs of double spaces introduced by the
            # flattening above (NOTE(review): reconstructed as the
            # two-space literal; a single-space replace would loop)
            while title.find('  ') > 0:
                title = title.replace('  ', ' ')
            title = util.htmlenties2txt(title.lstrip().rstrip())
            name_map[url] = title

    # now search for links, normal links and movie links together
    all_urls = []
    movie_regexp = re.compile('.*(mov|avi|mpg|asf)$', re.I)
    for m in (re.compile('href="(.*?)"', re.I).findall(all),
              re.compile('"(http.[^"]*.(mov|avi|mpg|asf))"', re.I).findall(all)):
        if m:
            for url in m:
                # the second regexp yields (url, extension) tuples
                if isinstance(url, tuple):
                    url = url[0]
                all_urls.append(url)

    # now split all_urls into link_urls (more links) and
    # movie_urls (video)
    link_urls = []
    movie_urls = []
    if all_urls:
        for url in all_urls:
            long_url = self.make_complete_url(response.geturl(), url)
            # bad url?
            if not long_url:
                continue
            # find a title: prefer the scraped <a> text, else the url
            # itself with common page suffixes stripped
            title = url
            if name_map.has_key(url):
                title = name_map[url]
            else:
                title = title.replace('.html', '').replace('.php', '')
            # remove blacklisted urls; the for/else only classifies the
            # url when no blacklist pattern matched (no break)
            for b in self.blacklist_regexp:
                if b(long_url):
                    break
            else:
                # movie or link?
                if movie_regexp.match(long_url):
                    movie_urls.append((long_url, url, title))
                else:
                    link_urls.append((long_url, url, title))

    items = []
    # add all link urls
    if link_urls:
        for long, short, title in link_urls:
            # should all links be displayed? if not, keep only links
            # that stay under self.base
            if (not self.all_links) and long.find(self.base) != 0:
                continue
            # don't display self
            if long == self.url:
                continue
            # search for duplicate links
            for l in items:
                if l.url == long:
                    # increase counter, this link seems to be
                    # important
                    l.counter += 1
                    break
            else:
                # add link as new
                l = Link(title, long, self.blacklist_regexp,
                         self.autoplay, self.all_links, self)
                l.url_name = short
                l.image = None
                items.append(l)

    # sort all items (case-insensitive, Py2 cmp-style sort)
    items.sort(lambda l, o: cmp(l.sort().upper(), o.sort().upper()))

    # add part of the url to the name in case a title is used for
    # more than one item; skips items already disambiguated (name
    # contains '(')
    for l in items:
        for o in items:
            if l.name == o.name and l.name.find('(') == -1 and not l == o:
                # duplicate found, get last part of the url
                url = l.url[l.url.rfind('/') + 1:]
                if not url:
                    # url ended with '/': take the last path component
                    url = l.url[l.url[:-1].rfind('/') + 1:]
                if url:
                    l.name = '%s (%s)' % (l.name, url)
                # same for the other
                url = o.url[o.url.rfind('/') + 1:]
                if not url:
                    url = o.url[o.url[:-1].rfind('/') + 1:]
                if url:
                    o.name = '%s (%s)' % (o.name, url)

    # now search for movies
    movies = []
    if movie_urls:
        for long, short, title in movie_urls:
            # search for duplicate links
            for l in movies:
                if l.filename == long:
                    break
            else:
                movies.append(VideoItem(long, self, parse=False))
                # titles containing '/' are unusable; fall back to the
                # last path component of the url
                if title.find('/') != -1:
                    title = 'Video: ' + long[long.rfind('/') + 1:]
                movies[-1].name = title

    # all done
    popup.destroy()
    if len(movies) == 1 and arg == 'autoplay':
        movies[0].play(menuw=menuw)
    elif len(movies) == 1 and arg == 'autoplay_max':
        movies[0].play_max_cache(menuw=menuw)
    elif items or movies:
        menuw.pushmenu(menu.Menu(self.name, movies + items))