def _search_person_cb(what):
    """Search csfd.cz creators for *what* and add one entry per person found.

    Scrapes two sections of the search results page: the highlighted
    "subject" hits (which carry an extra info line) and the plain
    "creators others" list, then ends the directory listing.
    """
    print("searching for " + what)
    page = util.request(BASE_URL + "hledat/complete-films/?q=" + urllib.quote(what))
    data = util.substr(page, '<div id="search-creators', '<div class="footer')
    found = []
    # Primary hits: name plus the following <p> info, shown in parentheses.
    subject_re = '<h3 class="subject"[^<]+<a href="(?P<url>[^"]+)[^>]+>(?P<name>[^<]+).+?<p>(?P<info>[^<]+)'
    for hit in re.finditer(subject_re, data, re.DOTALL | re.IGNORECASE):
        label = hit.group("name") + " (" + hit.group("info") + ")"
        found.append((hit.group("url"), label))
    # Secondary list: link text plus any trailing <span> annotations,
    # joined with single spaces.
    others = util.substr(data, '<ul class="creators others', "</div")
    for item in re.finditer("<li(?P<item>.+?)</li>", others, re.DOTALL | re.IGNORECASE):
        body = item.group("item")
        link = re.search('<a href="(?P<url>[^"]+)[^>]+>(?P<name>[^<]+)', body)
        if not link:
            continue
        parts = [link.group("name")]
        for span in re.finditer("<span[^>]*>(?P<data>[^<]+)", body):
            parts.append(span.group("data"))
        found.append((link.group("url"), " ".join(parts)))
    for url, label in found:
        info = scrapper._empty_info()
        info["url"] = url
        add_person(label, info)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def add_items(items, showing=None):
    """Add a directory entry for every (url, name) pair in *items*.

    Args:
        items: iterable of (url, name) tuples scraped from a listing page.
        showing: optional mapping of item url -> "currently showing" url,
            forwarded to add_item. Defaults to no mapping.
    """
    # None sentinel instead of a mutable {} default, so calls never share
    # a single dict instance across invocations.
    if showing is None:
        showing = {}
    for url, name in items:
        showing_url = showing.get(url)
        info = scrapper.get_cached_info(furl(url))
        if not info:
            # Nothing cached yet - fall back to a stub carrying just the url.
            info = scrapper._empty_info()
            info["url"] = url
        add_item(name, info, showing_url)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_RATING)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_YEAR)
def artists(params):
    """Browse the csfd.cz creator statistics (tvurci/statistiky).

    Drill-down in up to three steps driven by *params*:
      1. no "type"          -> list the statistic categories,
      2. "type" only        -> list sub-categories, or persons directly
                               when the category has no sub-categories,
      3. "type" + "subtype" -> list the ranked persons of one sub-category.
    """
    # Hoisted scrape patterns (each was duplicated in the original).
    item_re = '<li[^<]+<a href="(?P<url>[^"]+)[^>]+>(?P<name>[^<]+)</a>(?P<data>[^<]+)'
    label_re = '<h3 class="label">(?P<name>[^<]+)'
    if "type" not in params:
        page = util.request(furl("tvurci/statistiky"))
        for m in re.finditer(
            '<div id="(?P<type>[^"]+)[^<]+<h2 class="header">(?P<name>[^<]+)', page, re.DOTALL | re.IGNORECASE
        ):
            xbmcutil.add_dir(m.group("name"), {"artists": "", "type": m.group("type")}, icon())
        return xbmcplugin.endOfDirectory(int(sys.argv[1]))
    typ = params["type"]
    page = util.request(furl("tvurci/statistiky/?expand=" + typ))
    data = util.substr(page, '<div id="' + typ + '"', '<div class="footer')
    results = []
    if "subtype" not in params:
        labels = [m.group("name") for m in re.finditer(label_re, data, re.DOTALL | re.IGNORECASE)]
        if labels:
            for index, name in enumerate(labels):
                # Pass a fresh dict per entry: the original mutated and
                # reused one params dict for every add_dir call, so all
                # entries could end up sharing the last "subtype".
                xbmcutil.add_dir(name, dict(params, subtype=str(index)), icon())
            return xbmcplugin.endOfDirectory(int(sys.argv[1]))
        # No sub-categories: the persons are listed directly.
        for m in re.finditer(item_re, data, re.DOTALL | re.IGNORECASE):
            results.append((m.group("name") + m.group("data"), m.group("url")))
    else:
        subtype = int(params["subtype"])
        # Translate the numeric subtype index back into its heading text,
        # then narrow the page data to that heading's section.
        for index, m in enumerate(re.finditer(label_re, data, re.DOTALL | re.IGNORECASE)):
            if index == subtype:
                subtype = m.group("name")
                break
        data = util.substr(data, subtype, "</div>")
        for m in re.finditer(item_re, data, re.DOTALL | re.IGNORECASE):
            results.append((m.group("name") + m.group("data"), m.group("url")))
    # Ranked person entries: "1. Name", "2. Name", ...
    for index, (name, url) in enumerate(results):
        info = scrapper._empty_info()
        info["url"] = url
        add_person("%i. %s" % (index + 1, name), info)
    return xbmcplugin.endOfDirectory(int(sys.argv[1]))
def favourites(p):
    """Show the logged-in user's favourites.

    With an empty p["fav"] the favourite-category menu is built from the
    user's profile; otherwise p["fav"] is the relative url of one category
    whose items (titles, or persons for the person categories) are listed.
    """
    if p["fav"] == "":
        data = login()
        if data:
            userid = get_userid(data)
            if userid:
                # (label, url suffix) for each favourites category.
                for label, suffix in (
                    ("Oblíbené filmy", "oblibene-filmy/"),
                    ("Oblíbené seriály", "oblibene-serialy/"),
                    ("Oblíbené pořady", "oblibene-porady/"),
                    ("Oblíbení herci", "oblibeni-herci/"),
                    ("Oblíbené herečky", "oblibene-herecky/"),
                    ("Oblíbení režiséři", "oblibeni-reziseri/"),
                    ("Oblíbení skladatelé", "oblibeni-skladatele/"),
                ):
                    xbmcutil.add_dir(label, {"fav": userid + suffix}, icon())
        return xbmcplugin.endOfDirectory(int(sys.argv[1]))
    data = login(p["fav"])
    if not data:
        return xbmcplugin.endOfDirectory(int(sys.argv[1]))
    results = []
    for m in re.finditer(
        '<h3 class="subject"><a href="(?P<url>[^"]+)[^>]+>(?P<name>[^<]+)', data, re.DOTALL | re.IGNORECASE
    ):
        results.append((m.group("url"), m.group("name")))
    # Person categories are loaded as plain person entries. Using "in"
    # instead of the original .find(...) > 0 also matches at position 0
    # and cannot confuse "not found" (-1) with a position test.
    person_lists = ("oblibeni-herci", "oblibene-herecky", "oblibeni-reziseri", "oblibeni-skladatele")
    if any(key in p["fav"] for key in person_lists):
        for url, name in results:
            info = scrapper._empty_info()
            info["url"] = url
            add_person(name, info)
        return xbmcplugin.endOfDirectory(int(sys.argv[1]))
    if preload():
        return preload_items(results)
    add_items(results)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))