Code Example #1
def shows(url):
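    """List the shows scraped from url, then append pagination entries."""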
    di_list = []
    for eng_name, ori_name, show_url, image in scrapers.shows(url):
        action_url = common.action_url('versions', url=show_url)
        name = cleanstring.show(eng_name, ori_name)
        cm = _saved_to_list_context_menu(eng_name, ori_name, show_url, image)
        di_list.append(common.diritem(name, action_url, image,
                                      context_menu=cm))
    for page, page_url in scrapers.pages(url):
        action_url = common.action_url('shows', url=page_url)
        page_label = cleanstring.page(page)
        di_list.append(common.diritem(page_label, action_url))
    return di_list
Code Example #2
def _episodes(url):
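    """List the episodes at url; if none are listed, treat it as a mirrors page."""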
    episodes = scrapers.episodes(url)
    if len(episodes) > 0:
        di_list = []
        for name, episode_url in episodes:
            action_url = common.action_url('mirrors', url=episode_url)
            epi = cleanstring.episode(name)
            di_list.append(common.diritem(epi, action_url))
        return di_list
    else:
        return _mirrors(url)
Code Example #3
def versions(url):
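    """List a show's versions; with exactly one version, go straight to its episodes."""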
    versions = scrapers.versions(url)
    if len(versions) == 1:
        ver, href = versions[0]
        return _episodes(href)
    else:
        di_list = []
        for label, version_url in versions:
            action_url = common.action_url('episodes', url=version_url)
            ver = cleanstring.version(label)
            di_list.append(common.diritem(ver, action_url))
        return di_list
Code Example #4
def search(url=None):
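    """Prompt for a search term when no url is given, list matching shows with
    pagination, and show a popup (localized string 33304) if nothing is found."""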
    if not url:
        heading = xbmcaddon.Addon().getLocalizedString(33301)
        s = common.input(heading)
        if s:
            url = config.search_url % urllib.quote(s.encode('utf8'))
        else:
            return []
    di_list = []
    for eng_name, ori_name, show_url, image in scrapers.search(url):
        action_url = common.action_url('versions', url=show_url)
        name = cleanstring.show(eng_name, ori_name)
        cm = _saved_to_list_context_menu(eng_name, ori_name, show_url, image)
        di_list.append(common.diritem(name, action_url, image,
                                      context_menu=cm))
    for page, page_url in scrapers.pages(url):
        action_url = common.action_url('search', url=page_url)
        page_label = cleanstring.page(page)
        di_list.append(common.diritem(page_label, action_url))
    if not di_list:
        common.popup(xbmcaddon.Addon().getLocalizedString(33304))
    return di_list
Code Example #5
def _mirrors(url):
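    """List every part of every mirror as a playable (non-folder) item;
    with no mirror listing, attempt to resolve and play the page directly."""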
    mirrors = scrapers.mirrors(url)
    num_mirrors = len(mirrors)
    if num_mirrors > 0:
        di_list = []
        for mirr_label, parts in mirrors:
            for part_label, part_url in parts:
                label = cleanstring.mirror(mirr_label, part_label)
                action_url = common.action_url('play_mirror', url=part_url)
                di_list.append(
                    common.diritem(label, action_url, isfolder=False))
        return di_list
    else:
        # if no mirror listing, try to resolve this page directly
        play_mirror(url)
        return []
Code Example #6
def saved_list():
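    """List saved shows, each with a context-menu entry to remove it from the list."""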
    sl = _get_saved_list()
    di_list = []
    for eng_name, ori_name, show_url, image in sl:
        action_url = common.action_url('versions', url=show_url)
        name = cleanstring.show(eng_name, ori_name)
        remove_save_url = common.action_url('remove_saved',
                                            eng_name=eng_name,
                                            ori_name=ori_name,
                                            show_url=show_url,
                                            image=image)
        builtin_url = common.run_plugin_builtin_url(remove_save_url)
        cm = [(xbmcaddon.Addon().getLocalizedString(33109), builtin_url)]
        di_list.append(common.diritem(name, action_url, image,
                                      context_menu=cm))
    return di_list
Code Example #7
def recent_updates(url):
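    """List recently updated entries, each linking straight to its mirrors listing."""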
    di_list = []
    for name, update_url in scrapers.recent_updates(url):
        action_url = common.action_url('mirrors', url=update_url)
        di_list.append(common.diritem(name, action_url))
    return di_list
Code Example #8
import os.path
import urlparse
from resources.lib.common import diritem, action_url, profile_dir

base_url = 'http://icdrama.se'
cache_file = os.path.join(profile_dir, 'cache.pickle')
store_file = os.path.join(profile_dir, 'store.pickle')

# The trailing forward slashes are necessary;
# without them, page URLs will be wrong (icdrama bug).
search_url = urlparse.urljoin(base_url, '/search/%s/')
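# Top-level menu entries; the integer labels appear to be localized string ids
# (matching the 33xxx getLocalizedString calls in the other examples).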
index_items = [
    diritem(33011, action_url('saved_list')),
    diritem(33000, action_url('recent_updates', url=urlparse.urljoin(base_url, '/recently-updated/'))),
    diritem(33001, action_url('shows', url=urlparse.urljoin(base_url, '/hk-drama/'))),
    diritem(33002, action_url('shows', url=urlparse.urljoin(base_url, '/hk-movie/'))),
    diritem(33003, action_url('shows', url=urlparse.urljoin(base_url, '/hk-show/'))),
    diritem(33004, action_url('shows', url=urlparse.urljoin(base_url, '/chinese-drama/'))),
    diritem(33012, action_url('shows', url=urlparse.urljoin(base_url, '/chinese-drama-cantonesedub/'))),
    diritem(33005, action_url('shows', url=urlparse.urljoin(base_url, '/taiwanese-drama/'))),
    diritem(33013, action_url('shows', url=urlparse.urljoin(base_url, '/taiwanese-drama-cantonesedub/'))),
    diritem(33006, action_url('shows', url=urlparse.urljoin(base_url, '/korean-drama/'))),
    diritem(33014, action_url('shows', url=urlparse.urljoin(base_url, '/korean-drama-cantonesedub/'))),
    diritem(33015, action_url('shows', url=urlparse.urljoin(base_url, '/korean-drama-chinesesubtitles/'))),
    diritem(33007, action_url('shows', url=urlparse.urljoin(base_url, '/korean-show/'))),
    diritem(33008, action_url('shows', url=urlparse.urljoin(base_url, '/japanese-drama/'))),
    diritem(33016, action_url('shows', url=urlparse.urljoin(base_url, '/japanese-drama-cantonesedub/'))),
    diritem(33017, action_url('shows', url=urlparse.urljoin(base_url, '/japanese-drama-chinesesubtitles/'))),
    diritem(33009, action_url('shows', url=urlparse.urljoin(base_url, '/movies/'))),
    diritem(33010, action_url('search'))
]