示例#1
0
def collect(collectid):
    """List the songs of a collect (playlist) as playable music items.

    :param collectid: id of the collect to fetch.
    :return: plugin.finish() result in thumbnail view mode.
    """
    url = ROOT_URL + '/app/android/collect?id=%s' % collectid
    req = urllib2.Request(url, headers=HTTP_HEADERS)
    content = urllib2.urlopen(req, timeout=5).read()
    data = json.loads(content)
    result = []
    for item in data['collect']['songs']:
        title = unescape_name(item['name'])
        lyric = None
        if item['lyric'] and item['lyric'].startswith('http'):
            # BUGFIX: request the lyric URL; the original re-requested the
            # collect URL, so 'lyrics' always held the collect JSON instead.
            req = urllib2.Request(item['lyric'], headers=HTTP_HEADERS)
            lyric = urllib2.urlopen(req, timeout=5).read()
        result.append({
            'label': title,
            'label2': item['artist_name'],
            'path': item['location'],
            'info': {'type': 'music',
                     'infoLabels': [('title', title), ('album', item['title']),
                                    ('artist', item['artist_name']),
                                    ('lyrics', lyric)]},
            'thumbnail': item['album_logo'],
            'is_playable': True,
            'context_menu': [
                (_L(30100), actions.update_view(plugin.url_for('artist_top', artistid=item['artist_id']))),
                (_L(30101), actions.update_view(plugin.url_for('album', albumid=item['album_id']))),
                (_L(30102), actions.background(plugin.url_for('download_file', url=item['location']))),
            ]
        })
    #plugin.add_to_playlist(result, playlist='music')
    return plugin.finish(result, view_mode='thumbnail')
示例#2
0
 def get_playlist_item(self, playlist, in_library=False):
     """Build a Kodi list item for *playlist*.

     When *in_library* is True the item targets the library detail view
     and gains rename/remove context-menu entries.
     """
     thumb = Image.get_url(Image.TYPE_PLAYLIST, playlist.id,
                           Image.SIZE_PLAYLIST_ORIGINAL)
     if in_library:
         path = self._plugin.url_for('playlists_library_detail',
                                     playlist_id=playlist.id)
     else:
         path = self._plugin.url_for('playlists_detail',
                                     playlist_id=playlist.id)
     item = {
         'label': playlist.name,
         'path': path,
         'thumbnail': thumb,
         'properties': {'fanart_image': thumb},
     }
     if in_library:
         rename_url = self._plugin.url_for('playlists_library_rename',
                                           playlist_id=playlist.id)
         remove_url = self._plugin.url_for('playlists_library_remove',
                                           playlist_id=playlist.id)
         item['context_menu'] = [
             (self._plugin.get_string(30251),
              actions.update_view(rename_url)),
             (self._plugin.get_string(30252),
              actions.update_view(remove_url)),
         ]
     return item
示例#3
0
def similar_artists(artistid):
    """List artists similar to *artistid*, each with top-songs and
    similar-artists context-menu entries.
    """
    req = urllib2.Request(ROOT_URL + '/app/android/artist-similar?id=' + artistid,
                          headers=HTTP_HEADERS)
    data = json.loads(urllib2.urlopen(req, timeout=5).read())
    result = []
    for artist in data['artists']:
        aid = artist['artist_id']
        result.append({
            'label': unescape_name(artist['name']),
            'path': plugin.url_for('artist_top', artistid=aid),
            'thumbnail': artist['logo'],
            'context_menu': [
                (_L(30103),
                 actions.update_view(plugin.url_for('topsongs', artistid=aid))),
                (_L(30104),
                 actions.update_view(plugin.url_for('similar_artists',
                                                    artistid=aid))),
            ],
        })
    return plugin.finish(result, view_mode='thumbnail')
 def get_album_item(self, album, show_artist=True, in_library=False, library_artist_id=None):
     """Build a Kodi list item for *album*.

     The label is "Artist - Album (Year)" or "Album (Year)" depending on
     *show_artist*.  Library items point to the library track view and
     get a "remove" menu entry; otherwise the item points at the album
     detail view and gets an "add to library" entry.
     """
     year = str(album.get_release_date().year)
     if show_artist:
         label = album.artist.name + ' - ' + album.name + ' (' + year + ')'
     else:
         label = album.name + ' (' + year + ')'
     artist_view = self._plugin.url_for('artists_detail',
                                        artist_id=album.artist.id)
     menu = [(self._plugin.get_string(30255).format(album.artist.name),
              actions.update_view(artist_view))]
     item = {
         'label': label,
         'thumbnail': album.images[0].get_url(size=Image.SIZE_ORIGINAL),
         'context_menu': menu,
     }
     if in_library:
         item['path'] = self._plugin.url_for('albums_library_tracks',
                                             album_id=album.id)
         if library_artist_id is None:
             remove_url = self._plugin.url_for('albums_library_remove',
                                               album_id=album.id)
         else:
             remove_url = self._plugin.url_for('artists_library_albums_remove',
                                               artist_id=library_artist_id,
                                               album_id=album.id)
         menu.append((self._plugin.get_string(30217),
                      actions.update_view(remove_url)))
     else:
         item['path'] = self._plugin.url_for('albums_detail',
                                             album_id=album.id)
         menu.append((self._plugin.get_string(30215),
                      actions.background(self._plugin.url_for(
                          'albums_library_add', album_id=album.id))))
     return item
示例#5
0
def search(domain):
    """Prompt for a keyword and search for artists or albums.

    :param domain: search domain, 'artist' or 'album'.
    """
    keywd = plugin.keyboard(heading='Keyword')
    if not keywd:
        return None

    url = ROOT_URL + "/search/%s?key=%s" % (domain, urllib.quote_plus(keywd))
    req = urllib2.Request(url, headers=HTTP_HEADERS)
    html = BeautifulSoup(urllib2.urlopen(req, timeout=5).read())
    result = []
    for block in html.findAll('div', {'class': re.compile('_item100_block')}):
        title = ''.join(block.find('p', {'class': 'name'}).a.findAll(text=True))
        thumb = block.find('img')['src']
        if domain == 'artist':
            href = block.find('a', {'class': 'artist100'})['href']
            artistid = href.split('/')[-1]
            result.append({
                'label': title,
                'path': plugin.url_for('artist_top', artistid=artistid),
                'thumbnail': thumb,
                'context_menu': [
                    (_L(30103),
                     actions.update_view(plugin.url_for('topsongs',
                                                        artistid=artistid))),
                    (_L(30104),
                     actions.update_view(plugin.url_for('similar_artists',
                                                        artistid=artistid))),
                ],
            })
        elif domain == 'album':
            albumid = block.find('a',
                                 {'class': 'CDcover100'})['href'].split('/')[-1]
            artist_node = block.find('a', {'class': 'singer'})
            artistid = artist_node['href'].split('/')[-1]
            result.append({
                'label': title,
                'label2': artist_node.string,
                'path': plugin.url_for('album', albumid=albumid),
                'thumbnail': thumb,
                'context_menu': [
                    (_L(30100),
                     actions.update_view(plugin.url_for('artist_top',
                                                        artistid=artistid))),
                ],
            })
    return plugin.finish(result, view_mode='thumbnail')
示例#6
0
 def __context(name, id):
     """Return the context-menu entry that lists movies by director *id*."""
     target = plugin.url_for('show_movies', source='director',
                             page='1', query=str(id))
     return [('Get Movies for ' + name, actions.update_view(target))]
示例#7
0
def bang_albums(type, style):
    """List chart albums for the given chart *type* and *style*."""
    url = ROOT_URL + '/web/bang-albums?type=%s&style=%s' % (type, style)
    plugin.log.debug(url)
    req = urllib2.Request(url, headers=HTTP_HEADERS)
    data = json.loads(urllib2.urlopen(req, timeout=5).read())

    result = []
    for album in data['albums']:
        menu = [(_L(30100),
                 actions.update_view(plugin.url_for(
                     'artist_top', artistid=album['artist_id'])))]
        result.append({
            'label': unescape_name(album['album_name']),
            'label2': album['artist_name'],
            'path': plugin.url_for('album', albumid=album['album_id']),
            'thumbnail': album['logo'],
            'context_menu': menu,
        })
    return plugin.finish(result, view_mode='thumbnail')
示例#8
0
def search_ix(query='**just_search**', page='0', id=-1, labs=None):
    """Search ixIRC for XDCC packets and build Kodi list items.

    :param query: search term; the sentinel '**just_search**' opens the
        on-screen keyboard instead.
    :param page: zero-based result page (string or int).
    :param id: index of the result whose metadata should be overridden.
    :param labs: optional info-labels dict (expects 'cover_url' and
        'backdrop_url') applied to the item at index *id*.
    :return: list of Kodi item dicts, plus a 'Next Page >>' entry when
        more pages are available.
    """
    # BUGFIX: the original used a mutable default (labs={}), which is
    # shared across calls; use None and create a fresh dict per call.
    if labs is None:
        labs = {}
    page = int(page)
    items = []
    ix_url = 'http://ixirc.com/api/'
    if query == '**just_search**':
        query = plugin.keyboard()
    results = requests.get(ix_url + '?q=%s&pn=%s' % (query, page)).json()
    total_pages = results['pc']
    results = results['results']
    idx = 0
    for item in results:
        try:
            play_url = plugin.url_for('stream', download=False,
                                      server=item['naddr'],
                                      channel=item['cname'],
                                      bot=item['uname'],
                                      packetId=item['n'],
                                      filename=item['name'])
            download_url = plugin.url_for('stream', download=True,
                                          server=item['naddr'],
                                          channel=item['cname'],
                                          bot=item['uname'],
                                          packetId=item['n'],
                                          filename=item['name'])
            metadata_url = plugin.url_for('assign_metadata', id=idx,
                                          search_term=query, page=page,
                                          from_XG=False, name=False,
                                          bot=False)
            plot = 'Size: %s \n Network: %s \n Channel: %s \n Bot: %s' % (
                item['szf'], item['nname'], item['cname'], item['uname'])
            items.append({
                'label': item['name'] + ' || Size : %s' % item['szf'],
                'info': {'title': item['name'], 'plot': plot},
                'path': play_url,
                'is_playable': True,
                'context_menu': [
                    ('Assign Metadata', actions.update_view(metadata_url)),
                    ('Just Download', actions.background(download_url)),
                    ('Delete File',
                     actions.background(plugin.url_for(
                         'delete_file', name=item['name'], all_files=False))),
                    ('Delete All Files',
                     actions.background(plugin.url_for(
                         'delete_file', name=item['name'], all_files=True))),
                ],
            })
        except Exception:
            # Malformed result entry -- skip it.  (Was a bare `except:`
            # which also swallowed SystemExit/KeyboardInterrupt.)
            continue
        try:
            if str(idx) == str(id):
                plugin.log.info("SUCCESS")
                items[idx]['info'] = labs
                items[idx]['thumbnail'] = labs['cover_url']
                items[idx]['properties'] = {'Fanart_Image': labs['backdrop_url']}
        except Exception:
            # labs may lack cover/backdrop keys -- keep the item's defaults.
            pass
        plugin.log.info('IDX INFO %s' % items[idx]['info'])
        idx += 1
    if page < total_pages:
        items.append({'label': 'Next Page >>',
                      'path': plugin.url_for('search_ix', query=query,
                                             page=str(page + 1))})
    return items
示例#9
0
    def get_artist_item(self, artist, in_library=False):
        """Build a Kodi list item for *artist*.

        Library items point to the artist's library albums and get a
        "remove from library" menu entry; otherwise the item points at
        the artist detail view and gets an "add to library" entry.
        """
        image = Image.get_url(Image.TYPE_ARTIST, artist.id,
                              Image.SIZE_ARTIST_ORIGINAL)
        item = {
            'label': artist.name,
            'thumbnail': image,
            'properties': {'fanart_image': image},
            'context_menu': [],
        }
        if in_library:
            item['path'] = self._plugin.url_for('artists_library_albums',
                                                artist_id=artist.id)
            remove_url = self._plugin.url_for('artists_library_remove',
                                              artist_id=artist.id)
            item['context_menu'].append((self._plugin.get_string(30217),
                                         actions.update_view(remove_url)))
        else:
            item['path'] = self._plugin.url_for('artists_detail',
                                                artist_id=artist.id)
            add_url = self._plugin.url_for('artists_library_add',
                                           artist_id=artist.id)
            item['context_menu'].append((self._plugin.get_string(30215),
                                         actions.background(add_url)))
        return item
示例#10
0
def collect(collectid):
    """List the songs of a collect (playlist) as playable music items.

    :param collectid: id of the collect to fetch.
    :return: plugin.finish() result in thumbnail view mode.
    """
    url = ROOT_URL + '/app/android/collect?id=%s' % collectid
    req = urllib2.Request(url, headers=HTTP_HEADERS)
    content = urllib2.urlopen(req, timeout=5).read()
    data = json.loads(content)
    result = []
    for item in data['collect']['songs']:
        title = unescape_name(item['name'])
        lyric = None
        if item['lyric'] and item['lyric'].startswith('http'):
            # BUGFIX: request the lyric URL; the original re-requested the
            # collect URL, so 'lyrics' always held the collect JSON instead.
            req = urllib2.Request(item['lyric'], headers=HTTP_HEADERS)
            lyric = urllib2.urlopen(req, timeout=5).read()
        result.append({
            'label': title,
            'label2': item['artist_name'],
            'path': item['location'],
            'info': {
                'type': 'music',
                'infoLabels': [('title', title), ('album', item['title']),
                               ('artist', item['artist_name']),
                               ('lyrics', lyric)]
            },
            'thumbnail': item['album_logo'],
            'is_playable': True,
            'context_menu': [
                (_L(30100),
                 actions.update_view(plugin.url_for(
                     'artist_top', artistid=item['artist_id']))),
                (_L(30101),
                 actions.update_view(plugin.url_for(
                     'album', albumid=item['album_id']))),
                (_L(30102),
                 actions.background(plugin.url_for(
                     'download_file', url=item['location']))),
            ]
        })
    #plugin.add_to_playlist(result, playlist='music')
    return plugin.finish(result, view_mode='thumbnail')
示例#11
0
    def get_album_item(self,
                       album,
                       show_artist=True,
                       in_library=False,
                       library_artist_id=None):
        """Build a Kodi list item for *album*.

        The label is "Album (Year)", with the artist appended in [LIGHT]
        markup when *show_artist* is set.  Library items point to the
        library track view and get a "remove" menu entry (scoped to
        *library_artist_id* when given); otherwise the item points at
        the album detail view and gets an "add to library" entry.
        """
        label = album.name + ' (' + str(album.get_release_date().year) + ')'
        if show_artist:
            label = label + '[LIGHT] / ' + album.artist.name + '[/LIGHT]'
        item = {
            'label': label,
            'thumbnail': Image.get_url(Image.TYPE_ALBUM, album.id,
                                       Image.SIZE_ALBUM_ORIGINAL),
            'properties': {
                'fanart_image': Image.get_url(Image.TYPE_ARTIST,
                                              album.artist.id,
                                              Image.SIZE_ARTIST_ORIGINAL),
            },
            'context_menu': [],
        }
        if in_library:
            item['path'] = self._plugin.url_for('albums_library_tracks',
                                                album_id=album.id)
            if library_artist_id is None:
                remove_url = self._plugin.url_for('albums_library_remove',
                                                  album_id=album.id)
            else:
                remove_url = self._plugin.url_for(
                    'artists_library_albums_remove',
                    artist_id=library_artist_id,
                    album_id=album.id)
            item['context_menu'].append((self._plugin.get_string(30217),
                                         actions.update_view(remove_url)))
        else:
            item['path'] = self._plugin.url_for('albums_detail',
                                                album_id=album.id)
            add_url = self._plugin.url_for('albums_library_add',
                                           album_id=album.id)
            item['context_menu'].append((self._plugin.get_string(30215),
                                         actions.background(add_url)))
        return item
def playlists_library():
    """List the user's library playlists, preceded by an 'add' entry."""
    items = [{'label': _(30250),
              'path': plugin.url_for('playlists_library_add')}]
    for pl in rhapsody.library.playlists():
        rename_url = plugin.url_for('playlists_library_rename',
                                    playlist_id=pl.id)
        remove_url = plugin.url_for('playlists_library_remove',
                                    playlist_id=pl.id)
        items.append({
            'label': pl.name,
            'path': plugin.url_for('playlists_library_detail',
                                   playlist_id=pl.id),
            'context_menu': [
                (_(30251), actions.update_view(rename_url)),
                (_(30252), actions.update_view(remove_url)),
            ],
        })
    return items
示例#13
0
 def make_unfavorite_ctx(self, station_id):
     """Build the (label, action) context-menu tuple that removes
     *station_id* from the favorites list.

     :param station_id: id of the station to remove.
     :return: tuple of (localized label, update_view action url).
     """
     target = self.plugin.url_for('remove_from_favorites',
                                  station_id=station_id)
     return (self.plugin.get_string(30121), actions.update_view(target))
示例#14
0
def search(domain):
    """Prompt for a keyword and search for artists or albums.

    :param domain: search domain, 'artist' or 'album'.
    """
    keywd = plugin.keyboard(heading='Keyword')
    if not keywd:
        # BUGFIX: this line was indented with a tab mixed into spaces,
        # which is a TabError under Python 3; normalized to spaces.
        return None

    url = ROOT_URL + "/search/%s?key=%s" % (domain, urllib.quote_plus(keywd))

    req = urllib2.Request(url, headers=HTTP_HEADERS)
    content = urllib2.urlopen(req, timeout=5).read()
    html = BeautifulSoup(content)
    result = []
    for item in html.findAll('div', {'class': re.compile('_item100_block')}):
        title = ''.join(item.find('p', {'class': 'name'}).a.findAll(text=True))
        thumb = item.find('img')['src']
        if domain == 'artist':
            url = item.find('a', {'class': 'artist100'})['href']
            artistid = url.split('/')[-1]
            result.append({
                'label': title,
                'path': plugin.url_for('artist_top', artistid=artistid),
                'thumbnail': thumb,
                'context_menu': [
                    (_L(30103), actions.update_view(plugin.url_for('topsongs', artistid=artistid))),
                    (_L(30104), actions.update_view(plugin.url_for('similar_artists', artistid=artistid))),
                ]
            })
        elif domain == 'album':
            album_node = item.find('a', {'class': 'CDcover100'})
            albumid = album_node['href'].split('/')[-1]
            artist_node = item.find('a', {'class': 'singer'})
            artistid = artist_node['href'].split('/')[-1]
            artist_name = artist_node.string
            result.append({
                'label': title,
                'label2': artist_name,
                'path': plugin.url_for('album', albumid=albumid),
                'thumbnail': thumb,
                'context_menu': [
                    (_L(30100), actions.update_view(plugin.url_for('artist_top', artistid=artistid))),
                ]
            })
    return plugin.finish(result, view_mode='thumbnail')
示例#15
0
def similar_artists(artistid):
    """List artists similar to *artistid*, each with top-songs and
    similar-artists context-menu entries.
    """
    url = ROOT_URL + '/app/android/artist-similar?id=' + artistid

    req = urllib2.Request(url, headers=HTTP_HEADERS)
    data = json.loads(urllib2.urlopen(req, timeout=5).read())
    result = []
    for entry in data['artists']:
        aid = entry['artist_id']
        menu = [
            (_L(30103),
             actions.update_view(plugin.url_for('topsongs', artistid=aid))),
            (_L(30104),
             actions.update_view(plugin.url_for('similar_artists',
                                                artistid=aid))),
        ]
        result.append({'label': unescape_name(entry['name']),
                       'path': plugin.url_for('artist_top', artistid=aid),
                       'thumbnail': entry['logo'],
                       'context_menu': menu})
    return plugin.finish(result, view_mode='thumbnail')
示例#16
0
def select_quality_menu(e):
    """Build the quality-selection context menu for an episode.

    :type e: Episode
    :return: list with one (label, action) tuple, or [] when the
        quality setting is disabled.
    """
    if plugin.get_setting('quality', int) <= 0:
        return []
    url = episode_url(e, True)
    if e.is_complete_season:
        return [(lang(40303), actions.update_view(url))]
    return [(lang(40301), actions.play_media(url))]
示例#17
0
def select_quality_menu(e):
    """Build the quality-selection context menu for an episode.

    :type e: Episode
    :return: list with one (label, action) tuple, or [] when the
        quality setting is disabled.
    """
    menu = []
    if plugin.get_setting('quality', int) > 0:
        url = episode_url(e, True)
        if e.is_complete_season:
            menu.append((lang(40303), actions.update_view(url)))
        else:
            menu.append((lang(40301), actions.play_media(url)))
    return menu
 def get_artist_item(self, artist, in_library=False):
     """Build a minimal Kodi list item for *artist*.

     Library items point to the artist's library albums and get a
     "remove from library" menu entry; otherwise the item points at the
     artist detail view and gets an "add to library" entry.
     """
     if in_library:
         path = self._plugin.url_for('artists_library_albums',
                                     artist_id=artist.id)
         entry = (self._plugin.get_string(30217),
                  actions.update_view(self._plugin.url_for(
                      'artists_library_remove', artist_id=artist.id)))
     else:
         path = self._plugin.url_for('artists_detail', artist_id=artist.id)
         entry = (self._plugin.get_string(30215),
                  actions.background(self._plugin.url_for(
                      'artists_library_add', artist_id=artist.id)))
     return {'label': artist.name, 'path': path, 'context_menu': [entry]}
示例#19
0
def bang_albums(type, style):
    """List chart albums for the given chart *type* and *style*."""
    url = ROOT_URL + '/web/bang-albums?type=%s&style=%s' % (type, style)
    plugin.log.debug(url)
    req = urllib2.Request(url, headers=HTTP_HEADERS)
    content = urllib2.urlopen(req, timeout=5).read()
    data = json.loads(content)

    result = []
    for entry in data['albums']:
        artist_view = plugin.url_for('artist_top',
                                     artistid=entry['artist_id'])
        result.append({
            'label': unescape_name(entry['album_name']),
            'label2': entry['artist_name'],
            'path': plugin.url_for('album', albumid=entry['album_id']),
            'thumbnail': entry['logo'],
            'context_menu': [(_L(30100),
                              actions.update_view(artist_view))],
        })
    return plugin.finish(result, view_mode='thumbnail')
示例#20
0
def rutor_details(catind, tid):
    """Yield Kodi items for a rutor.org torrent page: the requested
    torrent first, then any related torrents listed on the same page.

    :param catind: category index; selects content type and scraper name.
    :param tid: numeric torrent id.
    """
    import xbmcgui
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import get_quality_from_name
    from xbmctorrent.search import scrapers as search

    scraper_name = ""
    # Pick the category tuple matching catind; fall back to an empty stub.
    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])

    tid = int(tid)
    try:
        html_data = url_get(urljoin(BASE_URL, "torrent/%d/" % tid), headers=HEADERS)
    except Exception:
        # Dialog text: "Could not get data from the server".
        xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
        return

    soup = BeautifulSoup(html_data, "html5lib")
    if len(soup.select("div#download")) == 0:
        # it may occur if site redirects a request to his mirror
        # Dialog text: "Torrent does not exist. Refresh the list".
        xbmcgui.Dialog().ok(plugin.name, "Раздачи не существует. Обновите список")
        return

    div_index = soup.select("div#index")

    scrapers = search.Scrapers()

    details = soup.select("table#details")[0].findAll("tr")
    # NOTE(review): when the second details row contains a link, the table
    # appears to have an extra row, shifting seeds/peers/size by one --
    # derived from the index arithmetic below, confirm against live markup.
    shift = 1 if len(details[1].select("a")) > 0 else 0
    seeds = details[3 + shift].contents[1].text
    peers = details[4 + shift].contents[1].text
    size = details[7 + shift].contents[1].text
    size = size[:size.find(" ")]  # keep only the leading numeric token
    title = _rutor_cleantitle(soup.h1.text)
    label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
    item = {
        "label": label,
        "info": {"title": title}
    }

    # Enrich with scraped metadata when the search-lookup setting is on.
    if plugin.get_setting("rutor_usesearch", bool):
        meta = scrapers.scraper(scraper_name, item)
    else:
        meta = scrapers.default(item)

    meta["path"] = plugin.url_for("rutor_play", tid=tid)
    meta["is_playable"] = True
    meta.setdefault("stream_info", {}).update(get_quality_from_name(meta['label']))
    meta["context_menu"] = [("Play with Pulsar", actions.update_view(plugin.url_for("rutor_play_pulsar", tid=tid)))]

    # Scraper-internal fields that must not leak into the Kodi item.
    del meta["search"]
    del meta["subdir"]
    yield meta

    # Related-torrents table, when present on the page.
    if len(div_index) > 0:
        nodes = div_index[0].findAll("tr", class_=["gai", "tum"])
        nodes = [node for node in _rutor_filter_nodes(nodes)]

        for node in nodes:
            cells = node.findAll("td")
            seeds, peers = map(lambda x: x.text.strip(), cells[len(cells) - 1].findAll("span"))
            donwload_node, magnet_node, title_node = cells[1].findAll("a")
            size = cells[len(cells) - 2].text
            title = _rutor_cleantitle(title_node.text)
            # Torrent id is embedded in the href after "/torrent/".
            tid = int(title_node["href"][9:title_node["href"].find(u"/", 9)])
            label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)

            item = scrapers.default({
                "label": label,
                "info": {"title": title}
            })
            # Related items reuse the main torrent's artwork and metadata.
            item.update({
                "path": plugin.url_for("rutor_play", tid=tid),
                "is_playable": True,
                "thumbnail": meta["thumbnail"],
                "icon": meta["icon"]
            })
            item["info"].update(meta["info"])
            item["properties"].update(meta["properties"])
            item.setdefault("stream_info", {}).update(get_quality_from_name(item['label']))
            item["context_menu"] = [("Play with Pulsar", actions.update_view(plugin.url_for("rutor_play_pulsar", tid=tid)))]

            del item["search"]
            del item["subdir"]
            yield item
示例#21
0
def rutracker_page(catind, page, query=None):
    """Yield Kodi items for one rutracker.org forum page.

    Yields, in order: a search entry (only on the topmost category page),
    sub-forum links, the page's torrent topics (enriched with scraped
    metadata on a thread pool behind a progress dialog), and finally a
    "next page" entry.

    :param catind: category index; selects content type and scraper name.
    :param page: zero-based page number (50 topics per page).
    :param query: forum id to list; equals *catind* on the topmost page.
    """
    import urllib, xbmc
    from bs4 import BeautifulSoup, SoupStrainer
    from urlparse import urljoin
    from contextlib import closing
    from itertools import izip
    from concurrent import futures
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.utils import terminating, SafeDialogProgress
    from urlparse import urlparse

    scraper_name = ""
    # Pick the category tuple matching catind; fall back to an empty stub.
    category = ([cat for cat in CATEGORIES if cat[0] == catind]
                or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    page = int(page)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        # Dialog text: "Fetching torrent information...".
        dialog.update(percent=0,
                      line1=u"Получение информации о раздачах...",
                      line2="",
                      line3="")

        items = []
        try:
            start_index = 0
            url = urljoin(
                BASE_URL,
                "viewforum.php?f=%s&start=%s" % (query, str(page * 50)))
            html_data = url_get(url, headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            # find subforums
            nodes = soup.findAll("h4", class_=["forumlink"])

            #Add search item on topmost page:
            if catind == query:
                # Label text: "[ Search ]".
                yield {
                    "label": u"[COLOR FFFFFF00][ Поиск ][/COLOR]",
                    "path": plugin.url_for("rutracker_search", catind=catind),
                }

            plugin.log.debug("Loading forum nodes")
            for node in nodes:
                link = node.find("a")
                plugin.log.debug("Forum link: " + str(link))
                link_href = link["href"]
                # find forum id in href:
                forumid = int(link_href[link_href.find(u"=") +
                                        1:len(link_href)])
                item = {
                    "label":
                    link.text,
                    "path":
                    plugin.url_for("rutracker_page",
                                   catind=catind,
                                   page=0,
                                   query=forumid),
                    "info": {
                        "title": link.text
                    },
                    "is_playable":
                    False,
                }
                yield item

            # Torrent topic rows of this page.
            nodes = soup.findAll("td", class_=["topic_id"])

            for node in nodes:
                id = node["id"]
                title_node = node.parent.find(id='tt-%s' % str(id))
                title = _rutracker_cleantitle(title_node.text)
                row_node = node.parent \
                    #find "size" table cell - it should have download link

                size_td = row_node.find_all("td")[2]

                #check if size node has download link to torrent file:
                if size_td:
                    size_link = size_td.find("a", class_=["small"])
                    if size_link:
                        size = size_link.text
                        seeds = size_td.find("span", class_=["seedmed"]).b.text
                        peers = size_td.find("span",
                                             class_=["leechmed"]).b.text
                        size = size_td.find("a", class_=["small"]).text
                        label = "%s | %s (S:%s P:%s)" % (title, size, seeds,
                                                         peers)
                        item = {
                            "label":
                            label,
                            "path":
                            plugin.url_for("rutracker_play", tid=id),
                            "info": {
                                "title": title
                            },
                            "is_playable":
                            False,
                            "context_menu":
                            [("Play with Pulsar",
                              actions.update_view(
                                  plugin.url_for("rutracker_play_pulsar",
                                                 tid=id)))]
                        }
                        items.append(item)
                        plugin.log.debug("Item added: " +
                                         title.encode('utf-8'))
        except:
            # NOTE(review): bare except also hides programming errors.
            # Dialog text: "Could not get data from the server".
            plugin.log.error("Unexpected error: %s" %
                             format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name,
                                "Не удалось получить данные от сервера")
            return

        def _get_torrent_info(item):
            # Enrich one topic item with scraped metadata and stream info.
            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            try:
                scrapers = search.Scrapers()
                if not plugin.get_setting("rutracker_usesearch", bool):
                    meta = scrapers.default(item)
                else:
                    meta = scrapers.scraper(scraper_name, item)

                meta["path"] = item["path"]
                meta["is_playable"] = item["is_playable"]
                meta.setdefault("stream_info", {}).update(
                    get_quality_from_name(meta['label']))
                return meta
            except:
                plugin.log.error("Unexpected error: %s" %
                                 format_exc().split('\n')[-2])
                return scrapers.default(item)

        # Progress counter shared with the pool callback below.
        state = {"done": 0}

        def on_done(data):
            # Advance the progress dialog each time a metadata job finishes.
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(items)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        # Scrape metadata on a small thread pool, polling so the progress
        # dialog stays responsive and cancelable.
        with terminating(ThreadPool(5)) as pool:
            jobs = [
                pool.apply_async(_get_torrent_info, [item], callback=on_done)
                for item in items
            ]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        for job in jobs:
            item = job.get()
            # Scraper-internal fields that must not leak into Kodi items.
            del item["search"]
            del item["subdir"]
            yield item

        # Pagination entry; label text: "[Next >]".
        next_page = {
            "label":
            u"[Далее >]",
            "path":
            plugin.url_for("rutracker_page",
                           catind=catind,
                           page=page + 1,
                           query=query),
            "is_playable":
            False,
        }
        yield next_page
示例#22
0
def rutracker_search_page(catind, page, search=None, search_id=None):
    """Search rutracker and yield directory items for the hits.

    ``catind`` indexes CATEGORIES (selects content type and scraper name),
    ``page`` is the 0-based result page (50 hits per page).  The first
    request POSTs the query; rutracker answers with a ``search_id`` that is
    reused (as a GET parameter) for subsequent pages.
    """
    import urllib, xbmc
    from bs4 import BeautifulSoup, SoupStrainer
    from urlparse import urljoin
    from contextlib import closing
    from itertools import izip
    from concurrent import futures
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.utils import terminating, SafeDialogProgress
    from urlparse import urlparse

    scraper_name = ""
    # Fall back to a dummy category tuple when catind is unknown.
    category = ([cat for cat in CATEGORIES if cat[0] == catind]
                or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    if plugin.request.args.get("search_id"):
        search_id = plugin.request.args.get("search_id")[0]
    page = int(page)
    catind = int(catind)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0,
                      line1=u"Получение информации о раздачах...",
                      line2="",
                      line3="")

        items = []
        try:
            start_index = 0
            url = urljoin(BASE_URL, "search.php?")

            # Follow-up pages reuse the server-issued search_id via GET;
            # the initial query is sent as a POST form instead.
            if search_id:
                params = {}
                params["nm"] = search
                if int(page) > 0:
                    params["start"] = int(page) * 50
                params["id"] = search_id
                html_data = url_get(url, headers=HEADERS, params=params)
            else:
                post_body = {"nm": search, "fsf": catind}
                html_data = url_get(url, headers=HEADERS, post=post_body)

            soup = BeautifulSoup(html_data, "html5lib")

            # Pager links carry the search_id needed for the next page.
            node = soup.find("a", class_=["pg"])
            if node:
                r = search_id_parser.search(node['href'])
                if r:
                    plugin.log.debug("Search id found: " + str(r.group(1)))
                    search_id = str(r.group(1))

            nodes = soup.findAll("a", class_=["topictitle"])

            for link in nodes:
                try:
                    title = _rutracker_cleantitle(link.text)
                    r = topic_id_parser.search(link['href'])
                    if r:
                        id = r.group(1)
                        label = "%s" % (title)
                        item = {
                            "label":
                            label,
                            "path":
                            plugin.url_for("rutracker_play", tid=id),
                            "info": {
                                "title": title
                            },
                            "is_playable":
                            False,
                            "context_menu":
                            [("Play with Pulsar",
                              actions.update_view(
                                  plugin.url_for("rutracker_play_pulsar",
                                                 tid=id)))]
                        }
                        items.append(item)
                        plugin.log.debug("Item added: " +
                                         title.encode('utf-8'))
                except:
                    plugin.log.error("Unexpected error: %s \r Skipping item" %
                                     format_exc().split('\n')[-2])
        except:
            plugin.log.error("Unexpected error: %s" %
                             format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name,
                                "Не удалось получить данные от сервера")
            return

        def _get_torrent_info(item):
            """Enrich one item with scraped metadata (runs on a worker thread)."""

            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            try:
                scrapers = search.Scrapers()
                if not plugin.get_setting("rutracker_usesearch", bool):
                    meta = scrapers.default(item)
                else:
                    meta = scrapers.scraper(scraper_name, item)
                plugin.log.debug("RUTRACKER: Meta information received")
                meta["path"] = item["path"]
                meta["is_playable"] = item["is_playable"]
                meta.setdefault("stream_info", {}).update(
                    get_quality_from_name(meta['label']))
                plugin.log.debug("RUTRACKER: Meta path updated")
                return meta
            except:
                # NOTE(review): if Scrapers() itself raised, ``scrapers`` is
                # unbound here and this fallback raises NameError.
                plugin.log.error(
                    "RUTRACKER: Unexpected error: %s parsing item [%s]" %
                    (format_exc().split('\n')[-2], str(item)))
                return scrapers.default(item)

        state = {"done": 0}

        def on_done(data):
            """Progress callback: bump the counter and refresh the dialog."""
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(items)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        # Scrape metadata on a small thread pool while polling so the
        # progress dialog stays responsive and cancellable.
        with terminating(ThreadPool(5)) as pool:
            jobs = [
                pool.apply_async(_get_torrent_info, [item], callback=on_done)
                for item in items
            ]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        for job in jobs:
            try:
                item = job.get()
                # internal scraper bookkeeping keys - not valid listitem data
                del item["search"]
                del item["subdir"]
                yield item
            except:
                plugin.log.error(
                    "RUTRACKER: Unexpected error: %s parsing item [%s]" %
                    (format_exc().split('\n')[-2], str(item)))
        # Paging is only possible once the server has issued a search_id.
        if search_id:
            next_page = {
                "label":
                u"[Далее >]",
                "path":
                plugin.url_for("rutracker_search_page",
                               catind=catind,
                               page=page + 1,
                               search=search,
                               search_id=search_id),
                "is_playable":
                False,
            }
            yield next_page
示例#23
0
def rutor_details(catind, tid):
    """Yield list items for one rutor torrent details page.

    Yields the torrent itself first, then one item per related torrent from
    the page's "index" table.  ``catind`` selects the CATEGORIES entry (and
    thus the metadata scraper name); ``tid`` is the rutor topic id.

    Fixes over the previous revision: removed the unused ``urllib`` and
    ``SoupStrainer`` imports and the dead ``scraper_name = ""``
    pre-assignment, corrected the misspelled ``donwload_node`` local, and
    narrowed the bare ``except:`` around the HTTP fetch.
    """
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from xbmctorrent.utils import get_quality_from_name
    from xbmctorrent.search import scrapers as search

    # Fall back to a dummy category tuple when catind is unknown.
    category = ([cat for cat in CATEGORIES if cat[0] == catind]
                or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])

    tid = int(tid)
    try:
        html_data = url_get(urljoin(BASE_URL, "torrent/%d/" % tid),
                            headers=HEADERS)
    except Exception:
        import xbmcgui
        xbmcgui.Dialog().ok(plugin.name,
                            "Не удалось получить данные от сервера")
        return

    soup = BeautifulSoup(html_data, "html5lib")
    # Also validates the page layout: raises IndexError on unexpected markup.
    div_download = soup.select("div#download")[0]
    div_index = soup.select("div#index")

    # No "related torrents" table at all: go straight to playback.
    if len(div_index) == 0:
        plugin.redirect(plugin.url_for("rutor_play", tid=tid))
        return

    scrapers = search.Scrapers()

    # Fixed row layout of the details table: seeds, peers and size live at
    # known offsets.
    details = soup.select("table#details")[0].findAll("tr")
    seeds = details[4].contents[1].text
    peers = details[5].contents[1].text
    size = details[8].contents[1].text
    size = size[:size.find(" ")]
    title = _rutor_cleantitle(soup.h1.text)
    label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
    item = {"label": label, "info": {"title": title}}

    if plugin.get_setting("rutor_usesearch", bool):
        meta = scrapers.scraper(scraper_name, item)
    else:
        meta = scrapers.default(item)

    meta["path"] = plugin.url_for("rutor_play", tid=tid)
    meta["is_playable"] = False
    meta.setdefault("stream_info",
                    {}).update(get_quality_from_name(meta['label']))
    meta["context_menu"] = [
        ("Play with Pulsar",
         actions.update_view(plugin.url_for("rutor_play_pulsar", tid=tid)))
    ]

    # Internal scraper bookkeeping keys - not valid listitem data.
    del meta["search"]
    del meta["subdir"]
    yield meta

    if len(div_index) > 0:
        nodes = div_index[0].findAll("tr", class_=["gai", "tum"])
        nodes = [node for node in _rutor_filter_nodes(nodes)]

        for node in nodes:
            cells = node.findAll("td")
            seeds, peers = map(lambda x: x.text.strip(),
                               cells[len(cells) - 1].findAll("span"))
            # Only the title anchor is used; the download/magnet anchors are
            # unpacked to reach it.
            download_node, magnet_node, title_node = cells[1].findAll("a")
            size = cells[len(cells) - 2].text
            title = _rutor_cleantitle(title_node.text)
            tid = int(title_node["href"][9:title_node["href"].find(u"/", 9)])
            label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)

            item = scrapers.default({"label": label, "info": {"title": title}})
            item.update({
                "path": plugin.url_for("rutor_play", tid=tid),
                "is_playable": False,
                "thumbnail": meta["thumbnail"],
                "icon": meta["icon"]
            })
            item["info"].update(meta["info"])
            item["properties"].update(meta["properties"])
            item.setdefault("stream_info",
                            {}).update(get_quality_from_name(item['label']))
            item["context_menu"] = [("Play with Pulsar",
                                     actions.update_view(
                                         plugin.url_for("rutor_play_pulsar",
                                                        tid=tid)))]

            del item["search"]
            del item["subdir"]
            yield item
示例#24
0
def rutracker_page(catind, page, query=None):
    """List one rutracker forum page: subforums first, then torrent topics.

    ``catind`` indexes CATEGORIES, ``query`` is the forum id to open (the
    topmost page has ``query == catind`` and also gets a search entry),
    ``page`` is the 0-based page number (50 topics per page).
    """
    import urllib, xbmc
    from bs4 import BeautifulSoup, SoupStrainer
    from urlparse import urljoin
    from contextlib import closing
    from itertools import izip
    from concurrent import futures
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.utils import terminating, SafeDialogProgress
    from urlparse import urlparse

    scraper_name = ""
    # Fall back to a dummy category tuple when catind is unknown.
    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    page = int(page)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о раздачах...", line2="", line3="")

        items = []
        try:
            start_index = 0
            url = urljoin(BASE_URL, "viewforum.php?f=%s&start=%s" % (query, str(page * 50)))
            html_data = url_get(url, headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            # find subforums
            nodes = soup.findAll("h4", class_=["forumlink"])

            #Add search item on topmost page:
            if catind == query:
                yield {
                    "label": u"[COLOR FFFFFF00][ Поиск ][/COLOR]",
                    "path": plugin.url_for("rutracker_search", catind=catind),
                }

            plugin.log.debug("Loading forum nodes")
            for node in nodes:
                link = node.find("a")
                plugin.log.debug("Forum link: " + str(link))
                link_href = link["href"]
                # find forum id in href:
                forumid = int(link_href[link_href.find(u"=") + 1:len(link_href)])
                item = {
                    "label": link.text,
                    "path": plugin.url_for("rutracker_page", catind=catind, page=0, query=forumid),
                    "info": {"title": link.text},
                    "is_playable": False,
                }
                yield item

            nodes = soup.findAll("td", class_=["topic_id"])

            for node in nodes:
                id = node["id"]
                title_node = node.parent.find(id='tt-%s' % str(id))
                title = _rutracker_cleantitle(title_node.text)
                # NOTE(review): the trailing backslash splices the following
                # comment line into this statement - valid but fragile.
                row_node = node.parent \
                    #find "size" table cell - it should have download link
                size_td = row_node.find_all("td")[2]

                #check if size node has download link to torrent file:
                if size_td:
                    size_link = size_td.find("a", class_=["small"])
                    if size_link:
                        size = size_link.text
                        seeds = size_td.find("span", class_=["seedmed"]).b.text
                        peers = size_td.find("span", class_=["leechmed"]).b.text
                        # NOTE(review): ``size`` was already read above; this
                        # second lookup is redundant.
                        size = size_td.find("a", class_=["small"]).text
                        label = "%s | %s (S:%s P:%s)" % ( title, size, seeds, peers)
                        item = {
                            "label": label,
                            "path": plugin.url_for("rutracker_play", tid=id),
                            "info": {"title": title},
                            "is_playable": False,
                            "context_menu": [
                                ("Play with Pulsar", actions.update_view(plugin.url_for("rutracker_play_pulsar", tid=id)))
                            ]
                        }
                        items.append(item)
                        plugin.log.debug("Item added: " + title.encode('utf-8'))
        except:

            plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return

        def _get_torrent_info(item):
            """Enrich one item with scraped metadata (runs on a worker thread)."""

            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            try:
                scrapers = search.Scrapers()
                if not plugin.get_setting("rutracker_usesearch", bool):
                    meta = scrapers.default(item)
                else:
                    meta = scrapers.scraper(scraper_name, item)

                meta["path"] = item["path"]
                meta["is_playable"] = item["is_playable"]
                meta.setdefault("stream_info", {}).update(get_quality_from_name(meta['label']))
                return meta
            except:
                # NOTE(review): if Scrapers() itself raised, ``scrapers`` is
                # unbound here and this fallback raises NameError.
                plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
                return scrapers.default(item)

        state = {"done": 0}

        def on_done(data):
            """Progress callback: bump the counter and refresh the dialog."""
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(items)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        # Scrape metadata on a small thread pool; poll so the dialog stays
        # responsive and cancellable.
        with terminating(ThreadPool(5)) as pool:
            jobs = [pool.apply_async(_get_torrent_info, [item], callback=on_done) for item in items]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        for job in jobs:
            item = job.get()
            # internal scraper bookkeeping keys - not valid listitem data
            del item["search"]
            del item["subdir"]
            yield item

        next_page = {
            "label": u"[Далее >]",
            "path": plugin.url_for("rutracker_page", catind=catind, page=page + 1, query=query),
            "is_playable": False,
        }
        yield next_page
示例#25
0
def rutracker_page(catind, page, query=None):
    """List one rutracker forum page (concurrent.futures variant).

    Yields subforum entries, then topic entries enriched with scraped
    metadata; ``query`` is the forum id, ``page`` the 0-based page number
    (50 topics per page).
    """
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from concurrent import futures
    from contextlib import closing
    from xbmctorrent.utils import SafeDialogProgress

    scraper_name = ""
    # Fall back to a dummy category tuple when catind is unknown.
    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    page = int(page)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о раздачах...", line2="", line3="")

        items = []
        try:
            url = urljoin(BASE_URL, "viewforum.php?f=%s&start=%s" % (query, str(page * 50)))
            html_data = url_get(url, headers=HEADERS)
            soup = BeautifulSoup(html_data, "html5lib")
            # find subforums
            nodes = soup.findAll("h4", class_=["forumlink"])

            # Add search item on topmost page:
            if catind == query:
                yield {
                    "label": u"[COLOR FFFFFF00][ Поиск ][/COLOR]",
                    "path": plugin.url_for("rutracker_search", catind=catind),
                }

            for node in nodes:
                link = node.find("a")
                link_href = link["href"]
                # find forum id in href:
                forumid = int(link_href[link_href.find(u"=") + 1:len(link_href)])
                item = {
                    "label": link.text,
                    "path": plugin.url_for("rutracker_page", catind=catind, page=0, query=forumid),
                    "info": {"title": link.text},
                    "is_playable": False,
                }
                yield item

            nodes = soup.findAll("a", class_="torTopic")

            for node in nodes:
                # anchor id has the form "tt-<topic id>"
                id = node["id"].replace("tt-", "")
                title = _rutracker_cleantitle(node.text)
                parent_node = node.parent.parent.parent
                size_node = parent_node.find("a", class_=["dl-stub"])

                # only rows that carry a torrent-file link are listed
                if size_node:
                    size = _rutracker_cleantitle(size_node.text)
                    seeds = parent_node.find("span", class_=["seedmed"]).b.text
                    peers = parent_node.find("span", class_=["leechmed"]).b.text
                    label = "%s | %s (S:%s P:%s)" % (title, size, seeds, peers)
                    item = {
                        "label": label,
                        "path": plugin.url_for("rutracker_play", tid=id),
                        "info": {"title": title},
                        "is_playable": True,
                        "context_menu": [
                            ("Play with Pulsar", actions.update_view(plugin.url_for("rutracker_play_pulsar", tid=id)))
                        ]
                    }
                    items.append(item)
        except Exception:
            plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return

        # Enrich all topics concurrently while keeping the dialog cancellable.
        with futures.ThreadPoolExecutor(max_workers=5) as pool:
            from xbmctorrent.utils import get_item_info

            usesearch = plugin.get_setting("rutracker_usesearch", bool)
            jobs = [pool.submit(get_item_info, i, usesearch and scraper_name or None) for i in items]
            state = {"done": 0}

            def on_done(future):
                """Progress callback: refresh the dialog per finished job."""
                if not future.cancelled():
                    state["done"] += 1
                    data = future.result()
                    dialog.update(
                        percent=int(state["done"] * 100.0 / len(jobs)),
                        line2=data["info"].get("title") or data.get("label") or "",
                    )

            [job.add_done_callback(on_done) for job in jobs]
            while not all(job.done() for job in jobs):
                if dialog.iscanceled():
                    [job.cancel() for job in jobs]
                    return
                xbmc.sleep(100)
            items = [job.result() for job in jobs if not job.cancelled()]

        for item in items:
            # internal scraper bookkeeping keys - not valid listitem data
            del item["search"]
            del item["subdir"]
            yield item

        next_page = {
            "label": u"[COLOR FF00FF00][Далее >][/COLOR]",
            "path": plugin.url_for("rutracker_page", catind=catind, page=page + 1, query=query),
            "is_playable": False,
        }
        yield next_page
 def get_track_item(self, track, album=None, show_artist=True, in_library=False, in_favorites=False,
                    in_playlists=False, playlist_id=None, library_album_id=None):
     """Build an xbmcswift2 list item dict for *track*.

     The boolean flags select which add/remove context-menu entries are
     attached (library, favorites, playlists).  ``album`` supplies the
     thumbnail when available; otherwise the play route is told the
     thumbnail is missing.
     """
     if show_artist:
         label = track.artist.name + ' - ' + track.name
     else:
         label = track.name
     item = {
         'label': label,
         'is_playable': True,
         'info': {
             'title': track.name,
             'artist': track.artist.name,
             'album': track.album.name,
             'duration': track.duration
         },
         'context_menu': []
     }
     # "Go to artist" entry is always present.
     item['context_menu'].append((
         self._plugin.get_string(30255).format(track.artist.name),
         actions.update_view(self._plugin.url_for('artists_detail', artist_id=track.artist.id))
     ))
     # Library: offer remove (track-level or album-scoped) when already in
     # the library, otherwise offer add.
     if in_library:
         if library_album_id is None:
             action = actions.update_view(self._plugin.url_for('tracks_library_remove',
                                                               track_id=track.id))
         else:
             action = actions.update_view(self._plugin.url_for('albums_library_tracks_remove',
                                                               track_id=track.id,
                                                               album_id=library_album_id))
         item['context_menu'].append((self._plugin.get_string(30217), action))
     else:
         item['context_menu'].append((
             self._plugin.get_string(30215),
             actions.background(self._plugin.url_for('tracks_library_add', track_id=track.id))))
     # Favorites: remove when present, add otherwise.
     if in_favorites:
         item['context_menu'].append((
             self._plugin.get_string(30218),
             actions.update_view(self._plugin.url_for('favorites_remove', track_id=track.id))))
     else:
         item['context_menu'].append((
             self._plugin.get_string(30216),
             actions.background(self._plugin.url_for('favorites_add', track_id=track.id))))
     # Playlists: in a playlist view offer removal; elsewhere offer one
     # "add to <playlist>" entry per library playlist.
     if in_playlists:
         item['context_menu'].append((
             self._plugin.get_string(30254),
             actions.update_view(self._plugin.url_for('playlists_library_remove_track',
                                                      track_id=track.id, playlist_id=playlist_id))))
     else:
         # Cache the playlist list so building many track items does not
         # hit the API once per track.
         playlists = self._cache.get('playlists', None)
         if playlists is None:
             playlists = self._api.library.playlists()
             self._cache['playlists'] = playlists
         for playlist in playlists:
             item['context_menu'].append((
                 self._plugin.get_string(30253).format(playlist.name),
                 actions.background(self._plugin.url_for('playlists_library_add_track',
                                                         track_id=track.id, playlist_id=playlist.id))))
     if album is None:
         thumbnail_missing = True
     else:
         thumbnail_missing = False
         item['thumbnail'] = album.images[0].get_url(size=Image.SIZE_ORIGINAL)
         # item['info']['tracknumber'] = [i for i, j in enumerate(album.tracks) if j.id == track.id][0] + 1
     item['path'] = self._plugin.url_for(
         'play',
         track_id=track.id,
         album_id=track.album.id,
         duration=track.duration,
         thumbnail_missing=thumbnail_missing)
     return item
示例#27
0
def search(search_term='first_page', page='1', id=None, labs=None):
    """Query the XG web API for packets matching *search_term* and list them.

    On the first call (``search_term == 'first_page'``) the user is prompted
    for a query via the on-screen keyboard.  ``id``/``labs`` let the
    'assign_metadata' route re-enter this view and attach scraped info and
    artwork to the matching list item.  A "Next Page" entry is always
    appended.
    """
    if search_term == 'first_page':
        keyboard = xbmc.Keyboard('', 'Enter Search Term', False)
        keyboard.doModal()
        if keyboard.isConfirmed():
            search_term = keyboard.getText()
    search_packets = 'packets.json?searchTerm=%s&maxResults=20&page=%s' % (search_term, page)
    results = requests.get(api_url + search_packets, headers=headers).json()

    items = []
    for idx, option in enumerate(results['Results']):
        guid_url = api_url + 'packets/%s/enable.json' % (option['Guid'])
        item = {
            'label': option['Name'] + ' || Size: %d MB' % int(option['Size'] / 1000000),
            'path': plugin.url_for('play_file', url=guid_url, name=option['Name']),
            'is_playable': True,
            'context_menu': [
                ('Assign Metadata', actions.update_view(plugin.url_for('assign_metadata', id=idx, search_term=search_term, page=page, from_XG=True, name=False, bot=False))),
                ('Just Download', actions.background(plugin.url_for('just_download', url=guid_url, data=False))),
                ('Delete File', actions.background(plugin.url_for('delete_file', name=option['Name'], all_files=False))),
                ('Delete All Files', actions.background(plugin.url_for('delete_file', name=option['Name'], all_files=True))),
            ],
        }
        try:
            # Re-attach metadata previously chosen through 'assign_metadata';
            # best-effort: missing/partial labs leaves the item plain.
            if str(idx) == str(id):
                item['info'] = labs
                item['thumbnail'] = labs['cover_url']
                item['properties'] = {'Fanart_Image': labs['backdrop_url']}
        except Exception:
            pass
        items.append(item)

    items.append({'label': 'Next Page >>', 'path': plugin.url_for('search', search_term=search_term, page=str(int(page) + 1))})
    return plugin.finish(items)
示例#28
0
def rutracker_search_page(catind, page, search=None, search_id=None):
    """Search rutracker and yield playable items (concurrent.futures variant).

    The first request POSTs the query; rutracker replies with a
    ``search_id`` that is reused as a GET parameter for subsequent
    50-hit pages.
    """
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from concurrent import futures
    from contextlib import closing
    from xbmctorrent.utils import SafeDialogProgress

    scraper_name = ""
    # Fall back to a dummy category tuple when catind is unknown.
    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    if plugin.request.args.get("search_id"):
        search_id = plugin.request.args.get("search_id")[0]
    page = int(page)
    catind = int(catind)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о раздачах...", line2="", line3="")

        items = []
        try:
            url = urljoin(BASE_URL, "search.php?")

            # Follow-up pages reuse the server-issued search_id via GET;
            # the initial query is sent as a POST form instead.
            if search_id:
                params = {}
                params["nm"] = search
                if int(page) > 0:
                    params["start"] = int(page) * 50
                params["id"] = search_id
                html_data = url_get(url, headers=HEADERS, params=params)
            else:
                post_body = {"nm": search, "fsf": catind}
                html_data = url_get(url, headers=HEADERS, post=post_body)

            soup = BeautifulSoup(html_data, "html5lib")

            # Pager links carry the search_id needed for the next page.
            node = soup.find("a", class_=["pg"])
            if node:
                r = search_id_parser.search(node['href'])
                if r:
                    plugin.log.debug("Search id found: " + str(r.group(1)))
                    search_id = str(r.group(1))

            for link in soup.findAll("a", class_=["topictitle"]):
                try:
                    title = _rutracker_cleantitle(link.text)
                    r = topic_id_parser.search(link['href'])
                    if r:
                        id = r.group(1)
                        label = "%s" % (title)
                        item = {
                            "label": label,
                            "path": plugin.url_for("rutracker_play", tid=id),
                            "info": {"title": title},
                            "is_playable": True,
                            "context_menu": [
                                ("Play with Pulsar", actions.update_view(plugin.url_for("rutracker_play_pulsar", tid=id)))
                            ]
                        }
                        items.append(item)
                except Exception:
                    plugin.log.error("Unexpected error: %s \r Skipping item" % format_exc().split('\n')[-2])
        except Exception:
            plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return

        # Enrich all hits concurrently while keeping the dialog cancellable.
        with futures.ThreadPoolExecutor(max_workers=5) as pool:
            from xbmctorrent.utils import get_item_info

            usesearch = plugin.get_setting("rutracker_usesearch", bool)
            jobs = [pool.submit(get_item_info, i, usesearch and scraper_name or None) for i in items]
            state = {"done": 0}

            def on_done(future):
                """Progress callback: refresh the dialog per finished job."""
                if not future.cancelled():
                    state["done"] += 1
                    data = future.result()
                    dialog.update(
                        percent=int(state["done"] * 100.0 / len(jobs)),
                        line2=data["info"].get("title") or data.get("label") or "",
                    )

            [job.add_done_callback(on_done) for job in jobs]
            while not all(job.done() for job in jobs):
                if dialog.iscanceled():
                    [job.cancel() for job in jobs]
                    return
                xbmc.sleep(100)
            items = [job.result() for job in jobs if not job.cancelled()]

        for item in items:
            # internal scraper bookkeeping keys - not valid listitem data
            del item["search"]
            del item["subdir"]
            yield item

        # Paging is only possible once the server has issued a search_id.
        if search_id:
            next_page = {
                "label": u"[COLOR FF00FF00][Далее >][/COLOR]",
                "path": plugin.url_for("rutracker_search_page", catind=catind, page=page + 1, search=search, search_id=search_id),
                "is_playable": False,
            }
            yield next_page
def BRIDGE(url):
    """Forward *url* through actions.update_view and return the action string.

    The previous revision discarded the return value, making the call a
    no-op for any caller that expected the built action back.
    """
    return actions.update_view(url)
示例#30
0
def list_packlist(name, search_term='list_all', bot='list_all', page='1', labs={}, id=''):
    """Show one 20-item page of the cached XDCC packlist for channel *name*.

    ``bot`` and ``search_term`` filter the list ('list_all' disables the
    respective filter); ``labs``/``id`` let 'assign_metadata' re-attach
    scraped info/artwork to a single entry.  ``labs`` is read-only here, so
    the mutable default is safe.

    Fixes over the previous revision: an empty ``search_term.split()`` no
    longer raises NameError (``all_Terms`` was unbound), the duplicated
    item construction in the two branches is factored into a helper, and
    pagination is computed against the list actually being displayed.
    """
    page = int(page)
    cache = plugin.get_storage('%s' % (name))
    packlist = cache['packlist']
    prev = (page - 1) * 20
    curr = page * 20

    if bot != 'list_all':
        # Keep only packs offered by the requested bot.
        packlist = [pack for pack in packlist if bot == pack['bot']]

    if search_term != 'list_all':
        search_terms = search_term.split()
        plugin.log.info('Search Terms %s' % search_terms)
        # Every term must appear in the filename (case-insensitive).
        # all() on an empty term list is True, i.e. no filtering.
        packlist = [pack for pack in packlist
                    if all(term.lower() in pack['filename'].lower()
                           for term in search_terms)]

    def _entry(pack, idx):
        # Build one directory entry (shared by filtered and unfiltered paths).
        entry = {
            'label': pack['filename'] + ' || Size: ' + str(pack['size']) + ' MB || Bot : ' + pack['bot'],
            'path': plugin.url_for('stream', download=False, server=cache['server'], channel=name, bot=pack['bot'], packetId=pack['packetId'], filename=pack['filename']),
            'is_playable': True,
            'context_menu': [
                ('Assign Metadata', actions.update_view(plugin.url_for('assign_metadata', id=idx, search_term=search_term, page=page, name=name, bot=bot, from_XG=False))),
                ('Refresh Packlist', actions.background(plugin.url_for('refresh', name=name))),
                ('Just Download', actions.background(plugin.url_for('stream', download=True, server=cache['server'], channel=name, bot=pack['bot'], packetId=pack['packetId'], filename=pack['filename']))),
                ('Delete File', actions.background(plugin.url_for('delete_file', name=pack['filename'], all_files=False))),
                ('Delete All Files', actions.background(plugin.url_for('delete_file', name=pack['filename'], all_files=True))),
            ],
        }
        try:
            # Re-attach metadata selected through 'assign_metadata';
            # best-effort: missing/partial labs leaves the entry plain.
            if str(idx) == str(id):
                entry['info'] = labs
                entry['thumbnail'] = labs['cover_url']
                entry['properties'] = {'Fanart_Image': labs['backdrop_url']}
        except Exception:
            pass
        return entry

    items = [_entry(pack, idx) for idx, pack in enumerate(packlist[prev:curr])]

    # Paginate against the (possibly filtered) list actually shown.
    if curr <= len(packlist):
        items.append({'label': 'Next Page >>', 'path': plugin.url_for('list_packlist', name=name, search_term=search_term, bot=bot, page=str(page + 1))})
    if page > 1:
        items.insert(0, {'label': '<< Previous Page', 'path': plugin.url_for('list_packlist', name=name, search_term=search_term, bot=bot, page=str(page - 1))})
    return items
示例#31
0
def lostfilm_index(page=""):
    """Yield listitems for the LostFilm "new episodes" page *page*.

    Generator: first a link to the full series list, then one playable
    item per episode scraped from ``<BASE_URL>new/page_<page>``, and
    finally a "next page" link when the current page looks full
    (>= 10 episodes).

    :param page: page number as a string; "" selects the first page.
    """
    from bs4 import BeautifulSoup
    from contextlib import closing
    from xbmcswift2 import actions
    from xbmctorrent.utils import SafeDialogProgress

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1="Получение информации ...", line2="", line3="")

        # Fetch and parse the episode list; bail out with a dialog on any
        # network/parse failure instead of raising into the plugin runtime.
        try:
            html_data = url_get(BASE_URL + "new/page_" + page)
            soup = BeautifulSoup(html_data, "html5lib")
            div_body = soup.select("div.serials-list")
            episodes = div_body[0].find_all("div", class_="row")
        except Exception:
            plugin.log.error("Unexpected error: %s" % sys.exc_info()[0])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера.")
            return

        yield {
            "label": "[COLOR FF00FF00][Полный список сериалов][/COLOR]",
            "path": plugin.url_for("lostfilm_all", page=0),
        }

        done = 0
        for episode in episodes:
            episode_name_ru = episode.find("div", class_="alpha").text
            episode_name_en = episode.find("div", class_="beta").text

            # data-code is "<showid>-<season>-<episode>".
            data_code = episode.find("div", class_="haveseen-btn")["data-code"]
            tvshowid, season, episode_num = data_code.split("-")

            show = {
                "title": episode.find("div", class_="name-ru").text,
                "title_orig": episode.find("div", class_="name-en").text,
                "alias": episode.find("a")["href"].split("/")[2],
                "id": tvshowid
            }

            if dialog.iscanceled():
                return

            item = _lostfilm_updateitem_from_db({
                "label": "[COLOR FFFFFFCC][%02d.%02d][/COLOR] [COLOR FFFFFFFF][B]%s[/B][/COLOR]: %s" % (int(season), int(episode_num), show["title"], episode_name_ru),
                "path": plugin.url_for("lostfilm_play", showid=tvshowid, season=season, episode=episode_num),
                "is_playable": True,
                "info": {
                    "title": "%s [%s]" % (episode_name_ru, episode_name_en),
                    "season": season,
                    "episode": episode_num
                },
                "context_menu": [
                    ("Все серии", actions.update_view(plugin.url_for("lostfilm_tvshow", alias=show["alias"])))
                ]
            }, show)

            # Season 999 marks specials without a real season number.
            if int(season) == 999:
                item.update({
                    "label": "[COLOR FFFFFFCC][xx.%02d][/COLOR] [COLOR FFFFFFFF][B]%s[/B][/COLOR]: %s" % (int(episode_num), show["title"], episode_name_ru),
                })
            # Episode 999 marks a whole-season entry (not directly playable).
            elif "-" not in episode_num and int(episode_num) == 999:
                item.update({
                    "label": "[COLOR FFFFFFCC][%02d.xx][/COLOR] [COLOR FFFFFFFF][B]%s[/B][/COLOR]: %s" % (int(season), show["title"], episode_name_ru),
                    "is_playable": False
                })
                item.setdefault("info", {}).update({
                    "episode": "all"
                })

            done += 1
            dialog.update(
                percent=int(done * 100.0 / len(episodes)),
                line2=item.get("info", {}).get("tvshowtitle", "") or item.get("info", {}).get("title", "") or item["label"],
                line3=""
            )

            yield item

        if len(episodes) >= 10:
            # Bug fix: with the default page="" the original int(page) + 1
            # raised ValueError; treat "" as page 0.
            yield {
                "label": "[COLOR FF00FF00][Далее >][/COLOR]",
                "path": plugin.url_for("lostfilm_index", page=(int(page or 0) + 1)),
            }

        _lostfilm_close_dbase()
示例#32
0
def rutracker_search_page(catind, page, search=None, search_id=None):
    """Yield listitems for one page of rutracker search results.

    Submits the search form (or follows an existing server-side
    ``search_id``), scrapes topic links, enriches each item with scraper
    metadata in a thread pool while showing a progress dialog, and
    finally yields a "next page" link when a search id is known.

    :param catind: category index (key into CATEGORIES).
    :param page: zero-based results page; offsets the request by page*50.
    :param search: search term posted as the "nm" form field.
    :param search_id: server-side search id for follow-up pages.
    """
    import xbmc
    from bs4 import BeautifulSoup
    from urlparse import urljoin
    from contextlib import closing
    from multiprocessing.pool import ThreadPool
    from xbmctorrent.utils import terminating, SafeDialogProgress

    category = ([cat for cat in CATEGORIES if cat[0] == catind] or [("0", u"", "", "")])[0]
    scraper_name = category[3]
    plugin.set_content(category[2])
    if plugin.request.args.get("search_id"):
        search_id = plugin.request.args.get("search_id")[0]
    page = int(page)
    catind = int(catind)

    with closing(SafeDialogProgress(delay_close=0)) as dialog:
        dialog.create(plugin.name)
        dialog.update(percent=0, line1=u"Получение информации о раздачах...", line2="", line3="")

        items = []
        try:
            url = urljoin(BASE_URL, "search.php?")

            if search_id:
                # Follow-up page: reuse the server-side search id.
                params = {}
                params["nm"] = search
                if int(page) > 0:
                    params["start"] = int(page) * 50
                params["id"] = search_id
                html_data = url_get(url, headers=HEADERS, params=params)
            else:
                # First request: submit the search form.
                post_body = {"nm": search, "fsf": catind}
                html_data = url_get(url, headers=HEADERS, post=post_body)

            soup = BeautifulSoup(html_data, "html5lib")

            # Extract the search id from a pagination link (if any) so the
            # "next page" item below can reuse it.
            node = soup.find("a", class_=["pg"])
            if node:
                r = search_id_parser.search(node['href'])
                if r:
                    plugin.log.debug("Search id found: " + str(r.group(1)))
                    search_id = str(r.group(1))

            nodes = soup.findAll("a", class_=["topictitle"])

            for link in nodes:
                try:
                    title = _rutracker_cleantitle(link.text)
                    r = topic_id_parser.search(link['href'])
                    if r:
                        tid = r.group(1)
                        item = {
                            "label": "%s" % (title),
                            "path": plugin.url_for("rutracker_play", tid=tid),
                            "info": {"title": title},
                            "is_playable": False,
                            "context_menu": [
                                ("Play with Pulsar", actions.update_view(plugin.url_for("rutracker_play_pulsar", tid=tid)))
                            ]
                        }
                        items.append(item)
                        plugin.log.debug("Item added: " + title.encode('utf-8'))
                except Exception:
                    plugin.log.error("Unexpected error: %s \r Skipping item" % format_exc().split('\n')[-2])
        except Exception:
            plugin.log.error("Unexpected error: %s" % format_exc().split('\n')[-2])
            xbmcgui.Dialog().ok(plugin.name, "Не удалось получить данные от сервера")
            return

        def _get_torrent_info(item):
            """Enrich *item* with scraper metadata; fall back to defaults."""
            from xbmctorrent.search import scrapers as search
            from xbmctorrent.utils import get_quality_from_name

            # Created before the try so the fallback in the handler cannot
            # hit an unbound name (the original could raise NameError there).
            scrapers = search.Scrapers()
            try:
                if not plugin.get_setting("rutracker_usesearch", bool):
                    meta = scrapers.default(item)
                else:
                    meta = scrapers.scraper(scraper_name, item)
                plugin.log.debug("RUTRACKER: Meta information received")
                meta["path"] = item["path"]
                meta["is_playable"] = item["is_playable"]
                meta.setdefault("stream_info", {}).update(get_quality_from_name(meta['label']))
                plugin.log.debug("RUTRACKER: Meta path updated")
                return meta
            except Exception:
                plugin.log.error("RUTRACKER: Unexpected error: %s parsing item [%s]" % (format_exc().split('\n')[-2], str(item)))
                return scrapers.default(item)

        state = {"done": 0}

        def on_done(data):
            # Pool callback: advance the progress dialog.
            state["done"] += 1
            dialog.update(
                percent=int(state["done"] * 100.0 / len(items)),
                line2=data["info"].get("title") or data.get("label") or "",
            )

        with terminating(ThreadPool(5)) as pool:
            jobs = [pool.apply_async(_get_torrent_info, [item], callback=on_done) for item in items]
            while not all(job.ready() for job in jobs):
                if dialog.iscanceled():
                    return
                xbmc.sleep(100)

        for job in jobs:
            item = None  # keep the error log below from using a stale name
            try:
                item = job.get()
                # Strip scraper bookkeeping keys before handing to Kodi.
                del item["search"]
                del item["subdir"]
                yield item
            except Exception:
                plugin.log.error("RUTRACKER: Unexpected error: %s parsing item [%s]" % (format_exc().split('\n')[-2], str(item)))
        if search_id:
            next_page = {
                "label": u"[Далее >]",
                "path": plugin.url_for("rutracker_search_page", catind=catind, page=page + 1, search=search, search_id=search_id),
                "is_playable": False,
            }
            yield next_page
示例#33
0
def go_to_series_menu(s):
    """Build the single-entry context menu that switches to the series view."""
    label = lang(40307)
    action = actions.update_view(series_url(s))
    return [(label, action)]
示例#34
0
    def get_track_item(self,
                       track,
                       track_number=None,
                       show_artist=True,
                       in_library=False,
                       in_favorites=False,
                       in_playlists=False,
                       playlist_id=None,
                       library_album_id=None):
        """Build a listitem dict for *track*.

        The context menu adapts to where the track is shown: library,
        favorites, and playlist each get a remove entry when the track is
        already there, or an add entry otherwise.
        """
        strings = self._plugin.get_string
        url_for = self._plugin.url_for

        display = track.name
        if show_artist:
            display += '[LIGHT] / ' + track.artist.name + '[/LIGHT]'

        info = {
            'title': track.name,
            'artist': track.artist.name,
            'album': track.album.name,
            'duration': track.duration
        }
        if track_number is not None:
            info['tracknumber'] = track_number

        # First entry always jumps to the artist's detail view.
        menu = [
            (strings(30255).format(track.artist.name),
             actions.update_view(url_for('artists_detail',
                                         artist_id=track.artist.id)))
        ]

        if in_library:
            # Removal target differs when browsing inside a library album.
            if library_album_id is None:
                remove_action = actions.update_view(
                    url_for('tracks_library_remove', track_id=track.id))
            else:
                remove_action = actions.update_view(
                    url_for('albums_library_tracks_remove',
                            track_id=track.id,
                            album_id=library_album_id))
            menu.append((strings(30217), remove_action))
        else:
            menu.append((strings(30215),
                         actions.background(
                             url_for('tracks_library_add',
                                     track_id=track.id))))

        if in_favorites:
            menu.append((strings(30218),
                         actions.update_view(
                             url_for('favorites_remove',
                                     track_id=track.id))))
        else:
            menu.append((strings(30216),
                         actions.background(
                             url_for('favorites_add',
                                     track_id=track.id))))

        if in_playlists:
            menu.append((strings(30254),
                         actions.update_view(
                             url_for('playlists_library_remove_track',
                                     track_id=track.id,
                                     playlist_id=playlist_id))))
        else:
            menu.append((strings(30253),
                         actions.background(
                             url_for('playlists_library_select',
                                     track_id=track.id))))

        return {
            'label': display,
            'is_playable': True,
            'info': info,
            'thumbnail': Image.get_url(Image.TYPE_ALBUM, track.album.id,
                                       Image.SIZE_ALBUM_ORIGINAL),
            'properties': {
                'fanart_image': Image.get_url(Image.TYPE_ARTIST,
                                              track.artist.id,
                                              Image.SIZE_ARTIST_ORIGINAL),
            },
            'context_menu': menu,
            'path': url_for('play', track_id=track.id)
        }
示例#35
0
def go_to_series_menu(s):
    """Context-menu entries: one item that jumps to the view for series *s*."""
    menu_entry = (lang(40307), actions.update_view(series_url(s)))
    return [menu_entry]
示例#36
0
def get_movies(source, page, query):
    """Return movie listitems for the given *source*, most popular first.

    :param source: one of 'query', 'similar', 'discover', 'genre',
        'actor', 'director', or a TMDb list name handled by
        ``getTmdbMovies``.
    :param page: result page, passed through to the TMDb helpers.
    :param query: search term / genre / person id / movie id, depending
        on *source*.
    """
    if source == 'query':
        movieIds = searchTmdb(query, page)
    elif source == 'similar':
        movieIds = getTmdbSimilar(query, page)
    elif source == 'discover':
        movieIds = discoverMovies(page, query)
    elif source == 'genre':
        movieIds = getMoviesByGenre(query, page)
    elif source == 'actor':
        movieIds = getMoviesByActor(query, page)
    elif source == 'director':
        # Bug fix: the original also tested source == 'actor' here, which
        # was unreachable — 'actor' is matched by the branch above.
        movieIds = getMoviesByPerson(query, page, source)
    else:
        movieIds = getTmdbMovies(source, page)
    movies = [getTmdbMovie(movie_id) for movie_id in movieIds]

    # Populate movie items, sorted by popularity (descending).
    items = []
    movies = sorted(movies, key=lambda k: k['popularity'], reverse=True)
    for movie in movies:
        context_items = []
        # Bug fix: the RunPlugin URLs were missing the closing quote.
        # NOTE(review): the title is embedded unescaped; titles containing
        # quotes or '&' may still break the URL — consider urllib quoting.
        context_items.append((plugin.addon.getLocalizedString(30001), 'XBMC.RunPlugin("plugin://plugin.video.couchpotato_manager/movies/add?title=' + movie['title'] + '")'))
        context_items.append((plugin.addon.getLocalizedString(30002), 'XBMC.RunPlugin("plugin://plugin.video.trakt_list_manager/movies/add?title=' + movie['title'] + '")'))
        # Bug fix: 'Similar' and 'Actors' were appended twice; keep one of each.
        context_items.append((plugin.addon.getLocalizedString(30000), actions.update_view(plugin.url_for('show_movies', source='similar', page='1', query=movie['id']))))
        context_items.append(('Actors', actions.update_view(plugin.url_for('show_actors', movie_id=movie['id']))))
        context_items.append(('Directors', actions.update_view(plugin.url_for('show_directors', movie_id=movie['id']))))
        context_items.append(('Reviews', actions.update_view(plugin.url_for('show_reviews', imdb_id=movie['imdb']))))
        cast = [c['name'] for c in movie['cast']]
        item = {
            'label': movie['title'],
            'thumbnail': movie['thumbnail'],
            'info': {
                'title': movie['title'],
                'originaltitle': movie['title'],
                'year': movie['year'],
                'studio': movie['studio'],
                'mpaa': movie['mpaa'],
                'cast': cast,
                'director': movie['director'],
                'genre': movie['genre'],
                'tagline': movie['tagline'],
                'credits': movie.get('writer'),
                'plot': movie['plot'],
                'trailer': movie['trailer'],
                'duration': movie['runtime'],
                'country': movie['country']
            },
            'properties': {
                'fanart_image': movie['fanart'],
            },
            'context_menu': context_items,
            'replace_context_menu': True,
            'is_playable': True,
            'path': movie['trailer']
        }
        items.append(item)
    return items
def run_channels_listing(url):
    actions.update_view(plugin.url_for('channels_listing', url=url))