Example #1
def listLinksInComment(url, name, type_):
    from domains import parse_reddit_link, build_DirectoryItem_url_based_on_media_type
    from utils import markdown_to_bbcode, unescape
    from guis import progressBG
    #from resources.domains import make_addon_url_from
    #called from context menu
    log('listLinksInComment:%s:%s' % (type_, url))

    #does not work for list comments because the key is the playable url (not the reddit comments url)
    #msg=WINDOW.getProperty(url)
    #WINDOW.clearProperty( url )
    #log( '   msg=' + msg )

    directory_items = []
    author = ""
    ShowOnlyCommentsWithlink = False

    if type_ == 'linksOnly':
        ShowOnlyCommentsWithlink = True

    #url='https://www.reddit.com/r/Music/comments/4k02t1/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/' + '.json'
    #only get up to "https://www.reddit.com/r/Music/comments/4k02t1".
    #   do not include                                            "/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/"
    #   because we'll have problems when it looks like this: "https://www.reddit.com/r/Overwatch/comments/4nx91h/ever_get_that_feeling_déjà_vu/"

    #url=re.findall(r'(.*/comments/[A-Za-z0-9]+)',url)[0]

    #use the safe= argument in quote_plus so that only the unusual characters get encoded
    url = urllib.quote_plus(url, safe=':/?&')
    if '?' in url:
        url = url.split('?', 1)[0] + '.json?' + url.split('?', 1)[1]
    else:
        url += '.json'
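    #illustration (hypothetical url): 'https://www.reddit.com/r/pics/comments/abc123/?sort=top'
    #  becomes                        'https://www.reddit.com/r/pics/comments/abc123/.json?sort=top'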

    loading_indicator = progressBG(translation(30024))
    loading_indicator.update(0, 'Retrieving comments')

    content = reddit_request(url)
    if not content:
        loading_indicator.end()
        return

    loading_indicator.update(10, 'Parsing')
    content = json.loads(content)

    del harvest[:]
    #harvest links in the post text (just 1)
    r_linkHunter(content[0]['data']['children'])

    try:
        submitter = content[0]['data']['children'][0]['data']['author']
    except:
        submitter = ''

    #the post title is provided in json, we'll just use that instead of messages from addLink()
    try:
        post_title = content[0]['data']['children'][0]['data']['title']
    except:
        post_title = ''
    #for i, h in enumerate(harvest):
    #    log("aaaaa first harvest "+h[2])

    #harvest links in the post itself
    r_linkHunter(content[1]['data']['children'])

    comment_score = 0
    desc100 = ''  #initialized here because the tick() at the bottom of the loop runs even if the try block fails early

    loading_indicator.set_tick_total(len(harvest))
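    #layout of each tuple in harvest, inferred from the indexing below:
    #  h[0]=score  h[1]=title  h[2]=link url  h[3]=comment text  h[5]=depth  h[6]=kind ('t1' comment / 't3' post)  h[7]=author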

    for i, h in enumerate(harvest):
        try:
            #log(str(i)+"  score:"+ str(h[0]).zfill(5)+" "+ h[1] +'|'+ h[3] )
            comment_score = h[0]
            #log("score %d < %d (%s)" %(comment_score,int_CommentTreshold, CommentTreshold) )
            link_url = h[2]
            desc100 = h[3].replace('\n', ' ')[0:100]  #first 100 characters of description

            kind = h[6]  #reddit uses t1 for user comments and t3 for OP text of the post, like a poster describing the post
            d = h[5]  #depth of the comment

            tab = " " * d if d > 0 else "-"

            from urlparse import urlparse
            domain = '{uri.netloc}'.format(uri=urlparse(link_url))

            author = h[7]
            DirectoryItem_url = ''

            if comment_score < int_CommentTreshold:
                continue

            #hoster, DirectoryItem_url, videoID, mode_type, thumb_url,poster_url, isFolder,setInfo_type, setProperty_IsPlayable =make_addon_url_from(h[2])
            #if link_url:
            #    log( '  comment %s TITLE:%s... link[%s]' % ( str(d).zfill(3), desc100.ljust(20)[:20],link_url ) )

            ld = parse_reddit_link(link_url=link_url,
                                   assume_is_video=False,
                                   needs_preview=True,
                                   get_playable_url=True)

            if kind == 't1':
                list_title = r"[COLOR cadetblue]%3d[/COLOR] %s" % (h[0], tab)
            elif kind == 't3':
                list_title = r"[COLOR cadetblue]Title [/COLOR] %s" % (tab)

            #helps the textbox control treat [url description] and (url) as separate words, so they can be split into 2 lines
            plot = h[3].replace('](', '] (')
            plot = markdown_to_bbcode(plot)
            plot = unescape(plot)  #convert html entities e.g.:(&#39;)

            liz = xbmcgui.ListItem(label=list_title + ': ' + desc100)

            liz.setInfo(type="Video",
                        infoLabels={
                            "Title": h[1],
                            "plot": plot,
                            "studio": domain,
                            "votes": str(comment_score),
                            "director": author
                        })
            isFolder = False

            #force all links to ytdl to see if it can be played
            if link_url:
                DirectoryItem_url, setProperty_IsPlayable, isFolder, title_prefix = build_DirectoryItem_url_based_on_media_type(
                    ld, link_url)

                liz.setProperty('IsPlayable', setProperty_IsPlayable)
                liz.setProperty('url', DirectoryItem_url)  #<-- needed by the xml gui skin
                liz.setPath(DirectoryItem_url)

                if domain:
                    plot = "  [COLOR greenyellow][%s] %s" % (domain, plot) + "[/COLOR]"
                else:
                    plot = "  [COLOR greenyellow][%s]" % (plot) + "[/COLOR]"
                liz.setLabel(list_title + plot)

                if ld:
                    liz.setArt({
                        "thumb": ld.poster,
                        "poster": ld.poster,
                        "banner": ld.poster,
                        "fanart": ld.poster,
                        "landscape": ld.poster
                    })

            if DirectoryItem_url:
                #log( 'IsPlayable:'+setProperty_IsPlayable )
                directory_items.append((
                    DirectoryItem_url,
                    liz,
                    isFolder,
                ))
                #xbmcplugin.addDirectoryItem(handle=pluginhandle,url=DirectoryItem_url,listitem=liz,isFolder=isFolder)
            else:
                #this section is for comments that have no links
                if not ShowOnlyCommentsWithlink:
                    result = h[3].replace('](', '] (')
                    result = markdown_to_bbcode(result)
                    liz = xbmcgui.ListItem(label=list_title + desc100)
                    liz.setInfo(type="Video",
                                infoLabels={
                                    "Title": h[1],
                                    "plot": result,
                                    "studio": domain,
                                    "votes": str(h[0]),
                                    "director": author
                                })
                    liz.setProperty('IsPlayable', 'false')

                    directory_items.append((
                        "",
                        liz,
                        False,
                    ))
                    #xbmcplugin.addDirectoryItem(handle=pluginhandle,url="",listitem=liz,isFolder=False)

                #END of section for comments that have no links or unsupported links
        except Exception as e:
            log('  EXCEPTION:' + str(e))

        #for di in directory_items:
        #    log( str(di) )

        loading_indicator.tick(1, desc100)
    loading_indicator.end()

    #log('  comments_view id=%s' %comments_viewMode)

    #xbmcplugin.setContent(pluginhandle, "mixed")  #in estuary, mixed have limited view id's available. it has widelist which is nice for comments but we'll just stick with 'movies'
    xbmcplugin.setContent(pluginhandle, "movies")  #files, songs, artists, albums, movies, tvshows, episodes, musicvideos
    xbmcplugin.setPluginCategory(pluginhandle, 'Comments')

    xbmcplugin.addDirectoryItems(handle=pluginhandle, items=directory_items)
    xbmcplugin.endOfDirectory(pluginhandle)

    if comments_viewMode:
        xbmc.executebuiltin('Container.SetViewMode(%s)' % comments_viewMode)
Example #2
def reddit_get_refresh_token(url, name, type_):
    #this function gets a refresh_token from reddit and keeps it in our addon. this refresh_token is used to get 1-hour access tokens.
    #  getting a refresh_token is a one-time step

    #1st: use any webbrowser to open
    #  https://www.reddit.com/api/v1/authorize?client_id=hXEx62LGqxLj8w&response_type=code&state=RS&redirect_uri=http://localhost:8090/&duration=permanent&scope=read,mysubreddits
    #2nd: click allow and copy the code provided after reddit redirects the user
    #  save this code in add-on settings.  A one-time use code that may be exchanged for a bearer token.
    code = addon.getSetting("reddit_code")
    #log("  user refresh token:"+reddit_refresh_token)
    #log("  user          code:"+code)

    if reddit_refresh_token and code:
        #log("  user already have refresh token:"+reddit_refresh_token)
        dialog = xbmcgui.Dialog()
        if not dialog.yesno(translation(32411), translation(32412),
                            translation(32413), translation(32414)):
            return

    try:
        log("Requesting a reddit permanent token with code=" + code)

        req = urllib2.Request('https://www.reddit.com/api/v1/access_token')

        #http://stackoverflow.com/questions/6348499/making-a-post-call-instead-of-get-using-urllib2
        data = urllib.urlencode({
            'grant_type': 'authorization_code',
            'code': code,  #e.g. 'woX9CDSuw7XBg1MiDUnTXXQd0e4'
            'redirect_uri': reddit_redirect_uri  #e.g. http://localhost:8090/
        })

        #http://stackoverflow.com/questions/2407126/python-urllib2-basic-auth-problem
        import base64
        base64string = base64.encodestring('%s:%s' % (reddit_clientID, '')).replace('\n', '')
        req.add_header('Authorization', "Basic %s" % base64string)
        req.add_header('User-Agent', reddit_userAgent)

        page = urllib2.urlopen(req, data=data)
        response = page.read()
        page.close()
        log(response)
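        #the response is JSON; with duration=permanent it looks roughly like this (illustrative values):
        #  {"access_token":"...", "token_type":"bearer", "expires_in":3600, "refresh_token":"...", "scope":"read mysubreddits"}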

        status = reddit_set_addon_setting_from_response(response)

        if status == 'ok':
            r1 = "Click 'OK' when done"
            r2 = "Settings will not be saved"
            xbmc.executebuiltin("XBMC.Notification(%s, %s)" % (r1, r2))
        else:
            r2 = "Requesting a reddit permanent token"
            xbmc.executebuiltin("XBMC.Notification(%s, %s)" % (status, r2))


#    This is a 2nd option reddit oauth. user needs to request access token every hour
#         #user enters this on their webbrowser. note that there is no duration=permanent response_type=token instead of code
#         request_url='https://www.reddit.com/api/v1/authorize?client_id=hXEx62LGqxLj8w&response_type=token&state=RS&redirect_uri=http://localhost:8090/&scope=read,identity'
#         #click on "Allow"
#         #copy the redirect url code    #enters it on settings. e.g.: LVQu8vitbEXfMPcK1sGlVVQZEpM
#
#         #u='https://oauth.reddit.com/new.json'
#         u='https://oauth.reddit.com//api/v1/me.json'
#
#         req = urllib2.Request(u)
#         #req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14')
#         req.add_header('User-Agent', reddit_userAgent)
#         req.add_header('Authorization','bearer LVQu8vitbEXfMPcK1sGlVVQZEpM')
#         page = urllib2.urlopen(req)
#         response=page.read();page.close()

    except urllib2.HTTPError as err:
        xbmc_notify(err.code, err.msg)
Example #3
def reddit_post_worker(idx, entry, q_out):
    import datetime
    from utils import strip_emoji, pretty_datediff, clean_str
    from reddit import determine_if_video_media_from_reddit_json, ret_sub_icon

    show_listVideos_debug=True
    credate = ""
    is_a_video=False
    title_line2=""
    t_on = translation(30071)  #"on"

    t_pts='c'
    thumb_w=0; thumb_h=0

    try:

        kind=entry.get('kind')  #t1 for comments  t3 for posts
        data=entry.get('data')
        post_id=data.get('name')
        if data:
            if kind=='t3':
                title = clean_str(data,['title'])
                description=clean_str(data,['media','oembed','description'])
                post_selftext=clean_str(data,['selftext'])

                description=post_selftext+'[CR]'+description if post_selftext else description
            else:
                title=clean_str(data,['link_title'])
                description=clean_str(data,['body'])

            title = strip_emoji(title) #an emoji in the title was causing a KeyError  u'\ud83c'

            commentsUrl = urlMain+clean_str(data,['permalink'])


            try:
                aaa = data.get('created_utc')
                credate = datetime.datetime.utcfromtimestamp( aaa )
                now_utc = datetime.datetime.utcnow()
                pretty_date=pretty_datediff(now_utc, credate)
                credate = str(credate)
            except (AttributeError,TypeError,ValueError):
                credate = ""

            subreddit=clean_str(data,['subreddit'])
            author=clean_str(data,['author'])
            domain=clean_str(data,['domain'])

            num_comments = data.get('num_comments',0)

            d_url=clean_str(data,['url'])
            link_url=clean_str(data,['link_url'])
            media_oembed_url=clean_str(data,['media','oembed','url'])

            media_url=next((item for item in [d_url,link_url,media_oembed_url] if item ), '')


            thumb=clean_str(data,['thumbnail'])


            if not thumb.startswith('http'): #in ['nsfw','default','self']:  #reddit has a "default" thumbnail (alien holding camera with "?")
                thumb=""

            if thumb=="":
                thumb=clean_str(data,['media','oembed','thumbnail_url']).replace('&amp;','&')

            if thumb=="":  #use this subreddit's icon if thumb still empty
                try: thumb=ret_sub_icon(subreddit)
                except: pass

            try:

                preview=data.get('preview')['images'][0]['source']['url'].encode('utf-8').replace('&amp;','&')

                try:
                    thumb_h = float( data.get('preview')['images'][0]['source']['height'] )
                    thumb_w = float( data.get('preview')['images'][0]['source']['width'] )
                except (AttributeError,TypeError,ValueError):
                    thumb_w=0; thumb_h=0

            except Exception as e:

                thumb_w=0; thumb_h=0; preview="" #a blank preview image will be replaced with poster_url from make_addon_url_from() for domains that support it

            is_a_video = determine_if_video_media_from_reddit_json(data)

            over_18=data.get('over_18')

            title_line2=""


            title_line2 = "[I][COLOR dimgrey]%s %s [COLOR cadetblue]r/%s[/COLOR] (%d) %s[/COLOR][/I]" %(pretty_date,t_on, subreddit,num_comments, t_pts)

            if show_listVideos_debug : log("  POST%cTITLE%.2d=%s" %( ("v" if is_a_video else " "), idx, title ))


            tuple_for_addDirectoryItems=addLink(title=title,
                    title_line2=title_line2,
                    iconimage=thumb,
                    previewimage=preview,
                    preview_w=thumb_w,
                    preview_h=thumb_h,
                    domain=domain,
                    description=description,
                    credate=credate,
                    reddit_says_is_video=is_a_video,
                    commentsUrl=commentsUrl,
                    subreddit=subreddit,
                    media_url=media_url,
                    over_18=over_18,
                    posted_by=author,
                    num_comments=num_comments,
                    post_index=idx,
                    post_id=post_id
                    )

            q_out.put( [idx, tuple_for_addDirectoryItems] )
    except Exception as e:
        log( '  #reddit_post_worker EXCEPTION:' + repr(sys.exc_info()) +'--'+ str(e) )
Example #4
def search(payload, method="general"):
    """ Main search entrypoint

    Args:
        payload (dict): Search payload from projectx.
        method   (str): Type of search, can be ``general``, ``movie``, ``show``, ``season`` or ``anime``

    Returns:
        list: All filtered results in the format projectx expects
    """
    log.debug("Searching with payload (%s): %s" % (method, repr(payload)))

    if method == 'general':
        if 'query' in payload:
            payload['title'] = payload['query']
            payload['titles'] = {'source': payload['query']}
        else:
            payload = {
                'title': payload,
                'titles': {
                    'source': payload
                },
            }

    if 'titles' in payload and payload['titles']:  #non-general payloads may arrive without titles
        payload['titles'] = dict(
            (k.lower(), v) for k, v in payload['titles'].iteritems())

    # If titles[] exists in payload and there are special chars in titles[source]
    #   then we set a flag to possibly modify the search query
    payload['has_special'] = 'titles' in payload and \
                             bool(payload['titles']) and \
                             'source' in payload['titles'] and \
                             any(c in payload['titles']['source'] for c in special_chars)
    if payload['has_special']:
        log.debug("Query title contains special chars, so removing any quotes in the search query")
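    #e.g. a title like "Mission: Impossible" (hypothetical) would set has_special,
    #  provided ':' appears in special_chars (defined elsewhere in this module)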

    if 'proxy_url' not in payload:
        payload['proxy_url'] = ''
    if 'internal_proxy_url' not in payload:
        payload['internal_proxy_url'] = ''
    if 'projectx_url' not in payload:
        payload['projectx_url'] = ''
    if 'silent' not in payload:
        payload['silent'] = False
    if 'skip_auth' not in payload:
        payload['skip_auth'] = False

    global request_time
    global provider_names
    global provider_results
    global available_providers

    provider_names = []
    provider_results = []
    available_providers = 0
    request_time = time.time()

    providers = get_enabled_providers(method)

    if len(providers) == 0:
        if not payload['silent']:
            notify(translation(32060), image=get_icon_path())
        log.error("No providers enabled")
        return []

    log.info(
        "Burstin' with %s" %
        ", ".join([definitions[provider]['name'] for provider in providers]))

    if get_setting('kodi_language', bool):
        kodi_language = xbmc.getLanguage(xbmc.ISO_639_1)
        if not kodi_language:
            log.warning("Kodi returned empty language code...")
        elif 'titles' not in payload or not payload['titles']:
            log.info("No translations available...")
        elif payload['titles'] and kodi_language not in payload['titles']:
            log.info("No '%s' translation available..." % kodi_language)

    p_dialog = xbmcgui.DialogProgressBG()
    if not payload['silent']:
        p_dialog.create('projectx [COLOR FFFF6B00]Burst[/COLOR]',
                        translation(32061))

    for provider in providers:
        available_providers += 1
        provider_names.append(definitions[provider]['name'])
        task = Thread(target=run_provider, args=(provider, payload, method))
        task.start()
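    # available_providers is expected to be decremented elsewhere (presumably by run_provider
    #   when a provider finishes), so the polling loop below can exit before the timeout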

    providers_time = time.time()
    total = float(available_providers)

    # Exit if all providers have returned results or the timeout is reached; poll every 250ms
    while time.time() - providers_time < timeout and available_providers > 0:
        timer = time.time() - providers_time
        log.debug("Timer: %ds / %ds" % (timer, timeout))
        if timer > timeout:
            break
        message = translation(32062) % available_providers if available_providers > 1 else translation(32063)
        if not payload['silent']:
            p_dialog.update(int((total - available_providers) / total * 100),
                            message=message)
        time.sleep(0.25)

    if not payload['silent']:
        p_dialog.close()
    del p_dialog

    if available_providers > 0:
        message = u', '.join(provider_names)
        message = message + translation(32064)
        log.warning(message.encode('utf-8'))
        if not payload['silent']:
            notify(message, ADDON_ICON)

    log.debug("all provider_results: %s" % repr(provider_results))

    filtered_results = apply_filters(provider_results)

    log.debug("all filtered_results: %s" % repr(filtered_results))

    log.info("Providers returned %d results in %s seconds" %
             (len(filtered_results), round(time.time() - request_time, 2)))

    return filtered_results
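
#A minimal usage sketch for search() (illustration, not from the source; assumes the module's
#  globals - definitions, providers, settings - are initialized). The keys mirror those read above:
#
#    payload = {
#        'title': 'big buck bunny',
#        'titles': {'source': 'big buck bunny'},
#        'silent': True,
#    }
#    results = search(payload, method='general')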
Example #5
def playYTDLVideo(url, name, type_):
    if pluginhandle == -1:
        xbmc_notify("Error", "Attempt to use invalid handle -1")  #saves the user from waiting
        return

    dialog_progress_title = 'Youtube_dl'  #.format(ytdl_get_version_info())
    dialog_progress_YTDL = xbmcgui.DialogProgressBG()
    dialog_progress_YTDL.create(dialog_progress_title)
    dialog_progress_YTDL.update(10, dialog_progress_title, translation(30024))

    from YoutubeDLWrapper import YoutubeDLWrapper, _selectVideoQuality
    from urlparse import urlparse, parse_qs
    import pprint

    o = urlparse(url)
    query = parse_qs(o.query)
    video_index = 0
    #note that in domains.py youtube class will send a simplified url to avoid sending
    #   https://www.youtube.com/watch?v=R6_dZhE-4bk&index=22&list=PLGJ6ezwqAB2a4RP8hWEWAGB9eT2bmaBsy  (ytdl will parse around 90+ videos, takes a very long time)
    #   http://youtube.com/v/R6_dZhE-4bk   (will be faster)
    if 'index' in query:
        try:
            video_index = int(query['index'][0])
        except (TypeError, ValueError):
            video_index = 0
        #log( repr(video_index) )
        dialog_progress_YTDL.update(20, dialog_progress_title,
                                    translation(30025))
    else:
        #no 'index' param: if the link still turns out to be a playlist, parsing will take a looooong time.
        #  we use a different progress message here to differentiate the two cases
        dialog_progress_YTDL.update(20, dialog_progress_title,
                                    translation(30022))

    #use YoutubeDLWrapper by ruuk to avoid  bad file error
    ytdl = YoutubeDLWrapper()
    try:
        ydl_info = ytdl.extract_info(url, download=False)
        #in youtube_dl utils.py def unified_timestamp(date_str, day_first=True):
        # there was an error playing https://vimeo.com/14652586
        #   on line 1195:
        # change          except ValueError:
        #     to          except (ValueError,TypeError):
        #   this already fixed by ruuk magic. in YoutubeDLWrapper

        #log( "YoutubeDL extract_info:\n" + pprint.pformat(ydl_info, indent=1) )
        video_infos = _selectVideoQuality(ydl_info,
                                          quality=ytdl_quality,
                                          disable_dash=(not ytdl_DASH))
        #log( "video_infos:\n" + pprint.pformat(video_infos, indent=1, depth=3) )
        dialog_progress_YTDL.update(80, dialog_progress_title,
                                    translation(30023))

        if len(video_infos) > 1:
            log('    ***ytdl link resolved to %d streams. playing #%d' %
                (len(video_infos), video_index))
            #xbmc_notify("Multiple video", "{} videos in playlist".format(len(pl)))

        li = ytdl_video_info_to_listitem(video_infos, video_index, name)
        xbmcplugin.setResolvedUrl(pluginhandle, True, li)

    except Exception as e:
        ytdl_ver = dialog_progress_title + ' v' + ytdl_get_version_info('local')
        err_msg = str(e) + ';'  #ERROR: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest vers....
        short_err = err_msg.split(';')[0]
        log("playYTDLVideo Exception:" + str(sys.exc_info()[0]) + "  " +
            str(e))
        xbmc_notify(ytdl_ver, short_err)

        #try urlresolver
        log('   trying urlresolver...')
        playURLRVideo(url, name, type_)


#    finally:
    dialog_progress_YTDL.update(100, dialog_progress_title)  #not sure if necessary to set to 100 before closing dialogprogressbg
    dialog_progress_YTDL.close()
Example #6
def error_message(message, name, type_):
    if name:
        sub_msg = name  #name is usually the title of the post
    else:
        sub_msg = translation(32021)  #Parsing error
    xbmc_notify(message, sub_msg)
Example #7
        self.params.append(self.b_out)

    def fprop(self, input, extra_input):
        cap_out = []
        for i in xrange(num_capsules):
            out, prob = self.capsules[i].fprop(input, extra_input)
            cap_out.append((out, prob))
        #prob_sum = sum([result[1] for result in cap_out])
        caps_out = sum([result[0] * result[1] for result in cap_out])
        shifted_img = T.nnet.sigmoid(caps_out + self.b_out)
        return shifted_img


if __name__ == "__main__":
    train, valid, test = load('mnist.pkl.gz')
    trans_train, shift_train, ori_train = translation(train[0], 28)
    trans_train, shift_train, ori_train = shared(
        (trans_train, shift_train, ori_train))
    trans_valid, shift_valid, ori_valid = translation(valid[0], 28)
    trans_valid, shift_valid, ori_valid = shared(
        (trans_valid, shift_valid, ori_valid))
    trans_test, shift_test, ori_test = translation(test[0], 28)
    trans_test, shift_test, ori_test = shared(
        (trans_test, shift_test, ori_test))

    num_capsules = 60
    in_dim = 784
    recog_dim = 10
    gener_dim = 20
    activation = 'sigmoid'
Example #8
def build_context_menu_entries(num_comments, commentsUrl, subreddit, domain,
                               link_url, post_id, post_title, posted_by):
    from reddit import assemble_reddit_filter_string, subreddit_in_favorites  #, this_is_a_user_saved_list
    from utils import colored_subreddit, build_script, truncate

    s = truncate(subreddit, 15)  #crop long subreddit names in context menu
    colored_subreddit_short = colored_subreddit(s)
    colored_subreddit_full = colored_subreddit(subreddit)
    colored_domain_full = colored_subreddit(domain, 'tan', False)
    post_title_short = truncate(post_title, 15)
    post_author = truncate(posted_by, 15)

    label_view_comments = translation(32050) + ' ({})'.format(num_comments)
    label_goto_subreddit = translation(32051) + ' {}'.format(
        colored_subreddit_full)
    label_goto_domain = translation(32053) + ' {}'.format(colored_domain_full)
    label_search = translation(32052)
    label_autoplay_after = translation(32055) + ' ' + colored_subreddit(
        post_title_short, 'gray', False)
    label_more_by_author = translation(32049) + ' ' + colored_subreddit(
        post_author, 'gray', False)

    cxm_list = [
        ('html to text', build_script('readHTML', link_url)),
        (label_view_comments, build_script('listLinksInComment', commentsUrl)),
    ]

    #more by author
    if GCXM_hasmultipleauthor:
        cxm_list.append((label_more_by_author,
                         build_script(
                             "listSubReddit",
                             assemble_reddit_filter_string(
                                 "", "/user/" + posted_by + '/submitted'),
                             posted_by)))

    #more from r/subreddit
    if GCXM_hasmultiplesubreddit:
        cxm_list.append(
            (label_goto_subreddit,
             build_script("listSubReddit",
                          assemble_reddit_filter_string("", subreddit),
                          subreddit)))

    #more from domain
    if GCXM_hasmultipledomain:
        cxm_list.append(
            (label_goto_domain,
             build_script("listSubReddit",
                          assemble_reddit_filter_string("", '', '', domain),
                          domain)))

    #more random
    if any(x in GCXM_actual_url_used_to_generate_these_posts.lower()
           for x in ['/random', '/randnsfw']
           ):  #if '/rand' in GCXM_actual_url_used_to_generate_these_posts:
        cxm_list.append(
            (translation(32053) + ' random',
             build_script('listSubReddit',
                          GCXM_actual_url_used_to_generate_these_posts)),
        )  #Reload

    #Autoplay all
    #Autoplay after post_title
    #slideshow
    cxm_list.extend([
        (translation(32054),
         build_script('autoPlay', GCXM_reddit_query_of_this_gui)),
        (label_autoplay_after,
         build_script(
             'autoPlay',
             GCXM_reddit_query_of_this_gui.split('&after=')[0] + '&after=' +
             post_id)),
        (translation(32048),
         build_script('autoSlideshow', GCXM_reddit_query_of_this_gui)),
    ])

    #Add %s to shortcuts
    if not subreddit_in_favorites(subreddit):
        cxm_list.append((translation(32056) % colored_subreddit_short,
                         build_script("addSubreddit", subreddit)))

    #Add to subreddit/domain filter
    cxm_list.append((translation(32057) % colored_subreddit_short,
                     build_script("addtoFilter", subreddit, '', 'subreddit')))
    cxm_list.append((translation(32057) % colored_domain_full,
                     build_script("addtoFilter", domain, '', 'domain')))

    #Search
    if GCXM_hasmultiplesubreddit:
        cxm_list.append((label_search, build_script("search", '', '')))
    else:
        label_search += ' {}'.format(colored_subreddit_full)
        cxm_list.append((label_search, build_script("search", '', subreddit)))

    return cxm_list
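
#sketch of a call site (assumption; the actual consumer lives elsewhere in the addon):
#
#    cxm = build_context_menu_entries(num_comments, commentsUrl, subreddit, domain,
#                                     link_url, post_id, post_title, posted_by)
#    liz.addContextMenuItems(cxm)   #attach to an xbmcgui.ListItem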
Example #9
def listSubReddit(url, subreddit_key, type_):
    from guis import progressBG
    from utils import post_is_filtered_out, build_script, compose_list_item, xbmc_notify
    from reddit import reddit_request, has_multiple, assemble_reddit_filter_string

    global GCXM_hasmultiplesubreddit, GCXM_actual_url_used_to_generate_these_posts, GCXM_reddit_query_of_this_gui, GCXM_hasmultipledomain, GCXM_hasmultipleauthor
    #the +'s were removed by url conversion; put them back
    title_bar_name = subreddit_key.replace(' ', '+')
    #log("  title_bar_name %s " %(title_bar_name) )

    log("listSubReddit r/%s\n %s" % (title_bar_name, url))

    currentUrl = url
    icon = banner = header = None
    xbmc_busy()

    loading_indicator = progressBG('Loading...')
    loading_indicator.update(0, 'Retrieving ' + subreddit_key)
    content = reddit_request(url)
    loading_indicator.update(10, subreddit_key)

    if not content:
        xbmc_busy(False)
        loading_indicator.end()  #it is important to close xbmcgui.DialogProgressBG
        return

    threads = []
    q_liz = Queue()  #output queue (listitem)

    content = json.loads(content)
    #log("query returned %d items " % len(content['data']['children']) )
    posts_count = len(content['data']['children'])
    filtered_out_posts = 0

    hms = has_multiple('subreddit', content['data']['children'])

    if not hms:  #r/random and r/randnsfw return a random subreddit. we need to use the name of this subreddit for the "next page" link.
        try:
            g = content['data']['children'][0]['data']['subreddit']
        except KeyError:  #a missing key raises KeyError, not ValueError
            g = ""
        except IndexError:
            xbmc_busy(False)
            loading_indicator.end()  #it is important to close xbmcgui.DialogProgressBG
            xbmc_notify("List Subreddit", translation(32022))
            return
        if g:
            title_bar_name = g
            #preserve the &after string so that functions like play slideshow and play all videos can 'play' the correct page
            #  extract the &after string from currentUrl -OR- send it with the 'type' argument when calling this function.
            currentUrl = assemble_reddit_filter_string('', g) + '&after=' + type_

        #put subreddit icon/header in the GUI
        icon, banner, header = subreddit_icoheader_banner(g)

    GCXM_hasmultiplesubreddit = hms
    GCXM_hasmultipledomain = has_multiple('domain', content['data']['children'])
    GCXM_hasmultipleauthor = has_multiple('author', content['data']['children'])
    GCXM_actual_url_used_to_generate_these_posts = url
    GCXM_reddit_query_of_this_gui = currentUrl

    for idx, entry in enumerate(content['data']['children']):
        try:
            #if entry.get('kind')!='t3':
            #    filtered_out_posts+=1
            #    continue
            if post_is_filtered_out(entry.get('data')):
                filtered_out_posts += 1
                continue
            #have threads process each reddit post
            t = threading.Thread(target=reddit_post_worker,
                                 args=(idx, entry, q_liz),
                                 name='#t%.2d' % idx)
            threads.append(t)
            t.start()

        except Exception as e:
            log(" EXCEPTION:=" + str(sys.exc_info()[0]) + "  " + str(e))

    #check the queue to determine progress
    break_counter = 0  #to avoid infinite loop
    expected_listitems = (posts_count - filtered_out_posts)
    if expected_listitems > 0:
        loading_indicator.set_tick_total(expected_listitems)
        last_queue_size = 0
        while q_liz.qsize() < expected_listitems:
            if break_counter >= 100:
                break

            #each change in the queue size gets a tick on our progress track
            if last_queue_size < q_liz.qsize():
                items_added = q_liz.qsize() - last_queue_size
                loading_indicator.tick(items_added)
            else:
                break_counter += 1

            last_queue_size = q_liz.qsize()
            xbmc.sleep(100)

    #wait for all threads to finish before collecting the list items
    for idx, t in enumerate(threads):
        #log('    joining %s' %t.getName())
        t.join(timeout=20)

    xbmc_busy(False)

    #compare the number of entries to the returned results
    #log( "queue:%d entries:%d" %( q_liz.qsize() , len(content['data']['children'] ) ) )
    if q_liz.qsize() != expected_listitems:
        #some post might be filtered out.
        log('some threads did not return a listitem')

    #for t in threads: log('isAlive %s %s' %(t.getName(), repr(t.isAlive()) )  )

    #liu=[ qi for qi in sorted(q_liz.queue) ]
    li = [liz for idx, liz in sorted(q_liz.queue)]

    #empty the queue.
    with q_liz.mutex:
        q_liz.queue.clear()

    loading_indicator.end()  #it is important to close xbmcgui.DialogProgressBG

    try:
        #this part makes sure that you load the next page instead of just the first
        after = ""
        after = content['data']['after']
        if after:
            if "&after=" in currentUrl:
                nextUrl = currentUrl[:currentUrl.find("&after=")] + "&after=" + after
            else:
                nextUrl = currentUrl + "&after=" + after
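            #illustration (hypothetical token): '...&limit=25' + '&after=t3_abc123'
            #  reddit's data['after'] is the fullname of the last post on the current page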

            liz = compose_list_item(
                translation(32004), "", "DefaultFolderNextSquare.png",
                "script",
                build_script("listSubReddit", nextUrl, title_bar_name, after))

            #for items at the bottom left corner
            liz.setArt({"clearart": "DefaultFolderNextSquare.png"})
            liz.setInfo(type='video',
                        infoLabels={"Studio": translation(32004)})
            liz.setProperty('link_url', nextUrl)
            li.append(liz)

    except Exception as e:
        log(" EXCEPTzION:=" + str(sys.exc_info()[0]) + "  " + str(e))

    xbmc_busy(False)

    title_bar_name = urllib.unquote_plus(title_bar_name)
    ui = skin_launcher('listSubReddit',
                       title_bar_name=title_bar_name,
                       listing=li,
                       subreddits_file=subredditsFile,
                       currentUrl=currentUrl,
                       icon=icon,
                       banner=banner,
                       header=header)
    ui.doModal()
    del ui
Example #10
def autoSlideshow(url, name, type_):

    log('starting slideshow ' + url)
    ev = threading.Event()

    entries = []

    preview_w = 0
    preview_h = 0
    image = ''

    content = reddit_request(url)
    if not content: return

    content = json.loads(content)

    log("slideshow %s:Parsing %d items: %s" %
        (type_, len(content['data']['children']),
         'random' if random_post_order else 'normal order'))

    data_children = content['data']['children']

    if random_post_order:
        random.shuffle(data_children)

    for j_entry in data_children:
        try:
            title = unescape(j_entry['data']['title'].encode('utf-8'))
            log("  TITLE:%s [r/%s]" %
                (title, j_entry.get('data').get('subreddit')))

            try:
                description = unescape(j_entry['data']['media']['oembed']
                                       ['description'].encode('utf-8'))
            except:
                description = ''

            try:
                post_selftext = unescape(
                    j_entry['data']['selftext'].encode('utf-8'))
            except:
                post_selftext = ''

            description = post_selftext + '[CR]' + description if post_selftext else description

            try:
                media_url = j_entry['data']['url']
            except:
                media_url = j_entry['data']['media']['oembed']['url']

            try:
                preview = j_entry['data']['preview']['images'][0]['source'][
                    'url'].encode('utf-8').replace('&amp;', '&')
                try:
                    preview_h = float(j_entry['data']['preview']['images'][0]
                                      ['source']['height'])
                    preview_w = float(j_entry['data']['preview']['images'][0]
                                      ['source']['width'])
                except:
                    preview_w = 0
                    preview_h = 0

            except Exception as e:

                preview = ""

            ld = parse_reddit_link(link_url=media_url,
                                   assume_is_video=False,
                                   needs_preview=True,
                                   get_playable_url=True)
            if ld:
                if not preview:
                    preview = ld.poster

                if (addon.getSetting('include_albums')
                        == 'true') and (ld.media_type == sitesBase.TYPE_ALBUM):
                    dictlist = listAlbum(media_url, title, 'return_dictlist')
                    for d in dictlist:

                        t2 = d.get('li_label') if d.get('li_label') else title

                        d['li_label'] = t2
                        entries.append(d)

                else:
                    if addon.getSetting('use_reddit_preview') == 'true':
                        if preview: image = preview
                        elif ld.poster: image = ld.poster

                    else:
                        if ld.poster:
                            image = ld.poster  #entries.append([title,ld.poster,preview_w, preview_h,len(entries)])
                        elif preview:
                            image = preview  #entries.append([title,preview,preview_w, preview_h,len(entries)])

                    append_entry(entries, title, image, preview_w, preview_h,
                                 description)
            else:
                append_entry(entries, title, preview, preview_w, preview_h,
                             description)

        except Exception as e:
            log('  autoSlideshow exception:' + str(e))

    entries = remove_dict_duplicates(entries, 'DirectoryItem_url')

    for i, e in enumerate(entries):
        log('  possible playable items({0}) {1}...{2}x{3}  {4}'.format(
            i, e['li_label'].ljust(15)[:15], repr(e.get('width')),
            repr(e.get('height')), e.get('DirectoryItem_url')))

    if len(entries) == 0:
        log('  Play All: no playable items')
        xbmc.executebuiltin(
            'XBMC.Notification("%s","%s")' %
            (translation(32054),
             translation(32055)))  #Play All     No playable items
        return

    log("**********playing slideshow*************")

    for e in entries:
        q.put(e)

    s = ScreensaverManager(ev, q)

    try:
        s.start_loop()
    except Exception as e:
        log("  EXCEPTION slideshowAlbum:=" + str(sys.exc_info()[0]) + "  " +
            str(e))

    return
Example #11
def reddit_post_worker(idx, entry, q_out):
    import datetime
    from utils import pretty_datediff, clean_str, get_int, format_description
    from reddit import determine_if_video_media_from_reddit_json
    try:
        credate = ""
        is_a_video = False
        title_line2 = ""
        thumb_w = 0
        thumb_h = 0

        t_on = translation(32071)  #"on"
        #t_pts = u"\U0001F4AC"  # translation(30072) #"cmnts"  comment bubble symbol. doesn't work
        t_pts = u"\U00002709"  # translation(30072)   envelope symbol
        t_up = u"\U000025B4"  #u"\U00009650"(up arrow)   #upvote symbol

        #on 3/21/2017 we're adding a new feature that lets users view their saved posts by entering /user/username/saved as their subreddit.
        #  in addition to saved posts, users can also save comments. we need to handle it by checking for "kind"
        kind = entry.get('kind')  #t1 for comments  t3 for posts
        data = entry.get('data')
        if data:
            if kind == 't3':
                title = clean_str(data, ['title'])
                description = clean_str(data,
                                        ['media', 'oembed', 'description'])
                post_selftext = clean_str(data, ['selftext'])

                description = post_selftext + '[CR]' + description if post_selftext else description
                domain = clean_str(data, ['domain'])
            else:
                title = clean_str(data, ['link_title'])
                description = clean_str(data, ['body'])
                domain = 'Comment post'

            description = format_description(description)
            #title=strip_emoji(title) #an emoji in the title was causing a KeyError  u'\ud83c'
            title = format_description(title)

            is_a_video = determine_if_video_media_from_reddit_json(entry)
            log("  POST%cTITLE%.2d=%s" %
                (("v" if is_a_video else " "), idx, title))
            #log("description%.2d=%s" %(idx,description))
            post_id = entry['kind'] + '_' + data.get('id')  #same as entry['data']['name']
            #log('  %s  %s ' % (post_id, entry['data']['name'] ))
            commentsUrl = urlMain + clean_str(data, ['permalink'])
            #log("commentsUrl"+str(idx)+"="+commentsUrl)
            try:
                aaa = data.get('created_utc')
                credate = datetime.datetime.utcfromtimestamp(aaa)
                now_utc = datetime.datetime.utcnow()
                pretty_date = pretty_datediff(now_utc, credate)
                credate = str(credate)
            except (AttributeError, TypeError, ValueError):
                credate = ""

            subreddit = clean_str(data, ['subreddit'])
            author = clean_str(data, ['author'])
            #log("     DOMAIN%.2d=%s" %(idx,domain))

            ups = data.get('score', 0)  #downs not used anymore
            num_comments = data.get('num_comments', 0)

            d_url = clean_str(data, ['url'])
            link_url = clean_str(data, ['link_url'])
            media_oembed_url = clean_str(data, ['media', 'oembed', 'url'])
            #            log('   kind     ='+kind)
            #            log('    url     ='+d_url)
            #            log('    link_url='+link_url)
            #            log('   permalink='+clean_str(data,['permalink']))
            #            log('    media_oembed_url='+media_oembed_url)
            media_url = next(
                (item for item in [d_url, link_url, media_oembed_url] if item),
                '')
            #log("     MEDIA%.2d=%s" %(idx,media_url))

            thumb = clean_str(data, ['thumbnail'])

            #media_w=get_int(data,['media','oembed','width'])
            #media_h=get_int(data,['media','oembed','height'])
            #log('  media_w='+repr(media_w)+' h='+repr(media_h) )

            #try:log('  media_w='+repr(data.get('media')['oembed']['width']  ) )
            #except:pass

            if not thumb.startswith('http'):  #in ['nsfw','default','self']:  #reddit has a "default" thumbnail (alien holding camera with "?")
                thumb = ""

            if thumb == "":
                thumb = clean_str(data, ['media', 'oembed', 'thumbnail_url'
                                         ]).replace('&amp;', '&')

            #a blank preview image will be replaced with poster_url from parse_reddit_link() for domains that support it
            preview = clean_str(data, ['preview', 'images', 0, 'source', 'url']).replace('&amp;', '&')
            #log('  preview='+repr(preview))
            thumb_h = get_int(data, ['preview', 'images', 0, 'source', 'height'])
            thumb_w = get_int(data, ['preview', 'images', 0, 'source', 'width'])
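            #clean_str/get_int walk nested json via a path list (the 0 above is a list index), roughly:
            #  data['preview']['images'][0]['source']['url'], falling back to '' (or 0 for get_int) when any key is missing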

            #preview images are 'keep' stretched to fit inside 1080x1080.
            #  if preview image is smaller than the box we have for thumbnail, we'll use that as thumbnail and not have a bigger stretched image
            if thumb_w > 0 and thumb_w < 280:
                #log('*******preview is small ')
                thumb = preview
                thumb_w = 0
                thumb_h = 0
                preview = ""

            over_18 = data.get('over_18')

            title_line2 = ""
            title_line2 = "[I][COLOR dimgrey]%d%c %s %s [B][COLOR cadetblue]r/%s[/COLOR][/B] (%d) %s[/COLOR][/I]" % (
                ups, t_up, pretty_date, t_on, subreddit, num_comments, t_pts)

            liz = addLink(
                title=title,
                title_line2=title_line2,
                iconimage=thumb,
                previewimage=preview,
                preview_w=thumb_w,
                preview_h=thumb_h,
                domain=domain,
                description=description,
                credate=credate,
                reddit_says_is_video=is_a_video,
                commentsUrl=commentsUrl,
                subreddit=subreddit,
                link_url=media_url,
                over_18=over_18,
                posted_by=author,
                num_comments=num_comments,
                post_id=post_id,
            )

            q_out.put([idx, liz])  #we put the idx back for easy sorting

    except Exception as e:
        log('  #reddit_post_worker EXCEPTION:' + repr(sys.exc_info()) + '--' +
            str(e))
Example #12
def listLinksInComment(url, name, type_):
    from domains import parse_reddit_link, build_DirectoryItem_url_based_on_media_type
    from utils import markdown_to_bbcode, unescape
    from guis import progressBG

    log('listLinksInComment:%s:%s' %(type_,url) )


    directory_items=[]
    author=""
    ShowOnlyCommentsWithlink=False

    if type_=='linksOnly':
        ShowOnlyCommentsWithlink=True

    url=urllib.quote_plus(url,safe=':/?&')
    if '?' in url:
        url=url.split('?', 1)[0]+'.json?'+url.split('?', 1)[1]
    else:
        url+= '.json'

    loading_indicator=progressBG(translation(30024))
    loading_indicator.update(0,'Retrieving comments')

    content = reddit_request(url)
    if not content:
        loading_indicator.end()
        return

    loading_indicator.update(10,'Parsing')
    content = json.loads(content)

    del harvest[:]

    r_linkHunter(content[0]['data']['children'])

    try:submitter=content[0]['data']['children'][0]['data']['author']
    except: submitter=''

    try:post_title=content[0]['data']['children'][0]['data']['title']
    except:post_title=''

    r_linkHunter(content[1]['data']['children'])

    comment_score=0
    desc100=''  #initialized here; tick() at the bottom of the loop runs even if the try block fails early

    loading_indicator.set_tick_total(len(harvest))

    for i, h in enumerate(harvest):
        try:

            comment_score=h[0]

            link_url=h[2]
            desc100=h[3].replace('\n',' ')[0:100] #first 100 characters of description

            kind=h[6] #reddit uses t1 for user comments and t3 for OP text of the post. like a poster describing the post.
            d=h[5]   #depth of the comment

            tab=" "*d if d>0 else "-"

            from urlparse import urlparse
            domain = '{uri.netloc}'.format( uri=urlparse( link_url ) )

            author=h[7]
            DirectoryItem_url=''

            if comment_score < int_CommentTreshold:
                continue


            ld=parse_reddit_link(link_url=link_url, assume_is_video=False, needs_preview=True, get_playable_url=True )

            if kind=='t1':
                list_title=r"[COLOR cadetblue]%3d[/COLOR] %s" %( h[0], tab )
            elif kind=='t3':
                list_title=r"[COLOR cadetblue]Title [/COLOR] %s" %( tab )

            plot=h[3].replace('](', '] (')
            plot= markdown_to_bbcode(plot)
            plot=unescape(plot)  #convert html entities e.g.:(&#39;)

            liz=xbmcgui.ListItem(label=list_title +': '+ desc100)

            liz.setInfo( type="Video", infoLabels={ "Title": h[1], "plot": plot, "studio": domain, "votes": str(comment_score), "director": author  } )
            isFolder=False

            if link_url:
                DirectoryItem_url, setProperty_IsPlayable, isFolder, title_prefix = build_DirectoryItem_url_based_on_media_type(ld, link_url)

                liz.setProperty('IsPlayable', setProperty_IsPlayable)
                liz.setProperty('url', DirectoryItem_url)  #<-- needed by the xml gui skin
                liz.setPath(DirectoryItem_url)

                if domain:
                    plot= "  [COLOR greenyellow][%s] %s"%(domain, plot )  + "[/COLOR]"
                else:
                    plot= "  [COLOR greenyellow][%s]"%( plot ) + "[/COLOR]"
                liz.setLabel(list_title+plot)

                if ld:
                    liz.setArt({"thumb": ld.poster, "poster":ld.poster, "banner":ld.poster, "fanart":ld.poster, "landscape":ld.poster   })

            if DirectoryItem_url:

                directory_items.append( (DirectoryItem_url, liz, isFolder,) )

            else:

                if not ShowOnlyCommentsWithlink:
                    result=h[3].replace('](', '] (')
                    result=markdown_to_bbcode(result)
                    liz=xbmcgui.ListItem(label=list_title + desc100)
                    liz.setInfo( type="Video", infoLabels={ "Title": h[1], "plot": result, "studio": domain, "votes": str(h[0]), "director": author } )
                    liz.setProperty('IsPlayable', 'false')

                    directory_items.append( ("", liz, False,) )

        except Exception as e:
            log('  EXCEPTION:' + str(e) )


        loading_indicator.tick(1, desc100)
    loading_indicator.end()

    xbmcplugin.setContent(pluginhandle, "movies")    #files, songs, artists, albums, movies, tvshows, episodes, musicvideos
    xbmcplugin.setPluginCategory(pluginhandle,'Comments')

    xbmcplugin.addDirectoryItems(handle=pluginhandle, items=directory_items )
    xbmcplugin.endOfDirectory(pluginhandle)

    if comments_viewMode:
        xbmc.executebuiltin('Container.SetViewMode(%s)' %comments_viewMode)
Example #13
def build_context_menu_entries(num_comments,commentsUrl, subreddit, domain, link_url, post_id):
    from reddit import assemble_reddit_filter_string, subreddit_in_favorites, this_is_a_user_saved_list
    from utils import colored_subreddit

    s=(subreddit[:12] + '..') if len(subreddit) > 12 else subreddit     #crop long subreddit names in context menu
    colored_subreddit_short=colored_subreddit( s )
    colored_subreddit_full=colored_subreddit( subreddit )
    colored_domain_full=colored_subreddit( domain, 'tan',False )
    entries=[]


    if cxm_show_open_browser:
        entries.append( ( translation(30509),  #Open in browser
                          "XBMC.RunPlugin(%s?mode=openBrowser&url=%s)" % ( sys.argv[0], urllib.quote_plus( link_url ) ) ) )

    if cxm_show_comment_link or cxm_show_comments:
        if num_comments > 0:

            if cxm_show_comment_link:
                entries.append( ( translation(30052) , #Show comment links
                                  "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=listLinksInComment&url=%s&type=linksOnly)" % ( sys.argv[0], sys.argv[0], urllib.quote_plus(commentsUrl) ) ) )
            if cxm_show_comments:
                entries.append( ( translation(30050) ,  #Show comments
                                  "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=listLinksInComment&url=%s)" % ( sys.argv[0], sys.argv[0], urllib.quote_plus(commentsUrl) ) ) )

        else:
            entries.append( ( translation(30053) ,  #No comments
                          "xbmc.executebuiltin('Action(Close)')" ) )

    if GCXM_hasmultiplesubreddit and cxm_show_go_to:
        entries.append( ( translation(30051)+" %s" %colored_subreddit_full ,
                          "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=listSubReddit&url=%s)" % ( sys.argv[0], sys.argv[0],urllib.quote_plus(assemble_reddit_filter_string("",subreddit,True)  ) ) ) )

    if cxm_show_new_from:

        entries.append( ( translation(30055)+" %s" %colored_subreddit_short ,
                          "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=listSubReddit&url=%s)" % ( sys.argv[0], sys.argv[0],urllib.quote_plus(assemble_reddit_filter_string("",subreddit+'/new',True)  ) ) ) )

    if cxm_show_add_shortcuts:
        if not subreddit_in_favorites(subreddit):

            entries.append( ( translation(30056) %colored_subreddit_short ,
                              "XBMC.RunPlugin(%s?mode=addSubreddit&url=%s)" % ( sys.argv[0], subreddit ) ) )

    if cxm_show_filter_subreddit:
        entries.append( ( translation(30057) %colored_subreddit_short ,
                          "XBMC.RunPlugin(%s?mode=addtoFilter&url=%s&type=%s)" % ( sys.argv[0], subreddit, 'subreddit' ) ) )
    if cxm_show_filter_domain:
        entries.append( ( translation(30057) %colored_domain_full ,
                          "XBMC.RunPlugin(%s?mode=addtoFilter&url=%s&type=%s)" % ( sys.argv[0], domain, 'domain' ) ) )

    from reddit import reddit_refresh_token
    if reddit_refresh_token and cxm_show_reddit_save:
        if this_is_a_user_saved_list(GCXM_subreddit_key):

            entries.append( ( translation(30059) ,
                                  "XBMC.RunPlugin(%s?mode=reddit_save&url=%s&name=%s)" % ( sys.argv[0], '/api/unsave/', post_id ) ) )
        else:
            entries.append( ( translation(30058) ,
                                  "XBMC.RunPlugin(%s?mode=reddit_save&url=%s&name=%s)" % ( sys.argv[0], '/api/save/', post_id ) ) )

    if cxm_show_youtube_items:

        from domains import ClassYoutube
        match=re.compile( ClassYoutube.regex, re.I).findall( link_url )  #regex='(youtube.com/)|(youtu.be/)|(youtube-nocookie.com/)|(plugin.video.youtube/play)'
        if match:

            entries.append( ( translation(30048) ,
                                "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=listRelatedVideo&url=%s&type=%s)" % ( sys.argv[0], sys.argv[0], urllib.quote_plus(link_url), 'channel' ) ) )
            entries.append( ( translation(30049) ,
                                "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=listRelatedVideo&url=%s&type=%s)" % ( sys.argv[0], sys.argv[0], urllib.quote_plus(link_url), 'related' ) ) )

    return entries
Example #14
def index(url,name,type_):
    from utils import xstr, samealphabetic, hassamealphabetic
    from reddit import load_subredditsFile, parse_subreddit_entry, create_default_subreddits, assemble_reddit_filter_string, ret_sub_info, ret_settings_type_default_icon


    if not os.path.exists(subredditsFile):
        create_default_subreddits()

    subredditsFile_entries=load_subredditsFile()

    subredditsFile_entries.sort(key=lambda y: y.lower())

    addtl_subr_info={}

    xbmcplugin.setContent(pluginhandle, "mixed") #files, songs, artists, albums, movies, tvshows, episodes, musicvideos

    next_mode='listSubReddit'

    for subreddit_entry in subredditsFile_entries:

        addtl_subr_info=ret_sub_info(subreddit_entry)

        entry_type, subreddit, alias, shortcut_description=parse_subreddit_entry(subreddit_entry)

        icon=default_icon='' #addon_path+"/resources/skins/Default/media/"+ret_settings_type_default_icon(entry_type)

        url= assemble_reddit_filter_string("",subreddit, "yes")

        if subreddit.lower() in ["all","popular"]:
            addDir(subreddit, url, next_mode, icon, subreddit, { "plot": translation(30009) } )  #Displays the currently most popular content from all of reddit
        else:
            if addtl_subr_info: #if we have additional info about this subreddit

                title=xstr(addtl_subr_info.get('title'))+'\n'
                display_name=xstr(addtl_subr_info.get('display_name'))
                if samealphabetic( title, display_name): title=''


                header_title=xstr(addtl_subr_info.get('header_title'))
                public_description=xstr( addtl_subr_info.get('public_description'))

                if samealphabetic( header_title, public_description): public_description=''
                if samealphabetic(title,public_description): public_description=''


                if entry_type=='subreddit':
                    display_name='r/'+display_name
                shortcut_description='[COLOR cadetblue][B]%s[/B][/COLOR]\n%s[I]%s[/I]\n%s' %(display_name,title,header_title,public_description )

                icon=addtl_subr_info.get('icon_img')
                banner=addtl_subr_info.get('banner_img')
                header=addtl_subr_info.get('header_img')  #usually the small icon on upper left side on subreddit screen

                icon=next((item for item in [icon,banner,header] if item ), '') or default_icon

                addDirR(alias, url, next_mode, icon,
                        type_=subreddit,
                        listitem_infolabel={ "plot": shortcut_description },
                        file_entry=subreddit_entry,
                        banner_image=banner )
            else:
                addDirR(alias, url, next_mode, icon, subreddit, { "plot": shortcut_description }, subreddit_entry )

    addDir("[B]- "+translation(30001)+"[/B]", "", 'addSubreddit', "", "", { "plot": translation(30006) } ) #"Customize this list with your favorite subreddit."
    addDir("[B]- "+translation(30005)+"[/B]", "",'searchReddits', "", "", { "plot": translation(30010) } ) #"Search reddit for a particular post or topic

    xbmcplugin.endOfDirectory(pluginhandle)
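
# Hedged sketch of the icon fallback chain above: next() over a generator
# returns the first truthy candidate, else the default.
def first_truthy(candidates, default=''):
    return next((item for item in candidates if item), default)

# first_truthy([None, '', 'banner.png']) -> 'banner.png'
# first_truthy(['', None])               -> ''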
Example #15
def autoPlay(url, name, type_):
    from domains import sitesBase, parse_reddit_link, ydtl_get_playable_url
    from utils import unescape, post_is_filtered_out, strip_emoji, xbmc_busy, log, translation
    from reddit import reddit_request, determine_if_video_media_from_reddit_json
    from actions import setting_gif_repeat_count

    #collect a list of title and urls as entries[] from the j_entries obtained from reddit
    #then create a playlist from those entries
    #then play the playlist

    gif_repeat_count = setting_gif_repeat_count()
    entries = []
    watchdog_counter = 0
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    playlist.clear()

    xbmc_busy()

    content = reddit_request(url)
    if not content: return
    #log( str(content) )
    #content = json.loads(content.replace('\\"', '\''))
    content = json.loads(content)

    log("Autoplay %s - Parsing %d items" %
        (type_, len(content['data']['children'])))

    for j_entry in content['data']['children']:
        try:
            title = unescape(j_entry['data']['title'].encode('utf-8'))
            title = strip_emoji(title)

            try:
                media_url = j_entry['data']['url']
            except (KeyError, AttributeError, TypeError, ValueError):
                media_url = j_entry['data']['media']['oembed']['url']

            is_a_video = determine_if_video_media_from_reddit_json(j_entry)

            log("  %cTITLE:%s" % (("v" if is_a_video else " "), title))

            ld = parse_reddit_link(link_url=media_url,
                                   assume_is_video=False,
                                   needs_preview=False,
                                   get_playable_url=True)

            if ld:
                log('      type:%s %s' % (ld.media_type, ld.link_action))
                if ld.media_type in [
                        sitesBase.TYPE_VIDEO, sitesBase.TYPE_GIF,
                        sitesBase.TYPE_VIDS, sitesBase.TYPE_MIXED
                ]:

                    if ld.media_type == sitesBase.TYPE_GIF:
                        entries.append([
                            title, ld.playable_url,
                            sitesBase.DI_ACTION_PLAYABLE
                        ])
                        for _ in range(0, gif_repeat_count):
                            entries.append([
                                title, ld.playable_url,
                                sitesBase.DI_ACTION_PLAYABLE
                            ])
                    else:
                        entries.append(
                            [title, ld.playable_url, ld.link_action])
            else:
                #log('    checking if ytdl supports %s' %media_url )
                playable_video_url = ydtl_get_playable_url(media_url)
                if playable_video_url:
                    for u in playable_video_url:
                        entries.append(
                            [title, u, sitesBase.DI_ACTION_PLAYABLE])

        except Exception as e:
            log('  autoPlay exception:' + str(e))

    #for i,e in enumerate(entries): log('  e1-%d %s:' %(i, e[1]) )
    #def k2(x): return x[1]
    #entries=remove_duplicates(entries, k2)   #***disable removal of duplicates because it will also remove looping for gif videos
    #for i,e in enumerate(entries): log('  e2-%d %s:' %(i, e[1]) )

    for i, e in enumerate(entries):
        try:
            log('  possible playable items(%.2d) %s...%s (%s)' %
                (i, e[0].ljust(15)[:15], e[1], e[2]))
        except:
            continue

    if len(entries) == 0:
        log('  Play All: no playable items')
        xbmc.executebuiltin(
            'XBMC.Notification("%s","%s")' %
            (translation(32054),
             translation(32055)))  #Play All     No playable items
        return

    entries_to_buffer = 4
    #log('  entries:%d buffer:%d' %( len(entries), entries_to_buffer ) )
    if len(entries) < entries_to_buffer:
        entries_to_buffer = len(entries)
        #log('entries to buffer reduced to %d' %entries_to_buffer )

    #for title, url in entries:
    #    log("  added to playlist:"+ title + "  " + url )

    log("**********autoPlay*************")

    #play_list=[]
    ev = threading.Event()

    t = Worker(entries, q, ev)
    t.daemon = True
    t.start()
    #t.run()

    #wait for worker to finish processing 1st item
    #e.wait(200)

    while True:
        #log( '  c-wait+get buffer(%d) wdt=%d ' %(playlist.size(), watchdog_counter)  )
        try:
            #playable_url = q.get(True, 10)
            playable_entry = q.get(True, 10)
            #playable_url=playable_entry[1]
            q.task_done()
            #play_list.append(playable_entry[1])
            playlist.add(playable_entry[1],
                         xbmcgui.ListItem(playable_entry[0]))
            log('    c-buffered(%d):%s...%s' %
                (playlist.size(), playable_entry[0].ljust(15)[:15],
                 playable_entry[1]))

        except:
            watchdog_counter += 1
            if ev.is_set():  #p is done producing
                break
            #if got 3 empty from queue.
            pass
        watchdog_counter += 1
        #log('  playlist:%d buffer:%d' %( playlist.size(), entries_to_buffer ) )
        if playlist.size() >= entries_to_buffer:  #q.qsize()
            log('  c-buffer count met')
            break
        if watchdog_counter > entries_to_buffer:
            break

    log('  c-buffering done')

    #xbmc_busy(False)

    xbmc.Player().play(playlist)

    watchdog_counter = 0
    while True:
        #log( '  c-get buffer(%d) wdt=%d ' %(playlist.size(), watchdog_counter)  )
        #q.join()
        #log( ' c- join-ed, get... '  )
        try:
            #playable_url = q.get(True,10)
            playable_entry = q.get(True, 10)
            q.task_done()
            #log( '    c- got next item... ' + playable_entry[1] )
            #play_list.append(playable_entry[1])
            playlist.add(playable_entry[1],
                         xbmcgui.ListItem(playable_entry[0]))
            log('    c-got next item(%d):%s...%s' %
                (playlist.size(), playable_entry[0].ljust(15)[:15],
                 playable_entry[1]))
        except:
            watchdog_counter += 1
            if ev.is_set():  #p is done producing
                break

            pass
        #xbmc.PlayList(1).add(playable_url)

        if ev.is_set() and q.empty():
            log(' c- ev is set and q.empty -->  break ')
            break

        if watchdog_counter > 2:
            break

    log(' c-all done ')
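
# Self-contained sketch of the producer/consumer buffering pattern autoPlay
# relies on: a daemon thread resolves entries onto a Queue and sets an Event
# when done, while the consumer buffers a few items before playback starts.
# Names here are illustrative; this is not the addon's actual Worker class.
import threading
try:
    from Queue import Queue, Empty   #Python 2, matching this codebase
except ImportError:
    from queue import Queue, Empty

def sketch_producer(entries, out_q, done_ev):
    for e in entries:
        out_q.put(e)                 #stand-in for resolving a playable url
    done_ev.set()                    #signal "done producing"

def sketch_buffer(entries, to_buffer=4):
    q2, ev2 = Queue(), threading.Event()
    t = threading.Thread(target=sketch_producer, args=(entries, q2, ev2))
    t.daemon = True
    t.start()
    buffered = []
    while len(buffered) < min(to_buffer, len(entries)):
        try:
            buffered.append(q2.get(True, 10))
        except Empty:
            if ev2.is_set():         #producer finished; nothing more coming
                break
    return buffered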
Example #16
def process(provider,
            generator,
            filtering,
            has_special,
            verify_name=True,
            verify_size=True):
    """ Method for processing provider results using its generator and Filtering class instance

    Args:
        provider        (str): Provider ID
        generator  (function): Generator method, can be either ``extract_torrents`` or ``extract_from_api``
        filtering (Filtering): Filtering class instance
        has_special    (bool): Whether title contains special chars
        verify_name    (bool): Whether to double-check the results' names match the query or not
        verify_size    (bool): Whether to check the results' file sizes
    """
    log.debug("execute_process for %s with %s" % (provider, repr(generator)))
    definition = definitions[provider]
    definition = get_alias(definition, get_setting("%s_alias" % provider))

    client = Client()
    token = None
    logged_in = False
    token_auth = False

    if get_setting("use_cloudhole", bool):
        client.clearance = get_setting('clearance')
        client.user_agent = get_setting('user_agent')

    if get_setting('kodi_language', bool):
        kodi_language = xbmc.getLanguage(xbmc.ISO_639_1)
        if kodi_language:
            filtering.kodi_language = kodi_language
        language_exceptions = get_setting('language_exceptions')
        if language_exceptions.strip().lower():
            filtering.language_exceptions = re.split(r',\s?',
                                                     language_exceptions)

    log.debug("[%s] Queries: %s" % (provider, filtering.queries))
    log.debug("[%s] Extras:  %s" % (provider, filtering.extras))

    for query, extra in zip(filtering.queries, filtering.extras):
        log.debug("[%s] Before keywords - Query: %s - Extra: %s" %
                  (provider, repr(query), repr(extra)))
        if has_special:
            # Removing quotes, surrounding {title*} keywords, when title contains special chars
            query = re.sub("[\"']({title.*?})[\"']", '\\1', query)

        query = filtering.process_keywords(provider, query)
        extra = filtering.process_keywords(provider, extra)
        if 'charset' in definition and 'utf' not in definition[
                'charset'].lower():
            try:
                query = urllib.quote(query.encode(definition['charset']))
                extra = urllib.quote(extra.encode(definition['charset']))
            except:
                pass

        log.debug("[%s] After keywords  - Query: %s - Extra: %s" %
                  (provider, repr(query), repr(extra)))
        if not query:
            return filtering.results

        url_search = filtering.url.replace('QUERY', query)
        if extra:
            url_search = url_search.replace('EXTRA', extra)
        else:
            url_search = url_search.replace('EXTRA', '')
        url_search = url_search.replace(' ', definition['separator'])

        # MagnetDL fix...
        url_search = url_search.replace('FIRSTLETTER', query[:1])

        # Creating the payload for POST method
        payload = dict()
        for key, value in filtering.post_data.iteritems():
            if 'QUERY' in value:
                payload[key] = filtering.post_data[key].replace('QUERY', query)
            else:
                payload[key] = filtering.post_data[key]

        # Creating the payload for GET method
        data = None
        if filtering.get_data:
            data = dict()
            for key, value in filtering.get_data.iteritems():
                if 'QUERY' in value:
                    data[key] = filtering.get_data[key].replace('QUERY', query)
                else:
                    data[key] = filtering.get_data[key]

        log.debug("-   %s query: %s" % (provider, repr(query)))
        log.debug("--  %s url_search before token: %s" %
                  (provider, repr(url_search)))
        log.debug("--- %s using POST payload: %s" % (provider, repr(payload)))
        log.debug("----%s filtering with post_data: %s" %
                  (provider, repr(filtering.post_data)))

        # Set search's "title" in filtering to double-check results' names
        if 'filter_title' in definition and definition['filter_title']:
            filtering.filter_title = True
            filtering.title = query

        if token:
            log.info('[%s] Reusing existing token' % provider)
            url_search = url_search.replace('TOKEN', token)
        elif 'token' in definition:
            token_url = definition['base_url'] + definition['token']
            log.debug("Getting token for %s at %s" %
                      (provider, repr(token_url)))
            client.open(token_url.encode('utf-8'))
            try:
                token_data = json.loads(client.content)
            except:
                log.error('%s: Failed to get token for %s' %
                          (provider, repr(url_search)))
                return filtering.results
            log.debug("Token response for %s: %s" %
                      (provider, repr(token_data)))
            if 'token' in token_data:
                token = token_data['token']
                log.debug("Got token for %s: %s" % (provider, repr(token)))
                url_search = url_search.replace('TOKEN', token)
            else:
                log.warning('%s: Unable to get token for %s' %
                            (provider, repr(url_search)))

        if logged_in:
            log.info("[%s] Reusing previous login" % provider)
        elif token_auth:
            log.info("[%s] Reusing previous token authorization" % provider)
        elif 'private' in definition and definition['private']:
            username = get_setting('%s_username' % provider)
            password = get_setting('%s_password' % provider)
            passkey = get_setting('%s_passkey' % provider)
            if not username and not password and not passkey:
                for addon_name in ('script.magnetic.%s' % provider,
                                   'script.magnetic.%s-mc' % provider):
                    for setting in ('username', 'password'):
                        try:
                            value = xbmcaddon.Addon(addon_name).getSetting(
                                setting)
                            set_setting('%s_%s' % (provider, setting), value)
                            if setting == 'username':
                                username = value
                            if setting == 'password':
                                password = value
                        except:
                            pass

            if passkey:
                logged_in = True
                client.passkey = passkey
                url_search = url_search.replace('PASSKEY', passkey)

            elif 'login_object' in definition and definition['login_object']:
                logged_in = False
                login_object = definition['login_object'].replace(
                    'USERNAME',
                    '"%s"' % username).replace('PASSWORD', '"%s"' % password)

                # TODO generic flags in definitions for those...
                if provider == 'lostfilm':
                    client.open(definition['root_url'] +
                                '/v_search.php?c=110&s=1&e=1')
                    if client.content == 'log in first':
                        pass
                    else:
                        log.info('[%s] Login successful' % provider)
                        logged_in = True

                if 'token_auth' in definition:
                    # log.debug("[%s] logging in with: %s" % (provider, login_object))
                    if client.open(definition['root_url'] +
                                   definition['token_auth'],
                                   post_data=eval(login_object)):
                        try:
                            token_data = json.loads(client.content)
                        except:
                            log.error('%s: Failed to get token from %s' %
                                      (provider, definition['token_auth']))
                            return filtering.results
                        log.debug("Token response for %s: %s" %
                                  (provider, repr(token_data)))
                        if 'token' in token_data:
                            client.token = token_data['token']
                            log.debug("Auth token for %s: %s" %
                                      (provider, repr(client.token)))
                        else:
                            log.error('%s: Unable to get auth token for %s' %
                                      (provider, repr(url_search)))
                            return filtering.results
                        log.info('[%s] Token auth successful' % provider)
                        token_auth = True
                    else:
                        log.error("[%s] Token auth failed with response: %s" %
                                  (provider, repr(client.content)))
                        return filtering.results
                elif not logged_in and client.login(
                        definition['root_url'] + definition['login_path'],
                        eval(login_object), definition['login_failed']):
                    log.info('[%s] Login successful' % provider)
                    logged_in = True
                elif not logged_in:
                    log.error("[%s] Login failed: %s", provider, client.status)
                    log.debug("[%s] Failed login content: %s", provider,
                              repr(client.content))
                    notify(translation(32089), image=get_icon_path())
                    return filtering.results

                if logged_in:
                    if provider == 'lostfilm':
                        log.info('[%s] Search lostfilm serial ID...', provider)
                        url_search = fix_lf(url_search)
                        client.open(url_search.encode('utf-8'),
                                    post_data=payload,
                                    get_data=data)
                        search_info = re.search(r'PlayEpisode\((.*?)\)">',
                                                client.content)
                        if search_info:
                            series_details = re.search(
                                r"'(\d+)','(\d+)','(\d+)'",
                                search_info.group(1))
                            client.open(definition['root_url'] +
                                        '/v_search.php?c=%s&s=%s&e=%s' %
                                        (series_details.group(1),
                                         series_details.group(2),
                                         series_details.group(3)))
                            redirect_url = re.search(ur'url=(.*?)">',
                                                     client.content)
                            if redirect_url is not None:
                                url_search = redirect_url.group(1)
                        else:
                            log.info('[%s] Not found ID in %s' %
                                     (provider, url_search))
                            return filtering.results

        log.info(">  %s search URL: %s" %
                 (definition['name'].rjust(longest), url_search))

        client.open(url_search.encode('utf-8'),
                    post_data=payload,
                    get_data=data)
        filtering.results.extend(
            generate_payload(provider, generator(provider, client), filtering,
                             verify_name, verify_size))
    return filtering.results
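
# Hedged sketch of the placeholder expansion done above: provider URL templates
# carry QUERY/EXTRA/TOKEN markers that get substituted per search, then spaces
# are rewritten with the provider's separator. Illustrative only; the real
# templates come from the provider definitions.
def expand_search_url(template, query, extra='', separator='+', token=None):
    url = template.replace('QUERY', query).replace('EXTRA', extra)
    if token is not None:
        url = url.replace('TOKEN', token)
    return url.replace(' ', separator)

# expand_search_url('http://example.org/s?q=QUERY', 'big buck bunny')
#   -> 'http://example.org/s?q=big+buck+bunny'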
Example #17
def search(payload, method="general"):
    """ Main search entrypoint

    Args:
        payload (dict): Search payload from Elementum.
        method   (str): Type of search, can be ``general``, ``movie``, ``show``, ``season`` or ``anime``

    Returns:
        list: All filtered results in the format Elementum expects
    """
    log.debug("Searching with payload (%s): %s" % (method, repr(payload)))

    if method == 'general':
        payload = {
            'title': payload
        }

    if 'has_special' not in payload:
        payload['has_special'] = False

    global request_time
    global provider_names
    global provider_results
    global available_providers

    provider_names = []
    provider_results = []
    available_providers = 0
    request_time = time.time()

    providers = get_enabled_providers(method)

    if len(providers) == 0:
        notify(translation(32060), image=get_icon_path())
        log.error("No providers enabled")
        return []

    log.info("Burstin' with %s" % ", ".join([definitions[provider]['name'] for provider in providers]))

    if get_setting('kodi_language', bool):
        kodi_language = xbmc.getLanguage(xbmc.ISO_639_1)
        if not kodi_language:
            log.warning("Kodi returned empty language code...")
        elif 'titles' not in payload or not payload['titles']:
            log.info("No translations available...")
        elif payload['titles'] and kodi_language not in payload['titles']:
            log.info("No '%s' translation available..." % kodi_language)

    p_dialog = xbmcgui.DialogProgressBG()
    p_dialog.create('Quasar [COLOR FF5CB9FF]Nova[/COLOR]', translation(32061))
    for provider in providers:
        available_providers += 1
        provider_names.append(definitions[provider]['name'])
        task = Thread(target=run_provider, args=(provider, payload, method))
        task.start()

    providers_time = time.time()
    total = float(available_providers)

    # Exit if all providers have returned results or timeout reached, check every 250ms
    while time.time() - providers_time < timeout and available_providers > 0:
        timer = time.time() - providers_time
        log.debug("Timer: %ds / %ds" % (timer, timeout))
        if timer > timeout:
            break
        message = translation(32062) % available_providers if available_providers > 1 else translation(32063)
        p_dialog.update(int((total - available_providers) / total * 100), message=message)
        time.sleep(0.25)

    p_dialog.close()
    del p_dialog

    if available_providers > 0:
        message = u', '.join(provider_names)
        message = message + translation(32064)
        log.warning(message.encode('utf-8'))
        notify(message, ADDON_ICON)

    log.debug("all provider_results: %s" % repr(provider_results))

    filtered_results = apply_filters(provider_results)

    log.debug("all filtered_results: %s" % repr(filtered_results))

    log.info("Providers returned %d results in %s seconds" % (len(filtered_results), round(time.time() - request_time, 2)))

    return filtered_results
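
# Self-contained sketch of the fan-out/poll pattern in search(): one thread per
# provider, then poll until all report back or the timeout passes. A shared
# counter guarded by a lock stands in for the module-level available_providers
# that run_provider presumably decrements; names are illustrative.
import threading, time

def sketch_fan_out(jobs, worker, timeout=10.0, poll=0.25):
    remaining = [len(jobs)]
    lock = threading.Lock()

    def run(job):
        try:
            worker(job)
        finally:
            with lock:
                remaining[0] -= 1         #this provider finished (or failed)

    for job in jobs:
        t = threading.Thread(target=run, args=(job,))
        t.daemon = True
        t.start()

    started = time.time()
    while time.time() - started < timeout and remaining[0] > 0:
        time.sleep(poll)
    return remaining[0]                   #>0 means some providers timed out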
Example #18
def getCameraName(camera_number):
    name = getSetting('name', camera_number)
    if name == '':
        name = '%s' % utils.translation(32000 + int(camera_number))
    return name

def playYTDLVideo(url, name, type_):
    dialog_progress_title = 'Youtube_dl'  #.format(ytdl_get_version_info())
    dialog_progress_YTDL = xbmcgui.DialogProgressBG()
    dialog_progress_YTDL.create(dialog_progress_title)
    dialog_progress_YTDL.update(10, dialog_progress_title, translation(32014))

    from YoutubeDLWrapper import YoutubeDLWrapper, _selectVideoQuality
    import pprint

    pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    pl.clear()

    dialog_progress_YTDL.update(20, dialog_progress_title, translation(32012))

    #use YoutubeDLWrapper by ruuk to avoid  bad file error
    ytdl = YoutubeDLWrapper()
    try:
        ydl_info = ytdl.extract_info(url, download=False)
        #in youtube_dl utils.py def unified_timestamp(date_str, day_first=True):
        # there was an error playing https://vimeo.com/14652586
        #   on line 1195:
        # change          except ValueError:
        #     to          except (ValueError,TypeError):
        #   this already fixed by ruuk magic. in YoutubeDLWrapper

        #log( "YoutubeDL extract_info:\n" + pprint.pformat(ydl_info, indent=1) )
        #log('quality============='+repr(ytdl_quality))
        #log('ytdl_DASH==========='+repr(ytdl_DASH))
        video_infos = _selectVideoQuality(ydl_info,
                                          quality=ytdl_quality,
                                          disable_dash=(not ytdl_DASH))
        #log( "video_infos:\n" + pprint.pformat(video_infos, indent=1, depth=5) )
        dialog_progress_YTDL.update(80, dialog_progress_title,
                                    translation(32013))

        for video_info in video_infos:
            url = video_info.get('xbmc_url')  #there is also video_info.get('url'), the url without the |useragent...
            #url="d://mp4-live-mpd-AV-BS.mpd.xml"
            title = video_info.get('title') or name
            ytdl_format = video_info.get('ytdl_format')
            description = ''  #initialize so setInfo below never sees an unbound name
            start_time = 0
            if ytdl_format:
                description = ytdl_format.get('description')
                #check if there is a time skip code
                try:
                    start_time = ytdl_format.get('start_time', 0)  #int(float(ytdl_format.get('start_time')))
                except (ValueError, TypeError):
                    start_time = 0

            li = xbmcgui.ListItem(label=title,
                                  label2='',
                                  iconImage=video_info.get('thumbnail'),
                                  thumbnailImage=video_info.get('thumbnail'),
                                  path=url)
            li.setInfo(type="Video",
                       infoLabels={
                           "Title": title,
                           "plot": description
                       })
            li.setProperty('StartOffset', str(start_time))
            pl.add(url, li)

        xbmc.Player().play(pl, windowed=False)

        #only use the time skip code if there is only one item in the playlist
        #if start_time and pl.size()==1:
        #    xbmc.Player().seekTime(start_time)

    except Exception as e:
        ytdl_ver = dialog_progress_title + ' v' + ytdl_get_version_info('local')
        err_msg = str(e) + ';'  #ERROR: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest vers....
        short_err = err_msg.split(';')[0]
        log("playYTDLVideo Exception:" + str(sys.exc_info()[0]) + "  " +
            str(e))
        xbmc_notify(ytdl_ver, short_err)

        #try urlresolver
        log('   trying urlresolver...')
        playURLRVideo(url, name, type_)
    finally:
        dialog_progress_YTDL.update(
            100, dialog_progress_title
        )  #not sure if necessary to set to 100 before closing dialogprogressbg
        dialog_progress_YTDL.close()
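
# Hedged sketch of the time-skip handoff above: Kodi honors a 'StartOffset'
# property (in seconds) on a ListItem queued into a playlist, which is how a
# t=90 style skip code survives into playback. Assumes the xbmcgui import used
# throughout this file; illustrative, not the addon's exact code.
def queue_with_offset(playlist, url, title, start_time=0):
    li = xbmcgui.ListItem(label=title, path=url)
    li.setProperty('StartOffset', str(start_time))  #e.g. '90' -> start 1:30 in
    playlist.add(url, li)
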
def listSubReddit(url, name, subreddit_key):
    from guis import progressBG
    from utils import post_is_filtered_out, set_query_field
    from reddit import has_multiple
    global GCXM_hasmultiplesubreddit, GCXM_hasmultipledomain, GCXM_hasmultipleauthor, GCXM_subreddit_key
    log("listSubReddit subreddit=%s url=%s" % (subreddit_key, url))

    currentUrl = url
    xbmcplugin.setContent(pluginhandle, "movies")  #files, songs, artists, albums, movies, tvshows, episodes, musicvideos

    loading_indicator = progressBG('Loading...')
    loading_indicator.update(8, 'Retrieving ' + subreddit_key)

    content = reddit_request(url)
    loading_indicator.update(11, subreddit_key)

    if not content:
        loading_indicator.end()  #it is important to close xbmcgui.DialogProgressBG
        return

    page_title = "[COLOR cadetblue]%s[/COLOR]" % subreddit_key

    #setPluginCategory lets us show text at the top of window, we take advantage of this and put the subreddit name
    xbmcplugin.setPluginCategory(pluginhandle, page_title)

    info_label = {"plot": translation(30013)}  #Automatically play videos
    if autoplayAll:
        addDir("[B]- " + translation(30016) + "[/B]", url, 'autoPlay', "",
               "ALL", info_label)
    if autoplayUnwatched:
        addDir("[B]- " + translation(30017) + "[/B]", url, 'autoPlay', "",
               "UNWATCHED", info_label)

    threads = []
    q_liz = Queue()  #output queue (listitem)

    content = json.loads(content)

    #A modhash is a token that the reddit API requires to help prevent CSRF. Modhashes can be obtained via the /api/me.json call or in response data of listing endpoints.
    #The preferred way to send a modhash is to include an X-Modhash custom HTTP header with your requests.
    #Modhashes are not required when authenticated with OAuth.
    #modhash=content['data']['modhash']
    #log( 'modhash='+repr(modhash) )
    #log("query returned %d items " % len(content['data']['children']) )
    posts_count = len(content['data']['children'])
    filtered_out_posts = 0

    GCXM_hasmultiplesubreddit = has_multiple('subreddit',
                                             content['data']['children'])
    GCXM_hasmultipledomain = has_multiple('domain',
                                          content['data']['children'])
    GCXM_hasmultipleauthor = has_multiple('author',
                                          content['data']['children'])
    GCXM_subreddit_key = subreddit_key
    for idx, entry in enumerate(content['data']['children']):
        try:
            if post_is_filtered_out(entry.get('data')):
                filtered_out_posts += 1
                continue

            #have threads process each reddit post
            t = threading.Thread(target=reddit_post_worker,
                                 args=(idx, entry, q_liz),
                                 name='#t%.2d' % idx)
            threads.append(t)
            t.start()

        except Exception as e:
            log(" EXCEPTION:=" + str(sys.exc_info()[0]) + "  " + str(e))

    #check the queue to determine progress
    break_counter = 0  #to avoid infinite loop
    expected_listitems = (posts_count - filtered_out_posts)
    if expected_listitems > 0:
        loading_indicator.set_tick_total(expected_listitems)
        last_queue_size = 0
        while q_liz.qsize() < expected_listitems:
            if break_counter >= 100:
                break

            #each change in the queue size gets a tick on our progress track
            if last_queue_size < q_liz.qsize():
                items_added = q_liz.qsize() - last_queue_size
                loading_indicator.tick(items_added)
            else:
                break_counter += 1

            last_queue_size = q_liz.qsize()
            xbmc.sleep(50)

    #wait for all threads to finish before collecting the list items
    for idx, t in enumerate(threads):
        #log('    joining %s' %t.getName())
        t.join(timeout=20)

    xbmc_busy(False)

    #compare the number of entries to the returned results
    #log( "queue:%d entries:%d" %( q_liz.qsize() , len(content['data']['children'] ) ) )
    if q_liz.qsize() != expected_listitems:
        log('some threads did not return a listitem')

    #liz is a tuple for addDirectoryItems
    li = [liz for idx, liz in sorted(q_liz.queue)]  #list of (url, listitem[, isFolder]) tuples
    #log(repr(li))

    #empty the queue.
    with q_liz.mutex:
        q_liz.queue.clear()

    xbmcplugin.addDirectoryItems(pluginhandle, li)

    loading_indicator.end()  #it is important to close xbmcgui.DialogProgressBG

    try:
        #this part makes sure that you load the next page instead of just the first
        after = content['data']['after']

        o = urlparse.urlparse(currentUrl)
        current_url_query = urlparse.parse_qs(o.query)

        nextUrl = set_query_field(
            currentUrl, field='after', value=after,
            replace=True)  #(url, field, value, replace=False):
        #log('$$$currenturl: ' +currentUrl)
        #log('$$$   nextUrl: ' +nextUrl)

        count = current_url_query.get('count')
        #log('$$$count   : ' +repr(count))
        if current_url_query.get('count') is None:
            #firsttime it is none
            count = itemsPerPage
        else:
            #nexttimes it will be kept incremented with itemsPerPage
            try:
                count = int(
                    current_url_query.get('count')[0]) + int(itemsPerPage)
            except ValueError:
                count = itemsPerPage

        nextUrl = set_query_field(nextUrl, 'count', count, True)
        #log('$$$   nextUrl: ' +nextUrl)

        # plot shows up on estuary. etc. ( avoids the "No information available" message on description )
        info_label = {"plot": translation(30004) + '[CR]' + page_title}
        addDir(translation(30004), nextUrl, 'listSubReddit', "", subreddit_key,
               info_label)  #Next Page
    except Exception as e:
        log('    Exception: ' + str(e))

    #the +'s got removed by url conversion
    subreddit_key = subreddit_key.replace(' ', '+')
    viewID = WINDOW.getProperty("viewid-" + subreddit_key)
    #log("  custom viewid %s for %s " %(viewID,subreddit_key) )

    if viewID:
        log("  custom viewid %s for %s " % (viewID, subreddit_key))
        xbmc.executebuiltin('Container.SetViewMode(%s)' % viewID)
    else:
        if forceViewMode:
            xbmc.executebuiltin('Container.SetViewMode(' + viewMode + ')')

    xbmcplugin.endOfDirectory(
        handle=pluginhandle,
        succeeded=True,
        updateListing=False,  #setting this to True causes the ".." entry to quit the plugin
        cacheToDisc=True)
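
# Minimal sketch of what utils.set_query_field presumably does for the
# after/count pagination above: replace (or append) one query parameter and
# reassemble the URL. An assumption based on the call sites, not the addon's
# actual implementation.
try:
    import urlparse                      #Python 2, matching this codebase
    from urllib import urlencode
except ImportError:
    import urllib.parse as urlparse
    from urllib.parse import urlencode

def sketch_set_query_field(url, field, value, replace=False):
    parts = urlparse.urlparse(url)
    query = urlparse.parse_qs(parts.query)
    if replace or field not in query:
        query[field] = [value]
    return urlparse.urlunparse(parts._replace(query=urlencode(query, doseq=True)))

# sketch_set_query_field('http://e.org/top.json?after=t3_a', 'after', 't3_b', True)
#   -> 'http://e.org/top.json?after=t3_b'
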
def process(provider, generator, filtering, has_special, verify_name=True, verify_size=True):
    """ Method for processing provider results using its generator and Filtering class instance

    Args:
        provider        (str): Provider ID
        generator  (function): Generator method, can be either ``extract_torrents`` or ``extract_from_api``
        filtering (Filtering): Filtering class instance
        has_special    (bool): Whether title contains special chars
        verify_name    (bool): Whether to double-check the results' names match the query or not
        verify_size    (bool): Whether to check the results' file sizes
    """
    log.debug("execute_process for %s with %s" % (provider, repr(generator)))
    definition = definitions[provider]
    definition = get_alias(definition, get_setting("%s_alias" % provider))

    client = Client(info=filtering.info)
    logged_in = False

    if get_setting('kodi_language', bool):
        kodi_language = xbmc.getLanguage(xbmc.ISO_639_1)
        if kodi_language:
            filtering.kodi_language = kodi_language
        language_exceptions = get_setting('language_exceptions')
        if language_exceptions.strip().lower():
            filtering.language_exceptions = re.split(r',\s?', language_exceptions)

    log.debug("[%s] Queries: %s" % (provider, filtering.queries))
    log.debug("[%s] Extras:  %s" % (provider, filtering.extras))

    for query, extra in zip(filtering.queries, filtering.extras):
        log.debug("[%s] Before keywords - Query: %s - Extra: %s" % (provider, repr(query), repr(extra)))
        if has_special:
            # Removing quotes, surrounding {title*} keywords, when title contains special chars
            query = re.sub("[\"']({title.*?})[\"']", '\\1', query)

        query = filtering.process_keywords(provider, query)
        extra = filtering.process_keywords(provider, extra)
        if 'charset' in definition and 'utf' not in definition['charset'].lower():
            try:
                query = urllib.quote(query.encode(definition['charset']))
                extra = urllib.quote(extra.encode(definition['charset']))
            except:
                pass

        log.debug("[%s] After keywords  - Query: %s - Extra: %s" % (provider, repr(query), repr(extra)))
        if not query:
            return filtering.results

        url_search = filtering.url.replace('QUERY', query)
        if extra:
            url_search = url_search.replace('EXTRA', extra)
        else:
            url_search = url_search.replace('EXTRA', '')
        url_search = url_search.replace(' ', definition['separator'])

        if 'post_data' in definition and not filtering.post_data:
            filtering.post_data = eval(definition['post_data'])

        # Creating the payload for POST method
        payload = dict()
        for key, value in filtering.post_data.iteritems():
            if 'QUERY' in value:
                payload[key] = filtering.post_data[key].replace('QUERY', query)
            else:
                payload[key] = filtering.post_data[key]

        # Creating the payload for GET method
        data = None
        if filtering.get_data:
            data = dict()
            for key, value in filtering.get_data.iteritems():
                if 'QUERY' in value:
                    data[key] = filtering.get_data[key].replace('QUERY', query)
                else:
                    data[key] = filtering.get_data[key]

        log.debug("-   %s query: %s" % (provider, repr(query)))
        log.debug("--  %s url_search before token: %s" % (provider, repr(url_search)))
        log.debug("--- %s using POST payload: %s" % (provider, repr(payload)))
        log.debug("----%s filtering with post_data: %s" % (provider, repr(filtering.post_data)))

        # Set search's "title" in filtering to double-check results' names
        if 'filter_title' in definition and definition['filter_title']:
            filtering.filter_title = True
            filtering.title = query

        if logged_in:
            log.info("[%s] Reusing previous login" % provider)
        elif 'private' in definition and definition['private']:
            username = get_setting('%s_username' % provider)
            password = get_setting('%s_password' % provider)

            if 'login_object' in definition and definition['login_object']:
                logged_in = False
                try:
                    login_object = definition['login_object'].replace('USERNAME', '"%s"' % username).replace('PASSWORD', '"%s"' % password)
                except Exception as e:
                    log.error("[{0}] Make login_object fail: {1}".format(provider, e))
                    return filtering.results

                # TODO generic flags in definitions for those...
                if provider == 'lostfilm':
                    client.open(definition['root_url'] + '/v_search.php?c=110&s=1&e=1')
                    if u'Вход. – LostFilm.TV.' in client.content:
                        pass
                    else:
                        log.info('[%s] Login successful' % provider)
                        logged_in = True

                if not logged_in and client.login(definition['root_url'] + definition['login_path'], eval(login_object), definition['login_failed']):
                    log.info('[%s] Login successful' % provider)
                    logged_in = True
                elif not logged_in:
                    log.error("[%s] Login failed: %s", provider, client.status)
                    log.debug("[%s] Failed login content: %s", provider, repr(client.content))
                    notify(translation(32089).format(provider), image=get_icon_path())
                    return filtering.results

                if logged_in:
                    if provider == 'lostfilm':
                        log.info('[%s] Search lostfilm serial ID...', provider)
                        url_search = fix_lf(url_search)
                        client.open(url_search.encode('utf-8'), post_data=payload, get_data=data)
                        series_details = re.search(r'"mark-rate-pane" rel="(\d+),(\d+),(\d+)">', client.content)
                        if series_details:
                            client.open(definition['root_url'] + '/v_search.php?c=%s&s=%s&e=%s' % (series_details.group(1), series_details.group(2), series_details.group(3)))
                            redirect_url = re.search(ur'url=(.*?)">', client.content)
                            if redirect_url is not None:
                                url_search = redirect_url.group(1)
                        else:
                            log.info('[%s] Not found ID in %s' % (provider, url_search))
                            return filtering.results

        log.info(">  %s search URL: %s" % (definition['name'].rjust(longest), url_search))

        client.open(url_search.encode('utf-8'), post_data=payload, get_data=data)
        filtering.results.extend(
            generate_payload(provider,
                             generator(provider, client),
                             filtering,
                             verify_name,
                             verify_size))
    return filtering.results
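
# Note on the eval() above: definition['login_object'] is a dict literal whose
# USERNAME/PASSWORD placeholders are substituted before evaluation. A hedged
# sketch of the same substitution without eval, assuming the template is plain
# JSON (an assumption; provider definitions may use python-only literals):
import json

def sketch_login_payload(login_object_tmpl, username, password):
    #json.dumps() quotes and escapes the credentials safely
    as_json = login_object_tmpl.replace('USERNAME', json.dumps(username)) \
                               .replace('PASSWORD', json.dumps(password))
    return json.loads(as_json)

# sketch_login_payload('{"login": USERNAME, "password": PASSWORD}', 'me', 's3cret')
#   -> {'login': 'me', 'password': 's3cret'}
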
def reddit_post_worker(idx, entry, q_out):
    import datetime
    from utils import strip_emoji, pretty_datediff, clean_str
    from reddit import determine_if_video_media_from_reddit_json, ret_sub_icon

    show_listVideos_debug = True
    credate = ""
    is_a_video = False
    title_line2 = ""
    t_on = translation(30071)  #"on"
    #t_pts = u"\U0001F4AC"  # translation(30072) #"cmnts"  comment bubble symbol. doesn't work
    #t_pts = u"\U00002709"  # translation(30072)   envelope symbol
    t_pts = 'c'
    thumb_w = 0
    thumb_h = 0

    try:
        #on 3/21/2017 we're adding a new feature that lets users view their saved posts by entering /user/username/saved as their subreddit.
        #  in addition to saved posts, users can also save comments. we need to handle it by checking for "kind"
        kind = entry.get('kind')  #t1 for comments  t3 for posts
        data = entry.get('data')
        post_id = data.get('name')
        if data:
            if kind == 't3':
                title = clean_str(data, ['title'])
                description = clean_str(data,
                                        ['media', 'oembed', 'description'])
                post_selftext = clean_str(data, ['selftext'])

                description = post_selftext + '[CR]' + description if post_selftext else description
            else:
                title = clean_str(data, ['link_title'])
                description = clean_str(data, ['body'])

            title = strip_emoji(title)  #an emoji in the title was causing a KeyError  u'\ud83c'

            commentsUrl = urlMain + clean_str(data, ['permalink'])
            #if show_listVideos_debug :log("commentsUrl"+str(idx)+"="+commentsUrl)

            try:
                aaa = data.get('created_utc')
                credate = datetime.datetime.utcfromtimestamp(aaa)
                now_utc = datetime.datetime.utcnow()
                pretty_date = pretty_datediff(now_utc, credate)
                credate = str(credate)
            except (AttributeError, TypeError, ValueError):
                credate = ""

            subreddit = clean_str(data, ['subreddit'])
            author = clean_str(data, ['author'])
            domain = clean_str(data, ['domain'])
            #log("     DOMAIN%.2d=%s" %(idx,domain))

            #ups = data.get('score',0)       #downs not used anymore
            num_comments = data.get('num_comments', 0)
            #description = "[COLOR blue]r/"+ subreddit + "[/COLOR]  [I]" + str(ups)+" pts  |  "+str(comments)+" cmnts  |  by "+author+"[/I]\n"+description
            #description = "[COLOR blue]r/"+ subreddit + "[/COLOR]  [I]" + str(ups)+" pts.  |  by "+author+"[/I]\n"+description
            #description = title_line2+"\n"+description
            #if show_listVideos_debug :log("DESCRIPTION"+str(idx)+"=["+description+"]")
            d_url = clean_str(data, ['url'])
            link_url = clean_str(data, ['link_url'])
            media_oembed_url = clean_str(data, ['media', 'oembed', 'url'])

            media_url = next(
                (item for item in [d_url, link_url, media_oembed_url] if item),
                '')
            #log("          url"+str(idx)+"="+media_url)

            thumb = clean_str(data, ['thumbnail'])
            #if show_listSubReddit_debug : log("       THUMB%.2d=%s" %( idx, thumb ))

            if not thumb.startswith('http'):  #e.g. 'nsfw','default','self': reddit has a "default" thumbnail (alien holding camera with "?")
                thumb = ""

            if thumb == "":
                thumb = clean_str(data, ['media', 'oembed', 'thumbnail_url']).replace('&amp;', '&')

            if thumb == "":  #use this subreddit's icon if thumb still empty
                try:
                    thumb = ret_sub_icon(subreddit)
                except:
                    pass

            try:
                #collect_thumbs(entry)
                preview = data.get('preview')['images'][0]['source'][
                    'url'].encode('utf-8').replace('&amp;', '&')
                #poster = entry['data']['media']['oembed']['thumbnail_url'].encode('utf-8')
                #t=thumb.split('?')[0]
                #can't preview gif thumbnail on thumbnail view, use alternate provided by reddit
                #if t.endswith('.gif'):
                #log('  thumb ends with .gif')
                #    thumb = entry['data']['thumbnail'].encode('utf-8')
                try:
                    thumb_h = float(
                        data.get('preview')['images'][0]['source']['height'])
                    thumb_w = float(
                        data.get('preview')['images'][0]['source']['width'])
                except (AttributeError, TypeError, ValueError):
                    thumb_w = 0
                    thumb_h = 0

            except Exception as e:
                #log("   getting preview image EXCEPTION:="+ str( sys.exc_info()[0]) + "  " + str(e) )
                thumb_w = 0
                thumb_h = 0
                preview = ""  #a blank preview image will be replaced with poster_url from make_addon_url_from() for domains that support it

            is_a_video = determine_if_video_media_from_reddit_json(data)

            over_18 = data.get('over_18')

            #setting: toggle showing 2-line title
            #log("   TitleAddtlInfo "+str(idx)+"="+str(TitleAddtlInfo))
            title_line2 = ""
            #if TitleAddtlInfo:
            #title_line2 = "[I][COLOR dimgrey]%s by %s [COLOR darkslategrey]r/%s[/COLOR] %d pts.[/COLOR][/I]" %(pretty_date,author,subreddit,ups)
            #title_line2 = "[I][COLOR dimgrey]"+pretty_date+" by "+author+" [COLOR darkslategrey]r/"+subreddit+"[/COLOR] "+str(ups)+" pts.[/COLOR][/I]"

            title_line2 = "[I][COLOR dimgrey]%s %s [COLOR cadetblue]r/%s[/COLOR] (%d) %s[/COLOR][/I]" % (
                pretty_date, t_on, subreddit, num_comments, t_pts)
            #title_line2 = "[I]"+str(idx)+". [COLOR dimgrey]"+ media_url[0:50]  +"[/COLOR][/I] "  # +"    "+" [COLOR darkslategrey]r/"+subreddit+"[/COLOR] "+str(ups)+" pts.[/COLOR][/I]"

            #if show_listVideos_debug : log( ("v" if is_a_video else " ") +"     TITLE"+str(idx)+"="+title)
            if show_listVideos_debug:
                log("  POST%cTITLE%.2d=%s" %
                    (("v" if is_a_video else " "), idx, title))
            #if show_listVideos_debug :log("      OVER_18"+str(idx)+"="+str(over_18))
            #if show_listVideos_debug :log("   IS_A_VIDEO"+str(idx)+"="+str(is_a_video))
            #if show_listVideos_debug :log("        THUMB"+str(idx)+"="+thumb)
            #if show_listVideos_debug :log("    MediaURL%.2d=%s" % (idx,media_url) )

            #if show_listVideos_debug :log("       HOSTER"+str(idx)+"="+hoster)
            #log("    VIDEOID"+str(idx)+"="+videoID)
            #log( "["+description+"]1["+ str(date)+"]2["+ str( count)+"]3["+ str( commentsUrl)+"]4["+ str( subreddit)+"]5["+ video_url +"]6["+ str( over_18))+"]"

            tuple_for_addDirectoryItems = addLink(
                title=title,
                title_line2=title_line2,
                iconimage=thumb,
                previewimage=preview,
                preview_w=thumb_w,
                preview_h=thumb_h,
                domain=domain,
                description=description,
                credate=credate,
                reddit_says_is_video=is_a_video,
                commentsUrl=commentsUrl,
                subreddit=subreddit,
                media_url=media_url,
                over_18=over_18,
                posted_by=author,
                num_comments=num_comments,
                post_index=idx,
                post_id=post_id)

            q_out.put([idx, tuple_for_addDirectoryItems])
    except Exception as e:
        log('  #reddit_post_worker EXCEPTION:' + repr(sys.exc_info()) + '--' +
            str(e))
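
# Hedged sketch of the clean_str(data, [keys...]) helper used throughout the
# worker above: walk a nested dict by key path and return '' (not an exception)
# when any level is missing. Inferred from the call sites; the addon's own
# helper may differ in detail.
def sketch_clean_str(data, key_path, default=''):
    node = data
    for key in key_path:
        if not isinstance(node, dict) or key not in node:
            return default
        node = node[key]
    return node if node else default

# sketch_clean_str({'media': {'oembed': {'url': 'http://x'}}},
#                  ['media', 'oembed', 'url']) -> 'http://x'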
Example #23
def autoSlideshow(url, name, type_):

    log('starting slideshow ' + url)
    ev = threading.Event()

    entries = []
    #watchdog_counter=0
    preview_w = 0
    preview_h = 0
    image = ''

    #content = opener.open(url).read()
    content = reddit_request(url)
    if not content: return
    #log( str(content) )
    #content = json.loads(content.replace('\\"', '\''))
    content = json.loads(content)

    log("slideshow %s:Parsing %d items: %s" %
        (type_, len(content['data']['children']),
         'random' if random_post_order else 'normal order'))

    data_children = content['data']['children']

    if random_post_order:
        random.shuffle(data_children)

    for j_entry in data_children:
        try:
            title = unescape(j_entry['data']['title'].encode('utf-8'))
            log("  TITLE:%s [r/%s]" %
                (title, j_entry.get('data').get('subreddit')))

            try:
                description = unescape(j_entry['data']['media']['oembed']
                                       ['description'].encode('utf-8'))
            except:
                description = ''
            #log('    description  [%s]' %description)
            try:
                post_selftext = unescape(
                    j_entry['data']['selftext'].encode('utf-8'))
            except:
                post_selftext = ''
            #log('    post_selftext[%s]' %post_selftext)

            description = post_selftext + '[CR]' + description if post_selftext else description

            try:
                media_url = j_entry['data']['url']
            except:
                media_url = j_entry['data']['media']['oembed']['url']

            try:
                preview = j_entry['data']['preview']['images'][0]['source'][
                    'url'].encode('utf-8').replace('&amp;', '&')
                try:
                    preview_h = float(j_entry['data']['preview']['images'][0]
                                      ['source']['height'])
                    preview_w = float(j_entry['data']['preview']['images'][0]
                                      ['source']['width'])
                except:
                    preview_w = 0
                    preview_h = 0

            except Exception as e:
                #log("   getting preview image EXCEPTION:="+ str( sys.exc_info()[0]) + "  " + str(e) )
                preview = ""

            ld = parse_reddit_link(link_url=media_url,
                                   assume_is_video=False,
                                   needs_preview=True,
                                   get_playable_url=True)
            if ld:
                if not preview:
                    preview = ld.poster

                if (addon.getSetting('include_albums')
                        == 'true') and (ld.media_type == sitesBase.TYPE_ALBUM):
                    dictlist = listAlbum(media_url, title, 'return_dictlist')
                    for d in dictlist:
                        #log('    (S) adding items from album ' + title  +' ' + d.get('DirectoryItem_url') )
                        t2 = d.get('li_label') if d.get('li_label') else title
                        #entries.append([ t2, d.get('DirectoryItem_url'), d.get('width'), d.get('height'), len(entries)])

                        d['li_label'] = t2
                        entries.append(d)
                        #title=''  #only put the title in once.
                else:
                    if addon.getSetting('use_reddit_preview') == 'true':
                        if preview: image = preview
                        elif ld.poster: image = ld.poster
                        #if preview: entries.append([title,preview,preview_w, preview_h,len(entries)]) #log('      (N)added preview:%s %s' %( title,preview) )
                        #elif ld.poster: entries.append([title,ld.poster,preview_w, preview_h,len(entries)])    #log('      (N)added poster:%s %s' % ( title,ld.poster) )
                    else:
                        if ld.poster:
                            image = ld.poster  #entries.append([title,ld.poster,preview_w, preview_h,len(entries)])
                        elif preview:
                            image = preview  #entries.append([title,preview,preview_w, preview_h,len(entries)])
                        #if ld.poster: entries.append([title,ld.poster,preview_w, preview_h,len(entries)])
                        #elif preview: entries.append([title,preview,preview_w, preview_h,len(entries)])

                    append_entry(entries, title, image, preview_w, preview_h,
                                 description)
            else:
                append_entry(entries, title, preview, preview_w, preview_h,
                             description)
                #log('      (N)added preview:%s' % title )

        except Exception as e:
            log('  autoSlideshow exception:' + str(e))

    #log( repr(entries))

    entries = remove_dict_duplicates(entries, 'DirectoryItem_url')

    #     #for i,e in enumerate(entries): log('  e1-%d %s' %(i, e[1]) )
    #     def k2(x): return x[1]
    #     entries=remove_duplicates(entries, k2)
    #     #for i,e in enumerate(entries): log('  e2-%d %s' %(i, e[1]) )

    for i, e in enumerate(entries):
        log('  possible playable items({0}) {1}...{2}x{3}  {4}'.format(
            i, e['li_label'].ljust(15)[:15], repr(e.get('width')),
            repr(e.get('height')), e.get('DirectoryItem_url')))

    if len(entries) == 0:
        log('  Play All: no playable items')
        xbmc.executebuiltin(
            'XBMC.Notification("%s","%s")' %
            (translation(32054),
             translation(32055)))  #Play All     No playable items
        return

    #if type.endswith("_RANDOM"):
    #    random.shuffle(entries)

    #for title, url in entries:
    #    log("  added to playlist:"+ title + "  " + url )

    log("**********playing slideshow*************")

    for e in entries:
        q.put(e)

    #s= HorizontalSlideScreensaver(ev,q)
    s = ScreensaverManager(ev, q)

    try:
        s.start_loop()
    except Exception as e:
        log("  EXCEPTION slideshowAlbum:=" + str(sys.exc_info()[0]) + "  " +
            str(e))

    return
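
# Hedged sketch of remove_dict_duplicates(entries, 'DirectoryItem_url') as
# called above: keep the first entry per key value, preserving order. Inferred
# from the call site; the addon's own helper may differ.
def sketch_remove_dict_duplicates(dicts, key):
    seen, out = set(), []
    for d in dicts:
        k = d.get(key)
        if k not in seen:
            seen.add(k)
            out.append(d)
    return out
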
def index(url, name, type_):
    from utils import xstr, samealphabetic, hassamealphabetic
    from reddit import load_subredditsFile, parse_subreddit_entry, create_default_subreddits, assemble_reddit_filter_string, ret_sub_info, ret_settings_type_default_icon

    ## this is where the main screen is created

    if not os.path.exists(subredditsFile):
        create_default_subreddits()

    #if os.path.exists(subredditsPickle):
    #    subreddits_dlist=load_dict(subredditsPickle)
    #log( pprint.pformat(subreddits_dlist, indent=1) )
    #for e in subreddits_dlist: log(e.get('entry_name'))

    #testing code
    #h="as asd [S]asdasd[/S] asdas "
    #log(markdown_to_bbcode(h))
    #addDir('test', "url", "next_mode", "", "subreddit" )

    #liz = xbmcgui.ListItem(label="test", label2="label2", iconImage="DefaultFolder.png")
    #u=sys.argv[0]+"?url=&mode=callwebviewer&type="
    #xbmcplugin.addDirectoryItem(handle=pluginhandle, url=u, listitem=liz, isFolder=False)

    #liz = xbmcgui.ListItem().fromString('Hello World')
    #xbmcplugin.addDirectoryItem(handle=pluginhandle, listitem=liz, isFolder=False)
    subredditsFile_entries = load_subredditsFile()

    subredditsFile_entries.sort(key=lambda y: y.lower())

    addtl_subr_info = {}

    #this controls what infolabels will be used by the skin. very skin specific.
    #  for estuary, this lets infolabel:plot (and genre) show up below the folder
    #  giving us the opportunity to provide a shortcut_description about the shortcuts
    xbmcplugin.setContent(pluginhandle, "mixed")  #files, songs, artists, albums, movies, tvshows, episodes, musicvideos

    next_mode = 'listSubReddit'

    for subreddit_entry in subredditsFile_entries:
        #strip out the alias identifier from the subreddit string retrieved from the file so we can process it.
        #subreddit, alias = subreddit_alias(subreddit_entry)
        addtl_subr_info = ret_sub_info(subreddit_entry)

        entry_type, subreddit, alias, shortcut_description = parse_subreddit_entry(
            subreddit_entry)
        #log( subreddit + "   " + shortcut_description )

        #url= urlMain+"/r/"+subreddit+"/.json?"+nsfw+allHosterQuery+"&limit="+itemsPerPage
        icon = default_icon = ''  #addon_path+"/resources/skins/Default/media/"+ret_settings_type_default_icon(entry_type)

        #log('  %s             icon=%s' %(subreddit, icon))
        url = assemble_reddit_filter_string("", subreddit, "yes")
        #log("assembled================="+url)
        if subreddit.lower() in ["all", "popular"]:
            addDir(subreddit, url, next_mode, icon, subreddit, {
                "plot": translation(30009)
            })  #Displays the currently most popular content from all of reddit
        else:
            if addtl_subr_info:  #if we have additional info about this subreddit
                #log(repr(addtl_subr_info))
                title = xstr(addtl_subr_info.get('title')) + '\n'
                display_name = xstr(addtl_subr_info.get('display_name'))
                if samealphabetic(title, display_name): title = ''
                #if re.sub('\W+','', display_name.lower() )==re.sub('\W+','', title.lower()): title=''
                #display_name=re.sub('\W+','', display_name.lower() )
                #title=re.sub('\W+','', title.lower())

                header_title = xstr(addtl_subr_info.get('header_title'))
                public_description = xstr(
                    addtl_subr_info.get('public_description'))

                if samealphabetic(header_title, public_description):
                    public_description = ''
                if samealphabetic(title, public_description):
                    public_description = ''
                #if hassamealphabetic(header_title,title,public_description): public_description=''

                if entry_type == 'subreddit':
                    display_name = 'r/' + display_name
                shortcut_description = '[COLOR cadetblue][B]%s[/B][/COLOR]\n%s[I]%s[/I]\n%s' % (
                    display_name, title, header_title, public_description)

                icon = addtl_subr_info.get('icon_img')
                banner = addtl_subr_info.get('banner_img')
                header = addtl_subr_info.get('header_img')  #usually the small icon in the upper left of the subreddit screen

                #log( subreddit + ' icon=' + repr(icon) +' header=' + repr(header))
                #picks the first item that is not None
                icon = next((item for item in [icon, banner, header] if item),
                            '') or default_icon

                addDirR(alias,
                        url,
                        next_mode,
                        icon,
                        type_=subreddit,
                        listitem_infolabel={"plot": shortcut_description},
                        file_entry=subreddit_entry,
                        banner_image=banner)
            else:
                addDirR(alias, url, next_mode, icon, subreddit,
                        {"plot": shortcut_description}, subreddit_entry)

    addDir("[B]- " + translation(30001) + "[/B]", "", 'addSubreddit', "", "",
           {"plot": translation(30006)
            })  #"Customize this list with your favorite subreddit."
    addDir("[B]- " + translation(30005) + "[/B]", "", 'searchReddits', "", "",
           {"plot": translation(30010)
            })  #"Search reddit for a particular post or topic

    xbmcplugin.endOfDirectory(pluginhandle)
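
# samealphabetic() and xstr() are imported from utils. Judging by the
# commented-out comparison above (re.sub('\W+','', ...)), samealphabetic
# plausibly compares two strings ignoring case and non-alphanumeric
# characters. A minimal sketch, assuming exactly that (not necessarily the
# add-on's real implementation):
def samealphabetic_sketch(a, b):
    import re
    return re.sub(r'\W+', '', (a or '').lower()) == re.sub(r'\W+', '', (b or '').lower())
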
def playYTDLVideoOLD(url, name, type_):
    #url = "http://www.youtube.com/watch?v=_yVv9dx88x0"   #a youtube ID will work as well and of course you could pass the url of another site

    #url='https://www.youtube.com/shared?ci=W8n3GMW5RCY'
    #url='http://burningcamel.com/video/waster-blonde-amateur-gets-f****d'
    #url='http://www.3sat.de/mediathek/?mode=play&obj=51264'
    #url='http://www.4tube.com/videos/209271/hurry-f**k-i-bored'
    #url='http://www.pbs.org/newshour/rundown/cubas-elian-gonzalez-now-college-graduate/'

    #these checks done in around May 2016
    #does not work:  yourlust  porntube xpornvid.com porndig.com  thumbzilla.com eporner.com yuvutu.com p**n.com pornerbros.com f*x.com flyflv.com xstigma.com sexu.com 5min.com alphaporno.com
    # stickyxtube.com xxxbunker.com bdsmstreak.com  jizzxman.com pornwebms.com pornurl.pw porness.tv openload.online pornworms.com fapgod.com porness.tv hvdporn.com pornmax.xyz xfig.net yobt.com
    # eroshare.com kalporn.com hdvideos.p**n dailygirlscute.com desianalporn.com indianxxxhd.com onlypron.com sherloxxx.com hdvideos.p**n x1xporn.com pornhvd.com lxxlx.com xrhub.com shooshtime.com
    # pornvil.com lxxlx.com redclip.xyz younow.com aniboom.com  gotporn.com  virtualtaboo.com 18porn.xyz vidshort.net fapxl.com vidmega.net freudbox.com bigtits.com xfapzap.com o****m.com
    # userporn.com hdpornstar.com moviesand.com chumleaf.com fucktube.com fookgle.com pornative.com dailee.com pornsharia.com f*x.com sluttyred.com pk5.net kuntfutube.com youpunish.com
    # vidxnet.com jizzbox.com bondagetube.tv spankingtube.tv pornheed.com pornwaiter.com lubetube.com porncor.com maxjizztube.com asianxtv.com analxtv.com yteenporn.com nurglestube.com yporn.tv
    # asiantubesex.com zuzandra.com moviesguy.com bustnow.com dirtydirtyangels.com yazum.com watchersweb.com voyeurweb.com zoig.com flingtube.com yourfreeporn.us foxgay.com goshgay.com
    # player.moviefap.com(www.moviefap.com works) nosvideo.com

    # also does not work (non p**n)
    # rutube.ru  mail.ru  afreeca.com nicovideo.jp  videos.sapo.pt(many but not all) sciencestage.com vidoosh.tv metacafe.com vzaar.com videojug.com trilulilu.ro tudou.com video.yahoo.com blinkx.com blip.tv
    # blogtv.com  brainpop.com crackle.com engagemedia.org expotv.com flickr.com fotki.com hulu.com lafango.com  mefeedia.com motionpictur.com izlesene.com sevenload.com patas.in myvideo.de
    # vbox7.com 1tv.ru 1up.com 220.ro 24video.xxx 3sat.de 56.com adultswim.com atresplayer.com techchannel.att.com v.baidu.com azubu.tv www.bbc.co.uk/iplayer bet.com biobiochile.cl biqle.com
    # bloomberg.com/news/videos bpb.de bravotv.com byutv.org cbc.ca chirbit.com cloudtime.to(almost) cloudyvideos.com cracked.com crackle.com criterion.com ctv.ca culturebox.francetvinfo.fr
    # cultureunplugged.com cwtv.com daum.net dctp.tv democracynow.org douyutv.com dumpert.nl eitb.tv ex.fm fc-zenit.ru  ikudonsubs.com akb48ma.com Flipagram.com ft.dk Formula1.com
    # fox.com/watch(few works) video.foxnews.com foxsports.com france2.fr franceculture.fr franceinter.fr francetv.fr/videos francetvinfo.fr giantbomb.com hbo.com History.com hitbox.tv
    # howcast.com HowStuffWorks.com hrt.hr iconosquare.com infoq.com  ivi.ru kamcord.com/v video.kankan.com karrierevideos.at KrasView.ru hlamer.ru kuwo.cn la7.it laola1.tv le.com
    # media.ccc.de metacritic.com mitele.es  moevideo.net,playreplay.net,videochart.net vidspot.net(might work, can't find recent post) movieclips.com mtv.de mtviggy.com muenchen.tv myspace.com
    # myvi.ru myvideo.de myvideo.ge 163.com netzkino.de nfb.ca nicovideo.jp  videohive.net normalboots.com nowness.com ntr.nl nrk.no ntv.ru/video ocw.mit.edu odnoklassniki.ru/video
    # onet.tv onionstudios.com/videos openload.co orf.at parliamentlive.tv pbs.org

    # news site (can't find sample to test)
    # bleacherreport.com crooksandliars.com DailyMail.com channel5.com Funimation.com gamersyde.com gamespot.com gazeta.pl helsinki.fi hotnewhiphop.com lemonde.fr mnet.com motorsport.com MSN.com
    # n-tv.de ndr.de NDTV.com NextMedia.com noz.de

    # these sites have mixed media. can handle the video in these sites:
    # 20min.ch 5min.com archive.org Allocine.fr(added) br.de bt.no  buzzfeed.com condenast.com firstpost.com gameinformer.com gputechconf.com heise.de HotStar.com(some play) lrt.lt natgeo.com
    # nbcsports.com  patreon.com
    # 9c9media.com(no posts)

    #ytdl plays this fine but no video?
    #coub.com

    #supported but is an audio only site
    #acast.com AudioBoom.com audiomack.com bandcamp.com clyp.it democracynow.org? freesound.org hark.com hearthis.at hypem.com libsyn.com mixcloud.com
    #Minhateca.com.br(direct mp3)

    #
    # ytdl also supports these sites:
    # myvideo.co.za  ?
    #bluegartr.com  (gif)
    # behindkink.com   (not sure)
    # facebook.com  (need to work capturing only videos)
    # features.aol.com  (inconsistent)
    # livestream.com (need to work capturing only videos)
    # mail.ru inconsistent(need to work capturing only videos)
    # miomio.tv(some play but most won't)
    # ooyala.com(some play but most won't)
    #

    #     extractors=[]
    #     from youtube_dl.extractor import gen_extractors
    #     for ie in gen_extractors():
    #         #extractors.append(ie.IE_NAME)
    #         try:
    #             log("[%s] %s " %(ie.IE_NAME, ie._VALID_URL) )
    #         except Exception as e:
    #             log( "zz   " + str(e) )

    #     extractors.sort()
    #     for n in extractors: log("'%s'," %n)
    from urlparse import urlparse
    parsed_uri = urlparse(url)
    domain = '{uri.netloc}'.format(uri=parsed_uri)

    dialog_progress_YTDL = xbmcgui.DialogProgressBG()
    dialog_progress_YTDL.create('YTDL')
    dialog_progress_YTDL.update(10, 'YTDL', 'Checking link...')

    try:
        from domains import ydtl_get_playable_url
        stream_url = ydtl_get_playable_url(url)
        if stream_url:
            dialog_progress_YTDL.update(80, 'YTDL', 'Playing')
            listitem = xbmcgui.ListItem(
                path=stream_url[0])  #plugins play video like this.
            xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
        else:
            dialog_progress_YTDL.update(40, 'YTDL', 'Trying URLResolver')
            log('YTDL Unable to get playable URL, Trying UrlResolver...')

            #ytdl seems better than urlresolver for getting the playable url...
            media_url = urlresolver.resolve(url)
            if media_url:
                dialog_progress_YTDL.update(88, 'YTDL', 'Playing')
                #log( '------------------------------------------------urlresolver stream url ' + repr(media_url ))
                listitem = xbmcgui.ListItem(path=media_url)
                xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
            else:
                log('UrlResolver cannot get a playable url')
                xbmc_notify(translation(30192), domain)

    except Exception as e:
        xbmc_notify("%s(YTDL)" % domain, str(e))
    finally:
        dialog_progress_YTDL.update(
            100, 'YTDL'
        )  #not sure if necessary to set to 100 before closing dialogprogressbg
        dialog_progress_YTDL.close()
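
# ydtl_get_playable_url() is imported from domains.py. The code above indexes
# its return value (stream_url[0]), so it presumably returns a list of
# resolved stream urls or None. A minimal sketch wrapping youtube-dl under
# that assumption (the real helper may differ):
def ydtl_get_playable_url_sketch(url):
    import youtube_dl
    ydl = youtube_dl.YoutubeDL({'quiet': True, 'format': 'best'})
    try:
        info = ydl.extract_info(url, download=False)
    except Exception:
        return None
    if not info:
        return None
    #playlists come back with an 'entries' list; single videos are one dict
    entries = info.get('entries') or [info]
    urls = [e['url'] for e in entries if e and e.get('url')]
    return urls or None
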
def build_context_menu_entries(num_comments, commentsUrl, subreddit, domain,
                               link_url, post_id):
    from reddit import assemble_reddit_filter_string, subreddit_in_favorites, this_is_a_user_saved_list
    from utils import colored_subreddit

    #crop long subreddit names in the context menu
    s = (subreddit[:12] + '..') if len(subreddit) > 12 else subreddit
    colored_subreddit_short = colored_subreddit(s)
    colored_subreddit_full = colored_subreddit(subreddit)
    colored_domain_full = colored_subreddit(domain, 'tan', False)
    entries = []

    #sys.argv[0] is plugin://plugin.video.reddit_viewer/
    #prl=zaza is just a dummy: during testing the first argument is ignored... possible bug?

    if cxm_show_open_browser:
        entries.append((
            translation(30509),  #Open in browser
            "XBMC.RunPlugin(%s?mode=openBrowser&url=%s)" %
            (sys.argv[0], urllib.quote_plus(link_url))))

    if cxm_show_comment_link or cxm_show_comments:
        if num_comments > 0:
            #if we are using a custom gui to show comments, we need to use RunPlugin. there is a weird loading/pause if we use XBMC.Container.Update. i think xbmc expects us to use addDirectoryItem
            #  if we have xbmc manage the gui(addDirectoryItem), we need to use XBMC.Container.Update. otherwise we'll get the dreaded "Attempt to use invalid handle -1" error
            #entries.append( ( translation(30050) + " (c)",  #Show comments
            #              "XBMC.RunPlugin(%s?path=%s?prl=zaza&mode=listLinksInComment&url=%s)" % ( sys.argv[0], sys.argv[0], urllib.quote_plus(commentsUrl) ) ) )
            #entries.append( ( translation(30052) , #Show comment links
            #              "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=listLinksInComment&url=%s&type=linksOnly)" % ( sys.argv[0], sys.argv[0], urllib.quote_plus(commentsUrl) ) ) )
            if cxm_show_comment_link:
                entries.append((
                    translation(30052),  #Show comment links
                    "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=listLinksInComment&url=%s&type=linksOnly)"
                    % (sys.argv[0], sys.argv[0],
                       urllib.quote_plus(commentsUrl))))
            if cxm_show_comments:
                entries.append((
                    translation(30050),  #Show comments
                    "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=listLinksInComment&url=%s)"
                    % (sys.argv[0], sys.argv[0],
                       urllib.quote_plus(commentsUrl))))
            #entries.append( ( translation(30050) + " (ActivateWindow)",  #Show comments
            #              "XBMC.ActivateWindow(Video, %s?mode=listLinksInComment&url=%s)" % (  sys.argv[0], urllib.quote_plus(site) ) ) )      #***  ActivateWindow is for the standard xbmc window
        else:
            entries.append((
                translation(30053),  #No comments
                "xbmc.executebuiltin('Action(Close)')"))

    if GCXM_hasmultiplesubreddit and cxm_show_go_to:
        entries.append((
            translation(30051) + " %s" % colored_subreddit_full,
            "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=listSubReddit&url=%s)"
            % (sys.argv[0], sys.argv[0],
               urllib.quote_plus(
                   assemble_reddit_filter_string("", subreddit, True)))))

    if cxm_show_new_from:
        #show a 'view /new from this subreddit' entry if the listing is all the same subreddit
        entries.append((
            translation(30055) + " %s" % colored_subreddit_short,
            "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=listSubReddit&url=%s)"
            % (sys.argv[0], sys.argv[0],
               urllib.quote_plus(
                   assemble_reddit_filter_string("", subreddit + '/new',
                                                 True)))))

    if cxm_show_add_shortcuts:
        if not subreddit_in_favorites(subreddit):
            #add selected subreddit to shortcuts
            entries.append((translation(30056) % colored_subreddit_short,
                            "XBMC.RunPlugin(%s?mode=addSubreddit&url=%s)" %
                            (sys.argv[0], subreddit)))

    if cxm_show_filter_subreddit:
        entries.append((translation(30057) % colored_subreddit_short,
                        "XBMC.RunPlugin(%s?mode=addtoFilter&url=%s&type=%s)" %
                        (sys.argv[0], subreddit, 'subreddit')))
    if cxm_show_filter_domain:
        entries.append((translation(30057) % colored_domain_full,
                        "XBMC.RunPlugin(%s?mode=addtoFilter&url=%s&type=%s)" %
                        (sys.argv[0], domain, 'domain')))

    #only available if user gave reddit_viewer permission to interact with their account
    #reddit_refresh_token=addon.getSetting("reddit_refresh_token")
    from reddit import reddit_refresh_token
    if reddit_refresh_token and cxm_show_reddit_save:
        if this_is_a_user_saved_list(GCXM_subreddit_key):
            #only show the unsave option if viewing /user/xxxx/saved
            entries.append(
                (translation(30059),
                 "XBMC.RunPlugin(%s?mode=reddit_save&url=%s&name=%s)" %
                 (sys.argv[0], '/api/unsave/', post_id)))
        else:
            entries.append(
                (translation(30058),
                 "XBMC.RunPlugin(%s?mode=reddit_save&url=%s&name=%s)" %
                 (sys.argv[0], '/api/save/', post_id)))

    if cxm_show_youtube_items:
        #check if link_url is youtube
        from domains import ClassYoutube
        match = re.compile(ClassYoutube.regex, re.I).findall(link_url)  #regex='(youtube.com/)|(youtu.be/)|(youtube-nocookie.com/)|(plugin.video.youtube/play)'
        if match:
            #video_id=ClassYoutube.get_video_id(link_url )
            #log('video id:'+repr(video_id))
            entries.append((
                translation(30048),
                "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=listRelatedVideo&url=%s&type=%s)"
                % (sys.argv[0], sys.argv[0], urllib.quote_plus(link_url), 'channel')))
            entries.append((
                translation(30049),
                "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=listRelatedVideo&url=%s&type=%s)"
                % (sys.argv[0], sys.argv[0], urllib.quote_plus(link_url), 'related')))
    #not working...
    #entries.append( ( translation(30054) ,
    #                  "XBMC.Container.Update(%s?path=%s?prl=zaza&mode=playURLResolver&url=%s)" % ( sys.argv[0], sys.argv[0],urllib.quote_plus(media_url) ) ) )
    #entries.append( ( translation(30054) ,
    #                  "XBMC.RunPlugin(%s?path=%s?prl=zaza&mode=playURLRVideo&url=%s)" % ( sys.argv[0], sys.argv[0], urllib.quote_plus(media_url) ) ) )

    #favEntry = '<favourite name="'+title+'" url="'+DirectoryItem_url+'" description="'+description+'" thumb="'+iconimage+'" date="'+credate+'" site="'+site+'" />'
    #entries.append((translation(30022), 'RunPlugin(plugin://'+addonID+'/?mode=addToFavs&url='+urllib.quote_plus(favEntry)+'&type='+urllib.quote_plus(subreddit)+')',))
    return entries
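
# Hedged usage example (the calling code lives elsewhere in this add-on):
# the entries list returned above is meant to be attached to the ListItem
# that represents the post, e.g.:
#   liz = xbmcgui.ListItem(label=post_title)
#   liz.addContextMenuItems(build_context_menu_entries(
#       num_comments, commentsUrl, subreddit, domain, link_url, post_id))
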
def error_message(message, name, type_):
    if name:
        sub_msg = name
    else:
        sub_msg = translation(30021)  #Parsing error
    xbmc_notify(message, sub_msg)
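
# xbmc_notify() is a small helper defined elsewhere in the add-on; a plausible
# sketch using Kodi's standard notification dialog (an assumption about its body):
def xbmc_notify_sketch(heading, message, time_ms=5000, icon=''):
    import xbmcgui
    xbmcgui.Dialog().notification(heading, message, icon, time_ms)
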
def listSubReddit(url, name, subreddit_key):
    from guis import progressBG
    from utils import post_is_filtered_out, set_query_field
    from reddit import has_multiple
    global GCXM_hasmultiplesubreddit,GCXM_hasmultipledomain,GCXM_hasmultipleauthor,GCXM_subreddit_key
    log("listSubReddit subreddit=%s url=%s" %(subreddit_key,url) )

    currentUrl = url
    xbmcplugin.setContent(pluginhandle, "movies") #files, songs, artists, albums, movies, tvshows, episodes, musicvideos

    loading_indicator=progressBG('Loading...')
    loading_indicator.update(8,'Retrieving '+subreddit_key)

    content = reddit_request(url)
    loading_indicator.update(11,subreddit_key  )

    if not content:
        loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG
        return

    page_title="[COLOR cadetblue]%s[/COLOR]" %subreddit_key

    xbmcplugin.setPluginCategory(pluginhandle, page_title)

    info_label={ "plot": translation(30013) }  #Automatically play videos
    if autoplayAll:       addDir("[B]- "+translation(30016)+"[/B]", url, 'autoPlay', "", "ALL", info_label)
    if autoplayUnwatched: addDir("[B]- "+translation(30017)+"[/B]" , url, 'autoPlay', "", "UNWATCHED", info_label)

    threads = []
    q_liz = Queue()   #output queue (listitem)

    content = json.loads(content)

    posts_count=len(content['data']['children'])
    filtered_out_posts=0

    GCXM_hasmultiplesubreddit=has_multiple('subreddit', content['data']['children'])
    GCXM_hasmultipledomain=has_multiple('domain', content['data']['children'])
    GCXM_hasmultipleauthor=has_multiple('author', content['data']['children'])
    GCXM_subreddit_key=subreddit_key
    for idx, entry in enumerate(content['data']['children']):
        try:
            if post_is_filtered_out( entry.get('data') ):
                filtered_out_posts+=1
                continue

            t = threading.Thread(target=reddit_post_worker, args=(idx, entry,q_liz), name='#t%.2d'%idx)
            threads.append(t)
            t.start()

        except Exception as e:
            log(" EXCEPTION:="+ str( sys.exc_info()[0]) + "  " + str(e) )

    break_counter=0 #to avoid infinite loop
    expected_listitems=(posts_count-filtered_out_posts)
    if expected_listitems>0:
        loading_indicator.set_tick_total(expected_listitems)
        last_queue_size=0
        while q_liz.qsize() < expected_listitems:
            if break_counter>=100:
                break

            if last_queue_size < q_liz.qsize():
                items_added=q_liz.qsize()-last_queue_size
                loading_indicator.tick(items_added)
            else:
                break_counter+=1

            last_queue_size=q_liz.qsize()
            xbmc.sleep(50)

    for t in threads:
        t.join(timeout=20)

    xbmc_busy(False)

    if q_liz.qsize() != expected_listitems:
        log('some threads did not return a listitem')

    li=[ liz for idx,liz in sorted(q_liz.queue) ]  #list of (url, listitem[, isFolder]) as a tuple

    with q_liz.mutex:
        q_liz.queue.clear()

    xbmcplugin.addDirectoryItems(pluginhandle, li)

    loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG

    try:
        after = content['data']['after']

        o = urlparse.urlparse(currentUrl)
        current_url_query = urlparse.parse_qs(o.query)

        nextUrl = set_query_field(currentUrl, field='after', value=after, replace=True)  #set_query_field(url, field, value, replace=False)

        #keep a running item count so the next page is numbered correctly
        if current_url_query.get('count') is None:
            count = itemsPerPage
        else:
            try:
                count = int(current_url_query.get('count')[0]) + int(itemsPerPage)
            except ValueError:
                count = itemsPerPage

        nextUrl = set_query_field(nextUrl, 'count', count, True)

        info_label = {"plot": translation(30004) + '[CR]' + page_title}
        addDir(translation(30004), nextUrl, 'listSubReddit', "", subreddit_key, info_label)  #Next Page
    except Exception as e:
        log('    Exception: ' + str(e))

    subreddit_key = subreddit_key.replace(' ', '+')
    viewID = WINDOW.getProperty("viewid-" + subreddit_key)

    if viewID:
        log("  custom viewid %s for %s " % (viewID, subreddit_key))
        xbmc.executebuiltin('Container.SetViewMode(%s)' % viewID)
    elif forceViewMode:
        xbmc.executebuiltin('Container.SetViewMode(' + viewMode + ')')

    xbmcplugin.endOfDirectory(handle=pluginhandle,
                              succeeded=True,
                              updateListing=False,   #setting this to True causes the ".." entry to quit the plugin
                              cacheToDisc=True)
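
# set_query_field() is imported from utils; from the calls above and the
# inline signature note (url, field, value, replace=False), a minimal sketch
# could look like this (an assumption, not the add-on's actual code):
def set_query_field_sketch(url, field, value, replace=False):
    import urllib
    import urlparse
    parts = list(urlparse.urlparse(url))
    query = urlparse.parse_qsl(parts[4])
    if replace:
        query = [(k, v) for (k, v) in query if k != field]  #drop the old value first
    query.append((field, str(value)))
    parts[4] = urllib.urlencode(query)
    return urlparse.urlunparse(parts)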