# -*- coding: utf-8 -*-
#module-level imports assumed by the functions below (Python 2 / Kodi addon):
import sys, json, threading
import urllib, urlparse
from Queue import Queue
import xbmc, xbmcgui, xbmcplugin
#addon helpers such as log, translation, reddit_request, addDir, r_linkHunter,
#harvest, pluginhandle, WINDOW and the settings globals are defined elsewhere in the addon
def populate_subreddits_pickle():
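    """Read the saved subreddits file and fetch each entry's info/icons in background threads."""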
    from guis import progressBG
    loading_indicator=progressBG(translation(30026))   #Gathering icons..

    with open(subredditsFile, 'r') as fh:
        subreddit_settings = fh.readlines()

    loading_indicator.set_tick_total(len(subreddit_settings))
    for entry in subreddit_settings:
        entry=entry.strip()
        loading_indicator.tick(1,entry)
        s=convert_settings_entry_into_subreddits_list_or_domain(entry)
        if s:
            log('processing saved entry:'+repr(entry))
            get_subreddit_entry_info_thread(s)

    xbmc.sleep(2000)  #give the info-gathering threads a moment to finish before closing the progress bar
    loading_indicator.end()
def listLinksInComment(url, name, type_):
    from domains import parse_reddit_link, build_DirectoryItem_url_based_on_media_type
    from utils import markdown_to_bbcode, unescape
    from guis import progressBG
    #from resources.domains import make_addon_url_from
    #called from context menu
    log('listLinksInComment:%s:%s' %(type_,url) )

    #does not work for list comments because the key is the playable url (not the reddit comments url)
    #msg=WINDOW.getProperty(url)
    #WINDOW.clearProperty( url )
    #log( '   msg=' + msg )

    directory_items=[]
    author=""
    ShowOnlyCommentsWithlink=False

    if type_=='linksOnly':
        ShowOnlyCommentsWithlink=True

    #url='https://www.reddit.com/r/Music/comments/4k02t1/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/' + '.json'
    #only get up to "https://www.reddit.com/r/Music/comments/4k02t1".
    #   do not include                                            "/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/"
    #   because we'll have problem when it looks like this: "https://www.reddit.com/r/Overwatch/comments/4nx91h/ever_get_that_feeling_déjà_vu/"

    #url=re.findall(r'(.*/comments/[A-Za-z0-9]+)',url)[0]

    #use the safe= argument of quote_plus to keep the url structure and only encode the weird chars
    url=urllib.quote_plus(url,safe=':/?&')
    if '?' in url:
        url=url.split('?', 1)[0]+'.json?'+url.split('?', 1)[1]
    else:
        url+= '.json'
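    #e.g. '.../comments/4nx91h/ever_get_that_feeling_déjà_vu/?ref=x' becomes
    #     '.../comments/4nx91h/ever_get_that_feeling_d%C3%A9j%C3%A0_vu/.json?ref=x'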

    loading_indicator=progressBG(translation(30024))
    loading_indicator.update(0,'Retrieving comments')

    content = reddit_request(url)
    if not content:
        loading_indicator.end()
        return

    loading_indicator.update(10,'Parsing')
    content = json.loads(content)
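    #a comments .json is a 2-item array: [0] is a listing holding the post itself, [1] holds the comment tree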

    del harvest[:]
    #harvest links in the post text (just 1)
    r_linkHunter(content[0]['data']['children'])

    try: submitter=content[0]['data']['children'][0]['data']['author']
    except (KeyError, IndexError, TypeError): submitter=''

    #the post title is provided in json, we'll just use that instead of messages from addLink()
    try: post_title=content[0]['data']['children'][0]['data']['title']
    except (KeyError, IndexError, TypeError): post_title=''
    #for i, h in enumerate(harvest):
    #    log("aaaaa first harvest "+h[2])

    #harvest links in the post itself
    r_linkHunter(content[1]['data']['children'])

    comment_score=0
    desc100=''  #initialized here so the progress tick below can't hit an unbound name if an entry raises early

    loading_indicator.set_tick_total(len(harvest))

    for i, h in enumerate(harvest):
        try:
            #log(str(i)+"  score:"+ str(h[0]).zfill(5)+" "+ h[1] +'|'+ h[3] )
            comment_score=h[0]
            #log("score %d < %d (%s)" %(comment_score,int_CommentTreshold, CommentTreshold) )
            link_url=h[2]
            desc100=h[3].replace('\n',' ')[0:100] #first 100 characters of description

            kind=h[6] #reddit uses t1 for user comments and t3 for the OP's own text describing the post
            d=h[5]   #depth of the comment

            tab=" "*d if d>0 else "-"  #indent the comment according to its depth in the tree

            from urlparse import urlparse
            domain = '{uri.netloc}'.format( uri=urlparse( link_url ) )

            author=h[7]
            DirectoryItem_url=''

            if comment_score < int_CommentTreshold:
                continue

            #hoster, DirectoryItem_url, videoID, mode_type, thumb_url,poster_url, isFolder,setInfo_type, setProperty_IsPlayable =make_addon_url_from(h[2])
            #if link_url:
            #    log( '  comment %s TITLE:%s... link[%s]' % ( str(d).zfill(3), desc100.ljust(20)[:20],link_url ) )

            ld=parse_reddit_link(link_url=link_url, assume_is_video=False, needs_preview=True, get_playable_url=True )

            if kind=='t1':
                list_title=r"[COLOR cadetblue]%3d[/COLOR] %s" %( h[0], tab )
            elif kind=='t3':
                list_title=r"[COLOR cadetblue]Title [/COLOR] %s" %( tab )

            #helps the textbox control treat [url description] and (url) as separate words so they can be split into 2 lines
            plot=h[3].replace('](', '] (')
            plot= markdown_to_bbcode(plot)
            plot=unescape(plot)  #convert html entities e.g.:(&#39;)

            liz=xbmcgui.ListItem(label=list_title +': '+ desc100)

            liz.setInfo( type="Video", infoLabels={ "Title": h[1], "plot": plot, "studio": domain, "votes": str(comment_score), "director": author  } )
            isFolder=False

            #force all links to ytdl to see if it can be played
            if link_url:
                DirectoryItem_url, setProperty_IsPlayable, isFolder, title_prefix = build_DirectoryItem_url_based_on_media_type(ld, link_url)

                liz.setProperty('IsPlayable', setProperty_IsPlayable)
                liz.setProperty('url', DirectoryItem_url)  #<-- needed by the xml gui skin
                liz.setPath(DirectoryItem_url)

                if domain:
                    plot= "  [COLOR greenyellow][%s] %s"%(domain, plot )  + "[/COLOR]"
                else:
                    plot= "  [COLOR greenyellow][%s]"%( plot ) + "[/COLOR]"
                liz.setLabel(list_title+plot)

                if ld:
                    liz.setArt({"thumb": ld.poster, "poster":ld.poster, "banner":ld.poster, "fanart":ld.poster, "landscape":ld.poster   })

            if DirectoryItem_url:
                #log( 'IsPlayable:'+setProperty_IsPlayable )
                directory_items.append( (DirectoryItem_url, liz, isFolder,) )
                #xbmcplugin.addDirectoryItem(handle=pluginhandle,url=DirectoryItem_url,listitem=liz,isFolder=isFolder)
            else:
                #this section is for comments that have no links
                if not ShowOnlyCommentsWithlink:
                    result=h[3].replace('](', '] (')
                    result=markdown_to_bbcode(result)
                    liz=xbmcgui.ListItem(label=list_title + desc100)
                    liz.setInfo( type="Video", infoLabels={ "Title": h[1], "plot": result, "studio": domain, "votes": str(h[0]), "director": author } )
                    liz.setProperty('IsPlayable', 'false')

                    directory_items.append( ("", liz, False,) )
                    #xbmcplugin.addDirectoryItem(handle=pluginhandle,url="",listitem=liz,isFolder=False)

                #END of section for comments that have no links or unsupported links
        except Exception as e:
            log('  EXCEPTION:' + str(e) )

        #for di in directory_items:
        #    log( str(di) )

        loading_indicator.tick(1, desc100)
    loading_indicator.end()

    #log('  comments_view id=%s' %comments_viewMode)

    #xbmcplugin.setContent(pluginhandle, "mixed")  #in estuary, mixed have limited view id's available. it has widelist which is nice for comments but we'll just stick with 'movies'
    xbmcplugin.setContent(pluginhandle, "episodes")    #files, songs, artists, albums, movies, tvshows, episodes, musicvideos
    xbmcplugin.setPluginCategory(pluginhandle,'Comments')

    xbmcplugin.addDirectoryItems(handle=pluginhandle, items=directory_items )
    xbmcplugin.endOfDirectory(pluginhandle)

    if comments_viewMode:
        xbmc.executebuiltin('Container.SetViewMode(%s)' %comments_viewMode)
def listSubReddit(url, name, subreddit_key):
    from guis import progressBG
    from utils import post_is_filtered_out, set_query_field
    from reddit import has_multiple
    global GCXM_hasmultiplesubreddit,GCXM_hasmultipledomain,GCXM_hasmultipleauthor,GCXM_subreddit_key
    log("listSubReddit subreddit=%s url=%s" %(subreddit_key,url) )

    currentUrl = url

    #use the "episodes" content type rather than "movies" so that the Wall and InfoWall views in the default Estuary skin show titles on each item like the Youtube and RedditTV addons. 
    #The "movies" type assumes the thumbnails are posters and don't need a title which is definitely not the case for reddit content.  --credit:Kestrel
    xbmcplugin.setContent(pluginhandle, "episodes") #files, songs, artists, albums, movies, tvshows, episodes, musicvideos

    loading_indicator=progressBG('Loading...')
    loading_indicator.update(8,'Retrieving '+subreddit_key)

    content = reddit_request(url)
    loading_indicator.update(11,subreddit_key  )

    if not content:
        loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG
        return

    page_title="[COLOR cadetblue]%s[/COLOR]" %subreddit_key

    #setPluginCategory lets us show text at the top of window, we take advantage of this and put the subreddit name
    xbmcplugin.setPluginCategory(pluginhandle, page_title)

    info_label={ "plot": translation(30013) }  #Automatically play videos
    if autoplayAll:       addDir("[B]- "+translation(30016)+"[/B]", url, 'autoPlay', "", "ALL", info_label)
    if autoplayUnwatched: addDir("[B]- "+translation(30017)+"[/B]" , url, 'autoPlay', "", "UNWATCHED", info_label)

    threads = []
    q_liz = Queue()   #output queue (listitem)

    content = json.loads(content)

    #A modhash is a token that the reddit API requires to help prevent CSRF. Modhashes can be obtained via the /api/me.json call or in response data of listing endpoints.
    #The preferred way to send a modhash is to include an X-Modhash custom HTTP header with your requests.
    #Modhashes are not required when authenticated with OAuth.
    #modhash=content['data']['modhash']
    #log( 'modhash='+repr(modhash) )
    #log("query returned %d items " % len(content['data']['children']) )
    posts_count=len(content['data']['children'])
    filtered_out_posts=0

    GCXM_hasmultiplesubreddit=has_multiple('subreddit', content['data']['children'])
    GCXM_hasmultipledomain=has_multiple('domain', content['data']['children'])
    GCXM_hasmultipleauthor=has_multiple('author', content['data']['children'])
    GCXM_subreddit_key=subreddit_key
    for idx, entry in enumerate(content['data']['children']):
        try:
            if post_is_filtered_out( entry.get('data') ):
                filtered_out_posts+=1
                continue

            #have threads process each reddit post
            t = threading.Thread(target=reddit_post_worker, args=(idx, entry,q_liz), name='#t%.2d'%idx)
            threads.append(t)
            t.start()

        except Exception as e:
            log(" EXCEPTION:="+ str( sys.exc_info()[0]) + "  " + str(e) )

    #check the queue to determine progress
    break_counter=0 #to avoid infinite loop
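    #(100 idle polls at 50ms each ~= 5 seconds without a new item before we give up)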
    expected_listitems=(posts_count-filtered_out_posts)
    if expected_listitems>0:
        loading_indicator.set_tick_total(expected_listitems)
        last_queue_size=0
        while q_liz.qsize() < expected_listitems:
            if break_counter>=100:
                break

            #each change in the queue size gets a tick on our progress track
            if last_queue_size < q_liz.qsize():
                items_added=q_liz.qsize()-last_queue_size
                loading_indicator.tick(items_added)
            else:
                break_counter+=1

            last_queue_size=q_liz.qsize()
            xbmc.sleep(50)

    #wait for all threads to finish before collecting the list items
    for idx, t in enumerate(threads):
        #log('    joining %s' %t.getName())
        t.join(timeout=20)

    xbmc_busy(False)

    #compare the number of entries to the returned results
    #log( "queue:%d entries:%d" %( q_liz.qsize() , len(content['data']['children'] ) ) )
    if q_liz.qsize() != expected_listitems:
        log('some threads did not return a listitem')

    #liz is a tuple for addDirectoryItems
    li=[ liz for idx,liz in sorted(q_liz.queue) ]  #list of (url, listitem[, isFolder]) as a tuple
    #log(repr(li))

    #empty the queue.
    with q_liz.mutex:
        q_liz.queue.clear()

    xbmcplugin.addDirectoryItems(pluginhandle, li)

    loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG

    try:
        #this part makes sure that you load the next page instead of just the first
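        #reddit paginates listings with 'after' (fullname of the last item served) and 'count' (how many items seen so far)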
        after=content['data']['after']

        o = urlparse.urlparse(currentUrl)
        current_url_query = urlparse.parse_qs(o.query)

        nextUrl=set_query_field(currentUrl, field='after', value=after, replace=True)  #(url, field, value, replace=False):
        #log('$$$currenturl: ' +currentUrl)
        #log('$$$   nextUrl: ' +nextUrl)

        count=current_url_query.get('count')
        #log('$$$count   : ' +repr(count))
        if count is None:
            #first time it is None
            count=itemsPerPage
        else:
            #next time it will have been incremented by itemsPerPage
            try: count=int(count[0]) + int(itemsPerPage)
            except ValueError: count=itemsPerPage

        nextUrl=set_query_field(nextUrl,'count', count, True)
        #log('$$$   nextUrl: ' +nextUrl)

        # plot shows up on estuary. etc. ( avoids the "No information available" message on description )
        info_label={ "plot": translation(30004) + '[CR]' + page_title}
        addDir(translation(30004), nextUrl, 'listSubReddit', "", subreddit_key,info_label)   #Next Page
    except Exception as e:
        log('    Exception: '+ str(e))

    #the +'s got removed by url conversion
    subreddit_key=subreddit_key.replace(' ','+')
    viewID=WINDOW.getProperty( "viewid-"+subreddit_key )
    #log("  custom viewid %s for %s " %(viewID,subreddit_key) )

    if viewID:
        log("  custom viewid %s for %s " %(viewID,subreddit_key) )
        xbmc.executebuiltin('Container.SetViewMode(%s)' %viewID )
    else:
        if forceViewMode:
            xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')

    xbmcplugin.endOfDirectory(handle=pluginhandle,
                              succeeded=True,
                              updateListing=False,   #setting this to True causes the ".." entry to quit the plugin
                              cacheToDisc=True)
def listSubReddit(url, subreddit_key, type_):
    from guis import progressBG
    from utils import post_is_filtered_out, build_script, compose_list_item, xbmc_notify,prettify_reddit_query, set_query_field
    from reddit import reddit_request, has_multiple, assemble_reddit_filter_string, subreddit_icoheader_banner

    global GCXM_hasmultiplesubreddit, GCXM_actual_url_used_to_generate_these_posts,GCXM_reddit_query_of_this_gui,GCXM_hasmultipledomain,GCXM_hasmultipleauthor
    #the +'s got removed by url conversion
    title_bar_name=subreddit_key.replace(' ','+')
    if title_bar_name.startswith('?'):
        title_bar_name=prettify_reddit_query(title_bar_name)
    #log("  title_bar_name %s " %(title_bar_name) )

    log("listSubReddit r/%s\n %s" %(title_bar_name,url) )

    currentUrl = url
    icon=banner=header=None
    xbmc_busy()

    loading_indicator=progressBG('Loading...')
    loading_indicator.update(0,'Retrieving '+subreddit_key)
    content = reddit_request(url)
    loading_indicator.update(10,subreddit_key  )

    if not content:
        xbmc_busy(False)
        loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG
        return

    threads = []
    q_liz = Queue()   #output queue (listitem)

    content = json.loads(content)
    #log("query returned %d items " % len(content['data']['children']) )
    posts_count=len(content['data']['children'])
    filtered_out_posts=0

    hms=has_multiple('subreddit', content['data']['children'])

    if hms==False:  #r/random and r/randnsfw return a random subreddit. we need to use the name of that subreddit for the "next page" link.
        try: g=content['data']['children'][0]['data']['subreddit']
        except KeyError: g=""
        except IndexError:
            xbmc_busy(False)
            loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG
            xbmc_notify("List Subreddit",translation(32022))
            return
        if g:
            title_bar_name=g
            #preserve the &after string so that functions like play slideshow and play all videos can 'play' the correct page
            #  extract the &after string from currentUrl -OR- send it with the 'type' argument when calling this function.
            currentUrl=assemble_reddit_filter_string('',g) + '&after=' + type_

        #put subreddit icon/header in the GUI
        icon,banner,header=subreddit_icoheader_banner(g)

    GCXM_hasmultiplesubreddit=hms
    GCXM_hasmultipledomain=has_multiple('domain', content['data']['children'])
    GCXM_hasmultipleauthor=has_multiple('author', content['data']['children'])
    GCXM_actual_url_used_to_generate_these_posts=url
    GCXM_reddit_query_of_this_gui=currentUrl

    for idx, entry in enumerate(content['data']['children']):
        try:
            if post_is_filtered_out( entry.get('data') ):
                filtered_out_posts+=1
                continue

            domain,domain_count=count_links_from_same_domain( entry ) #count how many same domains we're hitting
            delay=compute_anti_dos_delay(domain,domain_count)
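            #the computed delay staggers each worker's start so we don't hit one host with many simultaneous requests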
            #have threads process each reddit post
            t = threading.Thread(target=reddit_post_worker, args=(idx, entry,q_liz,delay), name='#t%.2d'%idx)
            threads.append(t)
            t.start()

        except Exception as e:
            log(" EXCEPTION:="+ str( sys.exc_info()[0]) + "  " + str(e) )

    log( repr(domains_d) )
    #check the queue to determine progress
    break_counter=0 #to avoid infinite loop
    expected_listitems=(posts_count-filtered_out_posts)
    if expected_listitems>0:
        loading_indicator.set_tick_total(expected_listitems)
        last_queue_size=0
        while q_liz.qsize() < expected_listitems:
            if break_counter>=500:
                #log('break counter reached limit')
                break
            #each change in the queue size gets a tick on our progress track
            if last_queue_size < q_liz.qsize():
                items_added=q_liz.qsize()-last_queue_size
                loading_indicator.tick(items_added)
            else:
                break_counter+=1

            last_queue_size=q_liz.qsize()
            xbmc.sleep(100)

    #wait for all threads to finish before collecting the list items
    for idx, t in enumerate(threads):
        #log('    joining %s' %t.getName())
        t.join(timeout=20) #<-- does not seem to work

    xbmc_busy(False)

    #compare the number of entries to the returned results
    #log( "queue:%d entries:%d" %( q_liz.qsize() , len(content['data']['children'] ) ) )
    if q_liz.qsize() != expected_listitems:
        #some post might be filtered out.
        log('some threads did not return a listitem')

    #for t in threads: log('isAlive %s %s' %(t.getName(), repr(t.isAlive()) )  )

    #liu=[ qi for qi in sorted(q_liz.queue) ]
    li=[ liz for idx,liz in sorted(q_liz.queue) ]

    #empty the queue.
    with q_liz.mutex:
        q_liz.queue.clear()

    loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG

    try:
        #this part makes sure that you load the next page instead of just the first
        after=content['data']['after']
        o = urlparse.urlparse(currentUrl)
        current_url_query = urlparse.parse_qs(o.query)

        count=current_url_query.get('count')
        if current_url_query.get('count')==None:
            #firsttime it is none
            count=itemsPerPage
        else:
            #nexttimes it will be kept incremented with itemsPerPage
            try: count=int(current_url_query.get('count')[0]) + int(itemsPerPage)
            except ValueError: count=itemsPerPage

        nextUrl=set_query_field(currentUrl,'count', count, True)
        #log('$$$   nextUrl: ' +nextUrl)

        nextUrl=set_query_field(nextUrl, field='after', value=after, replace=True)  #(url, field, value, replace=False):
        #log('$$$currenturl: ' +currentUrl)
        #log('$$$   nextUrl: ' +nextUrl)

        liz = compose_list_item( translation(32024), "", "DefaultFolderNextSquare.png", "script", build_script("listSubReddit",nextUrl,title_bar_name,after) )
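        #build_script presumably composes the RunScript url so that selecting 'next page' re-invokes listSubReddit with nextUrl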

        #for items at the bottom left corner
        liz.setArt({ "clearart": "DefaultFolderNextSquare.png"  })
        liz.setInfo(type='video', infoLabels={"Studio":translation(32024)})
        liz.setProperty('link_url', nextUrl )
        li.append(liz)

    except Exception as e:
        log(" EXCEPTzION:="+ str( sys.exc_info()[0]) + "  " + str(e) )

    xbmc_busy(False)

    title_bar_name=urllib.unquote_plus(title_bar_name)
    ui=skin_launcher('listSubReddit',
                     title_bar_name=title_bar_name,
                     listing=li,
                     subreddits_file=subredditsFile,
                     currentUrl=currentUrl,
                     icon=icon,
                     banner=banner,
                     header=header)
    ui.doModal()
    del ui
def listLinksInComment(url, name, type_):
    from guis import progressBG
    from reddit import reddit_request
    from utils import clean_str,remove_duplicates, is_filtered
    from default import comments_link_filter

    log('listLinksInComment:%s:%s' %(type_,url) )

    post_title=''
    global harvest
#    ShowOnlyCommentsWithlink=False
#    if type_=='linksOnly':
#        ShowOnlyCommentsWithlink=True

    #url='https://np.reddit.com/r/videos/comments/64j9x7/doctor_violently_dragged_from_overbooked_cia/dg2pbtj/?st=j1cbxsst&sh=2d5daf4b'
    #url=url.split('?', 1)[0]+'.json'+url.split('?', 1)[1]

    #log(repr(url.split('?', 1)[0]))
    #log(repr(url.split('?', 1)[1]))
    #log(repr(url.split('?', 1)[0]+'.json?'+url.split('?', 1)[1]))

    #url='https://www.reddit.com/r/Music/comments/4k02t1/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/' + '.json'
    #only get up to "https://www.reddit.com/r/Music/comments/4k02t1".
    #   do not include                                            "/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/"
    #   because we'll have problem when it looks like this: "https://www.reddit.com/r/Overwatch/comments/4nx91h/ever_get_that_feeling_déjà_vu/"
    #url=re.findall(r'(.*/comments/[A-Za-z0-9]+)',url)[0]
    #UPDATE you need to convert this: https://www.reddit.com/r/redditviewertesting/comments/4x8v1k/test_test_what_is_déjà_vu/
    #                        to this: https://www.reddit.com/r/redditviewertesting/comments/4x8v1k/test_test_what_is_d%C3%A9j%C3%A0_vu/
    #
    #use the safe= argument of quote_plus to keep the url structure and only encode the weird chars
    url=urllib.quote_plus(url,safe=':/?&')

    if '?' in url:
        url=url.split('?', 1)[0]+'.json?'+url.split('?', 1)[1]
    else:
        url+= '.json'

    xbmc_busy()

    loading_indicator=progressBG('Loading...')
    loading_indicator.update(0,'Retrieving comments')
    content = reddit_request(url)
    loading_indicator.update(10,'Parsing')

    if not content:
        loading_indicator.end()
        return

    try:
        xbmc_busy()
        content = json.loads(content)

        del harvest[:]  #clear results from any previous run (harvest is a module-level list)
        #harvest links in the post text (just 1)
        r_linkHunter(content[0]['data']['children'])

        #submitter=content[0]['data']['children'][0]['data']['author']
        submitter=clean_str(content,[0,'data','children',0,'data','author'])
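        #(clean_str presumably walks the nested keys and returns '' when any step is missing)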

        #the post title is provided in json, we'll just use that instead of messages from addLink()
        #post_title=content[0]['data']['children'][0]['data']['title']
        post_title=clean_str(content,[0,'data','children',0,'data','title'])

        #harvest links in the post itself
        r_linkHunter(content[1]['data']['children'])
        #for i, h in enumerate(harvest):
        #    log( '  %d %s %.4d -%s   link[%s]' % ( i, h[7].ljust(8)[:8], h[0], h[3].ljust(20)[:20],h[2] ) )

        comments_count_orig=len(harvest)
        #log(' len harvest1 '+repr(len(harvest)))
        #remove duplicate links
        def k2(x): return (x[2],x[3])   #dedup key: (link_url, comment text)
        harvest=remove_duplicates(harvest,k2)
        comments_count_rd=len(harvest)
        #log(' len harvest2 '+repr(len(harvest)))

        loading_indicator.update(15,'Removed %d duplicates' %(comments_count_orig-comments_count_rd) )

        c_threads=[]
        q_liz=Queue()
        comments_count=len(harvest)
        filtered_posts=0
        for idx, h in enumerate(harvest):
            comment_score=h[0]
            link_url=h[2]
            if comment_score < int_CommentTreshold:
                log('    comment score %d < %d, skipped' %(comment_score,int_CommentTreshold) )
                filtered_posts+=1
                continue

            if is_filtered(comments_link_filter,link_url):
                log('    [{0}] is hidden by comments_link_filter'.format(link_url))
                filtered_posts+=1
                continue

            domain,domain_count=count_links_from_same_domain_comments(link_url) #count how many same domains we're hitting
            delay=compute_anti_dos_delay(domain,domain_count)

            #have threads process each comment post
            t = threading.Thread(target=reddit_comment_worker, args=(idx, h,q_liz,submitter,delay), name='#t%.2d'%idx)
            c_threads.append(t)
            t.start()

        #loading_indicator.update(20,'Filtered %d comments' %(filtered_posts) )
        log(repr(domains_d))
        #check the queue to determine progress
        break_counter=0 #to avoid infinite loop
        expected_listitems=(comments_count-filtered_posts)
        if expected_listitems>0:
            loading_indicator.set_tick_total(expected_listitems)
            last_queue_size=0
            while q_liz.qsize() < expected_listitems:
                if break_counter>=100:
                    break
                #each change in the queue size gets a tick on our progress track
                if last_queue_size < q_liz.qsize():
                    items_added=q_liz.qsize()-last_queue_size
                    loading_indicator.tick(items_added,'Parsing')
                else:
                    break_counter+=1

                last_queue_size=q_liz.qsize()
                xbmc.sleep(50)

        #wait for all threads to finish before collecting the list items
        for idx, t in enumerate(c_threads):
            #log('    joining %s' %t.getName())
            t.join(timeout=20)

        xbmc_busy(False)

        #compare the number of entries to the returned results
        #log( "queue:%d entries:%d" %( q_liz.qsize() , len(content['data']['children'] ) ) )
        if q_liz.qsize() != expected_listitems:
            log('some threads did not return a listitem. total comments:%d expecting(%d) but only got(%d)' %(comments_count, expected_listitems, q_liz.qsize()))

        #for t in threads: log('isAlive %s %s' %(t.getName(), repr(t.isAlive()) )  )
        li=[ liz for idx,liz in sorted(q_liz.queue) ]
        #log(repr(li))

        with q_liz.mutex:
            q_liz.queue.clear()

    except Exception as e:
        log('  ' + str(e) )

    loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG
    # this portion is abandoned for now. initial plan was a textbox with auto-height in a grouplist to mimic the comment tree, but cannot figure out how links can be followed.
    from guis import comments_GUI2
    ui = comments_GUI2('view_464_comments_grouplist.xml' , addon_path, defaultSkin='Default', defaultRes='1080i', listing=li, id=55)
    #ui = comments_GUI2('aaa.xml' , addon_path, defaultSkin='Default', defaultRes='1080i', listing=li, id=55)
    ui.title_bar_text=post_title
    ui.doModal()
    del ui
    return
def listSubReddit(url, subreddit_key, type_):
    from guis import progressBG
    from utils import post_is_filtered_out, build_script, compose_list_item, xbmc_notify
    from reddit import reddit_request, has_multiple, assemble_reddit_filter_string

    global GCXM_hasmultiplesubreddit, GCXM_actual_url_used_to_generate_these_posts, GCXM_reddit_query_of_this_gui, GCXM_hasmultipledomain, GCXM_hasmultipleauthor
    #the +'s got removed by url conversion
    title_bar_name = subreddit_key.replace(' ', '+')
    #log("  title_bar_name %s " %(title_bar_name) )

    log("listSubReddit r/%s\n %s" % (title_bar_name, url))

    currentUrl = url
    icon = banner = header = None
    xbmc_busy()

    loading_indicator = progressBG('Loading...')
    loading_indicator.update(0, 'Retrieving ' + subreddit_key)
    content = reddit_request(url)
    loading_indicator.update(10, subreddit_key)

    if not content:
        xbmc_busy(False)
        loading_indicator.end()  #it is important to close xbmcgui.DialogProgressBG
        return

    threads = []
    q_liz = Queue()  #output queue (listitem)

    content = json.loads(content)
    #log("query returned %d items " % len(content['data']['children']) )
    posts_count = len(content['data']['children'])
    filtered_out_posts = 0

    hms = has_multiple('subreddit', content['data']['children'])

    if hms == False:  #r/random and r/randnsfw return a random subreddit. we need to use the name of that subreddit for the "next page" link.
        try:
            g = content['data']['children'][0]['data']['subreddit']
        except KeyError:
            g = ""
        except IndexError:
            xbmc_busy(False)
            loading_indicator.end()  #it is important to close xbmcgui.DialogProgressBG
            xbmc_notify("List Subreddit", translation(32022))
            return
        if g:
            title_bar_name = g
            #preserve the &after string so that functions like play slideshow and play all videos can 'play' the correct page
            #  extract the &after string from currentUrl -OR- send it with the 'type' argument when calling this function.
            currentUrl = assemble_reddit_filter_string('', g) + '&after=' + type_

        #put subreddit icon/header in the GUI
        icon, banner, header = subreddit_icoheader_banner(g)

    GCXM_hasmultiplesubreddit = hms
    GCXM_hasmultipledomain = has_multiple('domain',
                                          content['data']['children'])
    GCXM_hasmultipleauthor = has_multiple('author',
                                          content['data']['children'])
    GCXM_actual_url_used_to_generate_these_posts = url
    GCXM_reddit_query_of_this_gui = currentUrl

    for idx, entry in enumerate(content['data']['children']):
        try:
            #if entry.get('kind')!='t3':
            #    filtered_out_posts+=1
            #    continue
            if post_is_filtered_out(entry.get('data')):
                filtered_out_posts += 1
                continue
            #have threads process each reddit post
            t = threading.Thread(target=reddit_post_worker,
                                 args=(idx, entry, q_liz),
                                 name='#t%.2d' % idx)
            threads.append(t)
            t.start()

        except Exception as e:
            log(" EXCEPTION:=" + str(sys.exc_info()[0]) + "  " + str(e))

    #check the queue to determine progress
    break_counter = 0  #to avoid infinite loop
    expected_listitems = (posts_count - filtered_out_posts)
    if expected_listitems > 0:
        loading_indicator.set_tick_total(expected_listitems)
        last_queue_size = 0
        while q_liz.qsize() < expected_listitems:
            if break_counter >= 100:
                break

            #each change in the queue size gets a tick on our progress track
            if last_queue_size < q_liz.qsize():
                items_added = q_liz.qsize() - last_queue_size
                loading_indicator.tick(items_added)
            else:
                break_counter += 1

            last_queue_size = q_liz.qsize()
            xbmc.sleep(100)

    #wait for all threads to finish before collecting the list items
    for idx, t in enumerate(threads):
        #log('    joining %s' %t.getName())
        t.join(timeout=20)

    xbmc_busy(False)

    #compare the number of entries to the returned results
    #log( "queue:%d entries:%d" %( q_liz.qsize() , len(content['data']['children'] ) ) )
    if q_liz.qsize() != expected_listitems:
        #some post might be filtered out.
        log('some threads did not return a listitem')

    #for t in threads: log('isAlive %s %s' %(t.getName(), repr(t.isAlive()) )  )

    #liu=[ qi for qi in sorted(q_liz.queue) ]
    li = [liz for idx, liz in sorted(q_liz.queue)]

    #empty the queue.
    with q_liz.mutex:
        q_liz.queue.clear()

    loading_indicator.end()  #it is important to close xbmcgui.DialogProgressBG

    try:
        #this part makes sure that you load the next page instead of just the first
        after = ""
        after = content['data']['after']
        if after:
            if "&after=" in currentUrl:
                nextUrl = currentUrl[:currentUrl.find("&after=")] + "&after=" + after
            else:
                nextUrl = currentUrl + "&after=" + after

            liz = compose_list_item(
                translation(32004), "", "DefaultFolderNextSquare.png",
                "script",
                build_script("listSubReddit", nextUrl, title_bar_name, after))

            #for items at the bottom left corner
            liz.setArt({"clearart": "DefaultFolderNextSquare.png"})
            liz.setInfo(type='video',
                        infoLabels={"Studio": translation(32004)})
            liz.setProperty('link_url', nextUrl)
            li.append(liz)

    except Exception as e:
        log(" EXCEPTzION:=" + str(sys.exc_info()[0]) + "  " + str(e))

    xbmc_busy(False)

    title_bar_name = urllib.unquote_plus(title_bar_name)
    ui = skin_launcher('listSubReddit',
                       title_bar_name=title_bar_name,
                       listing=li,
                       subreddits_file=subredditsFile,
                       currentUrl=currentUrl,
                       icon=icon,
                       banner=banner,
                       header=header)
    ui.doModal()
    del ui
def listLinksInComment(url, name, type_):
    from guis import progressBG
    from reddit import reddit_request
    from utils import clean_str

    log('listLinksInComment:%s:%s' % (type_, url))

    post_title = ''
    #    ShowOnlyCommentsWithlink=False
    #    if type_=='linksOnly':
    #        ShowOnlyCommentsWithlink=True

    #url='https://np.reddit.com/r/videos/comments/64j9x7/doctor_violently_dragged_from_overbooked_cia/dg2pbtj/?st=j1cbxsst&sh=2d5daf4b'
    #url=url.split('?', 1)[0]+'.json'+url.split('?', 1)[1]

    #log(repr(url.split('?', 1)[0]))
    #log(repr(url.split('?', 1)[1]))
    #log(repr(url.split('?', 1)[0]+'.json?'+url.split('?', 1)[1]))

    #url='https://www.reddit.com/r/Music/comments/4k02t1/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/' + '.json'
    #only get up to "https://www.reddit.com/r/Music/comments/4k02t1".
    #   do not include                                            "/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/"
    #   because we'll have problem when it looks like this: "https://www.reddit.com/r/Overwatch/comments/4nx91h/ever_get_that_feeling_déjà_vu/"
    #url=re.findall(r'(.*/comments/[A-Za-z0-9]+)',url)[0]
    #UPDATE you need to convert this: https://www.reddit.com/r/redditviewertesting/comments/4x8v1k/test_test_what_is_déjà_vu/
    #                        to this: https://www.reddit.com/r/redditviewertesting/comments/4x8v1k/test_test_what_is_d%C3%A9j%C3%A0_vu/
    #
    #use the safe= argument of quote_plus to keep the url structure and only encode the weird chars
    url = urllib.quote_plus(url, safe=':/?&')

    if '?' in url:
        url = url.split('?', 1)[0] + '.json?' + url.split('?', 1)[1]
    else:
        url += '.json'

    xbmc_busy()

    loading_indicator = progressBG('Loading...')
    loading_indicator.update(0, 'Retrieving comments')
    content = reddit_request(url)
    loading_indicator.update(10, 'Parsing')

    if not content:
        loading_indicator.end()
        return

    try:
        xbmc_busy()
        content = json.loads(content)

        del harvest[:]
        #harvest links in the post text (just 1)
        r_linkHunter(content[0]['data']['children'])

        #submitter=content[0]['data']['children'][0]['data']['author']
        submitter = clean_str(content,
                              [0, 'data', 'children', 0, 'data', 'author'])

        #the post title is provided in json, we'll just use that instead of messages from addLink()
        #post_title=content[0]['data']['children'][0]['data']['title']
        post_title = clean_str(content,
                               [0, 'data', 'children', 0, 'data', 'title'])

        #harvest links in the post itself
        r_linkHunter(content[1]['data']['children'])
        #for i, h in enumerate(harvest):
        #    log( '  %d %s %d -%s   link[%s]' % ( i, h[7].ljust(8)[:8], h[0], h[3].ljust(20)[:20],h[2] ) )

        c_threads = []
        q_liz = Queue()
        comments_count = len(harvest)
        filtered_posts = 0
        for idx, h in enumerate(harvest):
            comment_score = h[0]
            if comment_score < int_CommentTreshold:
                log('    comment score %d < %d, skipped' %
                    (comment_score, int_CommentTreshold))
                filtered_posts += 1
                continue

            #have threads process each comment post
            t = threading.Thread(target=reddit_comment_worker,
                                 args=(idx, h, q_liz, submitter),
                                 name='#t%.2d' % idx)
            c_threads.append(t)
            t.start()

        #check the queue to determine progress
        break_counter = 0  #to avoid infinite loop
        expected_listitems = (comments_count - filtered_posts)
        if expected_listitems > 0:
            loading_indicator.set_tick_total(expected_listitems)
            last_queue_size = 0
            while q_liz.qsize() < expected_listitems:
                if break_counter >= 100:
                    break
                #each change in the queue size gets a tick on our progress track
                if last_queue_size < q_liz.qsize():
                    items_added = q_liz.qsize() - last_queue_size
                    loading_indicator.tick(items_added)
                else:
                    break_counter += 1

                last_queue_size = q_liz.qsize()
                xbmc.sleep(50)

        #wait for all threads to finish before collecting the list items
        for idx, t in enumerate(c_threads):
            #log('    joining %s' %t.getName())
            t.join(timeout=20)

        xbmc_busy(False)

        #compare the number of entries to the returned results
        #log( "queue:%d entries:%d" %( q_liz.qsize() , len(content['data']['children'] ) ) )
        if q_liz.qsize() != expected_listitems:
            log('some threads did not return a listitem. total comments:%d expecting(%d) but only got(%d)'
                % (comments_count, expected_listitems, q_liz.qsize()))

        #for t in threads: log('isAlive %s %s' %(t.getName(), repr(t.isAlive()) )  )
        li = [liz for idx, liz in sorted(q_liz.queue)]
        #log(repr(li))

        with q_liz.mutex:
            q_liz.queue.clear()

    except Exception as e:
        log('  ' + str(e))

    loading_indicator.end()  #it is important to close xbmcgui.DialogProgressBG
    # this portion is abandoned for now. initial plan was a textbox with auto-height in a grouplist to mimic the comment tree, but cannot figure out how links can be followed.
    #    from guis import comments_GUI2
    #    ui = comments_GUI2('view_464_comments_grouplist.xml' , addon_path, defaultSkin='Default', defaultRes='1080i', listing=li)
    #    ui.doModal()
    #    del ui
    #    return
    from guis import commentsGUI
    #ui = commentsGUI('view_463_comments.xml' , addon_path, defaultSkin='Default', defaultRes='1080i', listing=li, id=55)
    ui = commentsGUI('view_461_comments.xml',
                     addon_path,
                     defaultSkin='Default',
                     defaultRes='1080i',
                     listing=li,
                     id=55)
    #NOTE: the subreddit selection screen and comments screen use the same gui. there is a button that is only for the comments screen
    ui.setProperty('comments', 'yes')  #the links button is visible/hidden in the xml by checking for this property

    ui.title_bar_text = post_title
    ui.include_parent_directory_entry = False

    ui.doModal()
    del ui
Example #10
0
def listLinksInComment(url, name, type_):
    from domains import parse_reddit_link, build_DirectoryItem_url_based_on_media_type
    from utils import markdown_to_bbcode, unescape
    from guis import progressBG

    log('listLinksInComment:%s:%s' %(type_,url) )


    directory_items=[]
    author=""
    ShowOnlyCommentsWithlink=False

    if type_=='linksOnly':
        ShowOnlyCommentsWithlink=True

    url=urllib.quote_plus(url,safe=':/?&')
    if '?' in url:
        url=url.split('?', 1)[0]+'.json?'+url.split('?', 1)[1]
    else:
        url+= '.json'

    loading_indicator=progressBG(translation(30024))
    loading_indicator.update(0,'Retrieving comments')

    content = reddit_request(url)
    if not content:
        loading_indicator.end()
        return

    loading_indicator.update(10,'Parsing')
    content = json.loads(content)

    del harvest[:]   #clear the module-level results list

    #harvest links in the post text (just 1)
    r_linkHunter(content[0]['data']['children'])

    try:
        submitter=content[0]['data']['children'][0]['data']['author']
    except:
        submitter=''

    #the post title is provided in the json; use it directly
    try:
        post_title=content[0]['data']['children'][0]['data']['title']
    except:
        post_title=''

    #harvest links in the comments
    r_linkHunter(content[1]['data']['children'])

    comment_score=0

    loading_indicator.set_tick_total(len(harvest))

    for i, h in enumerate(harvest):
        try:
            comment_score=h[0]
            link_url=h[2]
            desc100=h[3].replace('\n',' ')[0:100] #first 100 characters of description

            kind=h[6] #reddit uses t1 for user comments and t3 for the OP's own text describing the post
            d=h[5]    #depth of the comment

            tab=" "*d if d>0 else "-"

            from urlparse import urlparse   #Python 2 module; cached after the first import, so cheap inside the loop
            domain = '{uri.netloc}'.format( uri=urlparse( link_url ) )

            author=h[7]
            DirectoryItem_url=''

            if comment_score < int_CommentTreshold:
                continue   #skip comments scored below the threshold setting

            ld=parse_reddit_link(link_url=link_url, assume_is_video=False, needs_preview=True, get_playable_url=True )

            if kind=='t1':
                list_title=r"[COLOR cadetblue]%3d[/COLOR] %s" %( h[0], tab )
            elif kind=='t3':
                list_title=r"[COLOR cadetblue]Title [/COLOR] %s" %( tab )

            plot=h[3].replace('](', '] (')
            plot= markdown_to_bbcode(plot)
            plot=unescape(plot)  #convert html entities e.g.:(&#39;)

            liz=xbmcgui.ListItem(label=list_title +': '+ desc100)

            liz.setInfo( type="Video", infoLabels={ "Title": h[1], "plot": plot, "studio": domain, "votes": str(comment_score), "director": author  } )
            isFolder=False

            if link_url:
                DirectoryItem_url, setProperty_IsPlayable, isFolder, title_prefix = build_DirectoryItem_url_based_on_media_type(ld, link_url)

                liz.setProperty('IsPlayable', setProperty_IsPlayable)
                liz.setProperty('url', DirectoryItem_url)  #<-- needed by the xml gui skin
                liz.setPath(DirectoryItem_url)

                if domain:
                    plot= "  [COLOR greenyellow][%s] %s"%(domain, plot )  + "[/COLOR]"
                else:
                    plot= "  [COLOR greenyellow]%s"%( plot ) + "[/COLOR]"   #no domain to show; color the plot only
                liz.setLabel(list_title+plot)

                if ld:
                    liz.setArt({"thumb": ld.poster, "poster":ld.poster, "banner":ld.poster, "fanart":ld.poster, "landscape":ld.poster   })

            if DirectoryItem_url:
                directory_items.append( (DirectoryItem_url, liz, isFolder,) )
            else:
                #no playable link in this comment; include it as plain text unless links-only mode was requested
                if not ShowOnlyCommentsWithlink:
                    result=h[3].replace('](', '] (')
                    result=markdown_to_bbcode(result)
                    liz=xbmcgui.ListItem(label=list_title + desc100)
                    liz.setInfo( type="Video", infoLabels={ "Title": h[1], "plot": result, "studio": domain, "votes": str(h[0]), "director": author } )
                    liz.setProperty('IsPlayable', 'false')
                    directory_items.append( ("", liz, False,) )

        except Exception as e:
            log('  EXCEPTION:' + str(e) )


        loading_indicator.tick(1, desc100)
    loading_indicator.end()

    xbmcplugin.setContent(pluginhandle, "movies")    #files, songs, artists, albums, movies, tvshows, episodes, musicvideos
    xbmcplugin.setPluginCategory(pluginhandle,'Comments')

    xbmcplugin.addDirectoryItems(handle=pluginhandle, items=directory_items )
    xbmcplugin.endOfDirectory(pluginhandle)

    if comments_viewMode:
        xbmc.executebuiltin('Container.SetViewMode(%s)' %comments_viewMode)
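The url manipulation at the top of this function can be isolated into a small helper. This is a sketch for reference, not part of the addon: reddit_json_url is a hypothetical name, and the behavior matches the inline code above (percent-encode unusual characters, then splice '.json' in before any query string so reddit returns the listing as JSON).

import urllib   #Python 2; on Python 3 use urllib.parse.quote_plus

def reddit_json_url(url):
    #encode only unusual characters (e.g. accented letters), keeping the url structure intact
    url = urllib.quote_plus(url, safe=':/?&')
    if '?' in url:
        path, query = url.split('?', 1)
        return path + '.json?' + query   #'.json' must precede the query string
    return url + '.json'

#reddit_json_url('https://www.reddit.com/r/pics/comments/abc123/')
#  -> 'https://www.reddit.com/r/pics/comments/abc123/.json'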
Example #11
def listSubReddit(url, name, subreddit_key):
    from guis import progressBG
    from utils import post_is_filtered_out, set_query_field
    from reddit import has_multiple
    global GCXM_hasmultiplesubreddit,GCXM_hasmultipledomain,GCXM_hasmultipleauthor,GCXM_subreddit_key
    log("listSubReddit subreddit=%s url=%s" %(subreddit_key,url) )

    currentUrl = url
    xbmcplugin.setContent(pluginhandle, "movies") #files, songs, artists, albums, movies, tvshows, episodes, musicvideos

    loading_indicator=progressBG('Loading...')
    loading_indicator.update(8,'Retrieving '+subreddit_key)

    content = reddit_request(url)
    loading_indicator.update(11,subreddit_key  )

    if not content:
        loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG
        return

    page_title="[COLOR cadetblue]%s[/COLOR]" %subreddit_key

    xbmcplugin.setPluginCategory(pluginhandle, page_title)

    info_label={ "plot": translation(30013) }  #Automatically play videos
    if autoplayAll:       addDir("[B]- "+translation(30016)+"[/B]", url, 'autoPlay', "", "ALL", info_label)
    if autoplayUnwatched: addDir("[B]- "+translation(30017)+"[/B]" , url, 'autoPlay', "", "UNWATCHED", info_label)

    threads = []
    q_liz = Queue()   #output queue (listitem)

    content = json.loads(content)

    posts_count=len(content['data']['children'])
    filtered_out_posts=0

    GCXM_hasmultiplesubreddit=has_multiple('subreddit', content['data']['children'])
    GCXM_hasmultipledomain=has_multiple('domain', content['data']['children'])
    GCXM_hasmultipleauthor=has_multiple('author', content['data']['children'])
    GCXM_subreddit_key=subreddit_key
    for idx, entry in enumerate(content['data']['children']):
        try:
            if post_is_filtered_out( entry.get('data') ):
                filtered_out_posts+=1
                continue

            t = threading.Thread(target=reddit_post_worker, args=(idx, entry,q_liz), name='#t%.2d'%idx)
            threads.append(t)
            t.start()

        except Exception as e:
            log(" EXCEPTION:="+ str( sys.exc_info()[0]) + "  " + str(e) )

    break_counter=0 #to avoid infinite loop
    expected_listitems=(posts_count-filtered_out_posts)
    if expected_listitems>0:
        loading_indicator.set_tick_total(expected_listitems)
        last_queue_size=0
        while q_liz.qsize() < expected_listitems:
            if break_counter>=100:
                break

            if last_queue_size < q_liz.qsize():
                items_added=q_liz.qsize()-last_queue_size
                loading_indicator.tick(items_added)
            else:
                break_counter+=1

            last_queue_size=q_liz.qsize()
            xbmc.sleep(50)

    for idx, t in enumerate(threads):
        t.join(timeout=20)

    xbmc_busy(False)

    if q_liz.qsize() != expected_listitems:
        log('some threads did not return a listitem')

    li=[ liz for idx,liz in sorted(q_liz.queue) ]  #each liz is a (url, listitem[, isFolder]) tuple; sorting by idx restores post order

    with q_liz.mutex:
        q_liz.queue.clear()

    xbmcplugin.addDirectoryItems(pluginhandle, li)

    loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG

    try:
        #reddit returns an 'after' cursor identifying the last post of this page
        after=content['data']['after']

        o = urlparse.urlparse(currentUrl)
        current_url_query = urlparse.parse_qs(o.query)

        nextUrl=set_query_field(currentUrl, field='after', value=after, replace=True)  #set_query_field(url, field, value, replace=False)

        count=current_url_query.get('count')
        if count is None:
            count=itemsPerPage
        else:
            try: count=int(count[0]) + int(itemsPerPage)
            except ValueError: count=itemsPerPage

        nextUrl=set_query_field(nextUrl,'count', count, True)

        info_label={ "plot": translation(30004) + '[CR]' + page_title}
        addDir(translation(30004), nextUrl, 'listSubReddit', "", subreddit_key,info_label)   #Next Page
    except Exception as e:
        log('    Exception: '+ str(e))

    subreddit_key=subreddit_key.replace(' ','+')
    viewID=WINDOW.getProperty( "viewid-"+subreddit_key )

    if viewID:
        log("  custom viewid %s for %s " %(viewID,subreddit_key) )
        xbmc.executebuiltin('Container.SetViewMode(%s)' %viewID )
    else:
        if forceViewMode:
            xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')

    xbmcplugin.endOfDirectory(handle=pluginhandle,
                              succeeded=True,
                              updateListing=False,   #setting this to True causes the ".." entry to quit the plugin
                              cacheToDisc=True)
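The "Next Page" entry above relies on reddit's cursor-style paging: each listing response carries data.after, an id to echo back as ?after=..., plus an optional running count used for item numbering. The sketch below is a self-contained approximation; set_query_field here is a hypothetical re-implementation of the helper imported from utils, written with the same Python 2 modules the addon uses.

import urlparse               #Python 2; on Python 3: from urllib import parse as urlparse
from urllib import urlencode  #Python 2; on Python 3: from urllib.parse import urlencode

def set_query_field(url, field, value, replace=False):
    #hypothetical re-implementation: set or overwrite one query parameter
    parts = urlparse.urlparse(url)
    query = urlparse.parse_qs(parts.query)
    if replace or field not in query:
        query[field] = [str(value)]
    return urlparse.urlunparse(parts._replace(query=urlencode(query, doseq=True)))

def next_page_url(current_url, after_token, items_per_page=25):
    #carry the 'after' cursor forward and keep a running 'count' total
    next_url = set_query_field(current_url, 'after', after_token, replace=True)
    query = urlparse.parse_qs(urlparse.urlparse(current_url).query)
    try:
        count = int(query.get('count')[0]) + items_per_page
    except (TypeError, ValueError):
        count = items_per_page   #first page, or a malformed count value
    return set_query_field(next_url, 'count', count, replace=True)

#next_page_url('https://www.reddit.com/r/videos/.json?limit=25', 't3_xyz')
#  -> '...?limit=25&after=t3_xyz&count=25'  (parameter order may vary)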