Example #1
def autoPlay(url, name, autoPlay_type):
    import random
    from domains import sitesBase, parse_reddit_link, build_DirectoryItem_url_based_on_media_type
    from utils import unescape, post_is_filtered_out, log, clean_str
    from actions import setting_gif_repeat_count
    from reddit import reddit_request, determine_if_video_media_from_reddit_json


    gif_repeat_count=setting_gif_repeat_count()

    entries = []
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    playlist.clear()
    log("**********autoPlay %s*************" %autoPlay_type)
    content = reddit_request(url)
    if not content: return

    content = json.loads(content.replace('\\"', '\''))

    log("Autoplay %s - Parsing %d items" %( autoPlay_type, len(content['data']['children']) )    )

    for j_entry in content['data']['children']:
        try:
            if post_is_filtered_out( j_entry ):
                continue

            title = clean_str(j_entry, ['data','title'])

            try:
                media_url = j_entry['data']['url']
            except (KeyError, TypeError):
                media_url = j_entry['data']['media']['oembed']['url']

            is_a_video = determine_if_video_media_from_reddit_json(j_entry)

            ld=parse_reddit_link(link_url=media_url, assume_is_video=is_a_video, needs_preview=False, get_playable_url=True )

            DirectoryItem_url, setProperty_IsPlayable, isFolder, title_prefix = build_DirectoryItem_url_based_on_media_type(ld, media_url, title, on_autoplay=True)

            #skip posts that did not resolve to playable video media; ld can be None for unrecognized links
            if not ld or ld.media_type not in [sitesBase.TYPE_VIDEO, sitesBase.TYPE_GIF, sitesBase.TYPE_VIDS, sitesBase.TYPE_MIXED]:
                continue

            autoPlay_type_entries_append( entries, autoPlay_type, title, DirectoryItem_url)
            #gifs are appended repeatedly so that they loop during playback
            if ld.media_type == sitesBase.TYPE_GIF:
                for _ in range( 0, gif_repeat_count ):
                    autoPlay_type_entries_append( entries, autoPlay_type, title, DirectoryItem_url)

        except Exception as e:
            log("  EXCEPTION Autoplay "+ str( sys.exc_info()[0]) + "  " + str(e) )


    if autoplayRandomize:
        random.shuffle(entries)

    for title, url in entries:
        listitem = xbmcgui.ListItem(title)
        playlist.add(url, listitem)
        log('add to playlist: %s %s' %(title.ljust(25)[:25],url ))
    xbmc.Player().play(playlist)
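
The helper autoPlay_type_entries_append() is referenced above but not included in this example. Below is a minimal sketch of what it could look like, assuming the "UNWATCHED" mode consults some watched-history lookup; the plain-set history store shown here is purely illustrative.

#hypothetical sketch of autoPlay_type_entries_append; the watched-history
#store is a plain set here for illustration only
watched_urls = set()

def autoPlay_type_entries_append(entries, autoPlay_type, title, url):
    #skip posts already seen when building an "UNWATCHED" playlist
    if autoPlay_type == 'UNWATCHED' and url in watched_urls:
        return
    entries.append((title, url))
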
def listLinksInComment(url, name, type_):
    from domains import parse_reddit_link, build_DirectoryItem_url_based_on_media_type
    from utils import markdown_to_bbcode, unescape
    from guis import progressBG
    #from resources.domains import make_addon_url_from
    #called from context menu
    log('listLinksInComment:%s:%s' %(type_,url) )

    #does not work for list comments because the key is the playable url (not the reddit comments url)
    #msg=WINDOW.getProperty(url)
    #WINDOW.clearProperty( url )
    #log( '   msg=' + msg )

    directory_items=[]
    author=""
    ShowOnlyCommentsWithlink=False

    if type_=='linksOnly':
        ShowOnlyCommentsWithlink=True

    #url='https://www.reddit.com/r/Music/comments/4k02t1/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/' + '.json'
    #only get up to "https://www.reddit.com/r/Music/comments/4k02t1".
    #   do not include                                            "/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/"
    #   because we'll have problems when it looks like this: "https://www.reddit.com/r/Overwatch/comments/4nx91h/ever_get_that_feeling_déjà_vu/"

    #url=re.findall(r'(.*/comments/[A-Za-z0-9]+)',url)[0]

    #use the safe=':/?&' argument in quote_plus so that only the unusual characters get encoded
    url=urllib.quote_plus(url,safe=':/?&')
    if '?' in url:
        url=url.split('?', 1)[0]+'.json?'+url.split('?', 1)[1]
    else:
        url+= '.json'

    loading_indicator=progressBG(translation(30024))
    loading_indicator.update(0,'Retrieving comments')

    content = reddit_request(url)
    if not content:
        loading_indicator.end()
        return

    loading_indicator.update(10,'Parsing')
    content = json.loads(content)

    del harvest[:]
    #harvest links in the post text (just 1)
    r_linkHunter(content[0]['data']['children'])

    try: submitter=content[0]['data']['children'][0]['data']['author']
    except (KeyError, IndexError): submitter=''

    #the post title is provided in json, we'll just use that instead of messages from addLink()
    try: post_title=content[0]['data']['children'][0]['data']['title']
    except (KeyError, IndexError): post_title=''
    #for i, h in enumerate(harvest):
    #    log("aaaaa first harvest "+h[2])

    #harvest links in the post itself
    r_linkHunter(content[1]['data']['children'])

    comment_score=0

    loading_indicator.set_tick_total(len(harvest))

    for i, h in enumerate(harvest):
        try:
            #log(str(i)+"  score:"+ str(h[0]).zfill(5)+" "+ h[1] +'|'+ h[3] )
            comment_score=h[0]
            #log("score %d < %d (%s)" %(comment_score,int_CommentTreshold, CommentTreshold) )
            link_url=h[2]
            desc100=h[3].replace('\n',' ')[0:100] #first 100 characters of description

            kind=h[6] #reddit uses t1 for user comments and t3 for the OP's own text (like a poster describing the post)
            d=h[5]   #depth of the comment

            tab=" "*d if d>0 else "-"

            from urlparse import urlparse
            domain = '{uri.netloc}'.format( uri=urlparse( link_url ) )

            author=h[7]
            DirectoryItem_url=''

            if comment_score < int_CommentTreshold:
                continue

            #hoster, DirectoryItem_url, videoID, mode_type, thumb_url,poster_url, isFolder,setInfo_type, setProperty_IsPlayable =make_addon_url_from(h[2])
            #if link_url:
            #    log( '  comment %s TITLE:%s... link[%s]' % ( str(d).zfill(3), desc100.ljust(20)[:20],link_url ) )

            ld=parse_reddit_link(link_url=link_url, assume_is_video=False, needs_preview=True, get_playable_url=True )

            if kind=='t1':
                list_title=r"[COLOR cadetblue]%3d[/COLOR] %s" %( h[0], tab )
            elif kind=='t3':
                list_title=r"[COLOR cadetblue]Title [/COLOR] %s" %( tab )

            #helps the textbox control treat [url description] and (url) as separate words, so that they can be separated into 2 lines
            plot=h[3].replace('](', '] (')
            plot= markdown_to_bbcode(plot)
            plot=unescape(plot)  #convert html entities e.g.:(&#39;)

            liz=xbmcgui.ListItem(label=list_title +': '+ desc100)

            liz.setInfo( type="Video", infoLabels={ "Title": h[1], "plot": plot, "studio": domain, "votes": str(comment_score), "director": author  } )
            isFolder=False

            #force all links to ytdl to see if it can be played
            if link_url:
                DirectoryItem_url, setProperty_IsPlayable, isFolder, title_prefix = build_DirectoryItem_url_based_on_media_type(ld, link_url)

                liz.setProperty('IsPlayable', setProperty_IsPlayable)
                liz.setProperty('url', DirectoryItem_url)  #<-- needed by the xml gui skin
                liz.setPath(DirectoryItem_url)

                if domain:
                    plot= "  [COLOR greenyellow][%s] %s"%(domain, plot )  + "[/COLOR]"
                else:
                    plot= "  [COLOR greenyellow][%s]"%( plot ) + "[/COLOR]"
                liz.setLabel(list_title+plot)

                if ld:
                    liz.setArt({"thumb": ld.poster, "poster":ld.poster, "banner":ld.poster, "fanart":ld.poster, "landscape":ld.poster   })

            if DirectoryItem_url:
                #log( 'IsPlayable:'+setProperty_IsPlayable )
                directory_items.append( (DirectoryItem_url, liz, isFolder,) )
                #xbmcplugin.addDirectoryItem(handle=pluginhandle,url=DirectoryItem_url,listitem=liz,isFolder=isFolder)
            else:
                #this section is for comments that have no links
                if not ShowOnlyCommentsWithlink:
                    result=h[3].replace('](', '] (')
                    result=markdown_to_bbcode(result)
                    liz=xbmcgui.ListItem(label=list_title + desc100)
                    liz.setInfo( type="Video", infoLabels={ "Title": h[1], "plot": result, "studio": domain, "votes": str(h[0]), "director": author } )
                    liz.setProperty('IsPlayable', 'false')

                    directory_items.append( ("", liz, False,) )
                    #xbmcplugin.addDirectoryItem(handle=pluginhandle,url="",listitem=liz,isFolder=False)

                #END of section for comments that have no links or unsupported links
        except Exception as e:
            log('  EXCEPTION:' + str(e) )

        #for di in directory_items:
        #    log( str(di) )

        loading_indicator.tick(1, desc100)
    loading_indicator.end()

    #log('  comments_view id=%s' %comments_viewMode)

    #xbmcplugin.setContent(pluginhandle, "mixed")  #in estuary, mixed have limited view id's available. it has widelist which is nice for comments but we'll just stick with 'movies'
    xbmcplugin.setContent(pluginhandle, "episodes")    #files, songs, artists, albums, movies, tvshows, episodes, musicvideos
    xbmcplugin.setPluginCategory(pluginhandle,'Comments')

    xbmcplugin.addDirectoryItems(handle=pluginhandle, items=directory_items )
    xbmcplugin.endOfDirectory(pluginhandle)

    if comments_viewMode:
        xbmc.executebuiltin('Container.SetViewMode(%s)' %comments_viewMode)
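
The harvest list filled by r_linkHunter() is indexed positionally throughout these examples (h[0], h[2], h[3], ...). The layout below is reconstructed from those call sites as a hedged reference; h[4] is never read in these snippets, so its name here is a placeholder.

from collections import namedtuple

#inferred tuple layout for harvest entries; field names are illustrative
HarvestEntry = namedtuple('HarvestEntry', [
    'score',     #h[0] comment score, compared against int_CommentTreshold
    'title',     #h[1] used for the "Title" infoLabel
    'link_url',  #h[2] url found inside the comment
    'text',      #h[3] comment body in markdown
    'unused',    #h[4] not referenced in these examples
    'depth',     #h[5] nesting depth of the comment
    'kind',      #h[6] 't1' = user comment, 't3' = the post's own text
    'author',    #h[7] comment author
])
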
def listSubReddit(url, name, subreddit_key):
    from guis import progressBG
    from utils import post_is_filtered_out, set_query_field
    from reddit import has_multiple
    global GCXM_hasmultiplesubreddit,GCXM_hasmultipledomain,GCXM_hasmultipleauthor,GCXM_subreddit_key
    log("listSubReddit subreddit=%s url=%s" %(subreddit_key,url) )

    currentUrl = url

    #use the "episodes" content type rather than "movies" so that the Wall and InfoWall views in the default Estuary skin show titles on each item like the Youtube and RedditTV addons. 
    #The "movies" type assumes the thumbnails are posters and don't need a title which is definitely not the case for reddit content.  --credit:Kestrel
    xbmcplugin.setContent(pluginhandle, "episodes") #files, songs, artists, albums, movies, tvshows, episodes, musicvideos

    loading_indicator=progressBG('Loading...')
    loading_indicator.update(8,'Retrieving '+subreddit_key)

    content = reddit_request(url)
    loading_indicator.update(11,subreddit_key  )

    if not content:
        loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG
        return

    page_title="[COLOR cadetblue]%s[/COLOR]" %subreddit_key

    #setPluginCategory lets us show text at the top of window, we take advantage of this and put the subreddit name
    xbmcplugin.setPluginCategory(pluginhandle, page_title)

    info_label={ "plot": translation(30013) }  #Automatically play videos
    if autoplayAll:       addDir("[B]- "+translation(30016)+"[/B]", url, 'autoPlay', "", "ALL", info_label)
    if autoplayUnwatched: addDir("[B]- "+translation(30017)+"[/B]" , url, 'autoPlay', "", "UNWATCHED", info_label)

    threads = []
    q_liz = Queue()   #output queue (listitem)

    content = json.loads(content)

    #A modhash is a token that the reddit API requires to help prevent CSRF. Modhashes can be obtained via the /api/me.json call or in response data of listing endpoints.
    #The preferred way to send a modhash is to include an X-Modhash custom HTTP header with your requests.
    #Modhashes are not required when authenticated with OAuth.
    #modhash=content['data']['modhash']
    #log( 'modhash='+repr(modhash) )
    #log("query returned %d items " % len(content['data']['children']) )
    posts_count=len(content['data']['children'])
    filtered_out_posts=0

    GCXM_hasmultiplesubreddit=has_multiple('subreddit', content['data']['children'])
    GCXM_hasmultipledomain=has_multiple('domain', content['data']['children'])
    GCXM_hasmultipleauthor=has_multiple('author', content['data']['children'])
    GCXM_subreddit_key=subreddit_key
    for idx, entry in enumerate(content['data']['children']):
        try:
            if post_is_filtered_out( entry.get('data') ):
                filtered_out_posts+=1
                continue

            #have threads process each reddit post
            t = threading.Thread(target=reddit_post_worker, args=(idx, entry,q_liz), name='#t%.2d'%idx)
            threads.append(t)
            t.start()

        except Exception as e:
            log(" EXCEPTION:="+ str( sys.exc_info()[0]) + "  " + str(e) )

    #check the queue to determine progress
    break_counter=0 #to avoid infinite loop
    expected_listitems=(posts_count-filtered_out_posts)
    if expected_listitems>0:
        loading_indicator.set_tick_total(expected_listitems)
        last_queue_size=0
        while q_liz.qsize() < expected_listitems:
            if break_counter>=100:
                break

            #each change in the queue size gets a tick on our progress track
            if last_queue_size < q_liz.qsize():
                items_added=q_liz.qsize()-last_queue_size
                loading_indicator.tick(items_added)
            else:
                break_counter+=1

            last_queue_size=q_liz.qsize()
            xbmc.sleep(50)

    #wait for all threads to finish before collecting the list items
    for idx, t in enumerate(threads):
        #log('    joining %s' %t.getName())
        t.join(timeout=20)

    xbmc_busy(False)

    #compare the number of entries to the returned results
    #log( "queue:%d entries:%d" %( q_liz.qsize() , len(content['data']['children'] ) ) )
    if q_liz.qsize() != expected_listitems:
        log('some threads did not return a listitem')

    #li is a list of (url, listitem[, isFolder]) tuples for addDirectoryItems
    li=[ liz for idx,liz in sorted(q_liz.queue) ]
    #log(repr(li))

    #empty the queue.
    with q_liz.mutex:
        q_liz.queue.clear()

    xbmcplugin.addDirectoryItems(pluginhandle, li)

    loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG

    try:
        #this part makes sure that you load the next page instead of just the first
        after=content['data']['after']

        o = urlparse.urlparse(currentUrl)
        current_url_query = urlparse.parse_qs(o.query)

        nextUrl=set_query_field(currentUrl, field='after', value=after, replace=True)  #(url, field, value, replace=False):
        #log('$$$currenturl: ' +currentUrl)
        #log('$$$   nextUrl: ' +nextUrl)

        count=current_url_query.get('count')
        #log('$$$count   : ' +repr(count))
        if current_url_query.get('count') is None:
            #first time through it is None
            count=itemsPerPage
        else:
            #on subsequent pages it keeps incrementing by itemsPerPage
            try: count=int(current_url_query.get('count')[0]) + int(itemsPerPage)
            except ValueError: count=itemsPerPage

        nextUrl=set_query_field(nextUrl,'count', count, True)
        #log('$$$   nextUrl: ' +nextUrl)

        # plot shows up on estuary. etc. ( avoids the "No information available" message on description )
        info_label={ "plot": translation(30004) + '[CR]' + page_title}
        addDir(translation(30004), nextUrl, 'listSubReddit', "", subreddit_key,info_label)   #Next Page
    except Exception as e:
        log('    Exception: '+ str(e))

    #the +'s got removed by url conversion
    subreddit_key=subreddit_key.replace(' ','+')
    viewID=WINDOW.getProperty( "viewid-"+subreddit_key )
    #log("  custom viewid %s for %s " %(viewID,subreddit_key) )

    if viewID:
        log("  custom viewid %s for %s " %(viewID,subreddit_key) )
        xbmc.executebuiltin('Container.SetViewMode(%s)' %viewID )
    else:
        if forceViewMode:
            xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')

    xbmcplugin.endOfDirectory(handle=pluginhandle,
                              succeeded=True,
                              updateListing=False,   #setting this to True causes the ".." entry to quit the plugin
                              cacheToDisc=True)
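
set_query_field() (imported from utils) is used above to build the "next page" url by rewriting the after and count query parameters. Here is a minimal Python 2 sketch of such a helper, inferred from the call sites; the addon's real implementation may differ.

import urllib
import urlparse

#sketch of set_query_field(url, field, value, replace=False): rewrite or add
#one query-string parameter, leaving the rest of the url intact
def set_query_field(url, field, value, replace=False):
    parts = urlparse.urlparse(url)
    query = urlparse.parse_qsl(parts.query)
    if replace:
        query = [(k, v) for k, v in query if k != field]
    query.append((field, str(value)))
    return urlparse.urlunparse((parts.scheme, parts.netloc, parts.path,
                                parts.params, urllib.urlencode(query),
                                parts.fragment))
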
Example #4
def autoSlideshow(url, name, type_):

    log('starting slideshow ' + url)
    ev = threading.Event()

    entries = []
    #watchdog_counter=0
    preview_w = 0
    preview_h = 0
    image = ''

    #content = opener.open(url).read()
    content = reddit_request(url)
    if not content: return
    #log( str(content) )
    #content = json.loads(content.replace('\\"', '\''))
    content = json.loads(content)

    log("slideshow %s:Parsing %d items: %s" %
        (type_, len(content['data']['children']),
         'random' if random_post_order else 'normal order'))

    data_children = content['data']['children']

    if random_post_order:
        random.shuffle(data_children)

    for j_entry in data_children:
        try:
            title = unescape(j_entry['data']['title'].encode('utf-8'))
            log("  TITLE:%s [r/%s]" %
                (title, j_entry.get('data').get('subreddit')))

            try:
                description = unescape(j_entry['data']['media']['oembed']
                                       ['description'].encode('utf-8'))
            except (KeyError, TypeError, AttributeError):
                description = ''
            #log('    description  [%s]' %description)
            try:
                post_selftext = unescape(
                    j_entry['data']['selftext'].encode('utf-8'))
            except (KeyError, TypeError, AttributeError):
                post_selftext = ''
            #log('    post_selftext[%s]' %post_selftext)

            description = post_selftext + '[CR]' + description if post_selftext else description

            try:
                media_url = j_entry['data']['url']
            except (KeyError, TypeError):
                media_url = j_entry['data']['media']['oembed']['url']

            try:
                preview = j_entry['data']['preview']['images'][0]['source'][
                    'url'].encode('utf-8').replace('&amp;', '&')
                try:
                    preview_h = float(j_entry['data']['preview']['images'][0]
                                      ['source']['height'])
                    preview_w = float(j_entry['data']['preview']['images'][0]
                                      ['source']['width'])
                except (KeyError, TypeError, ValueError):
                    preview_w = 0
                    preview_h = 0

            except Exception as e:
                #log("   getting preview image EXCEPTION:="+ str( sys.exc_info()[0]) + "  " + str(e) )
                preview = ""

            ld = parse_reddit_link(link_url=media_url,
                                   assume_is_video=False,
                                   needs_preview=True,
                                   get_playable_url=True)
            if ld:
                if not preview:
                    preview = ld.poster

                if (addon.getSetting('include_albums')
                        == 'true') and (ld.media_type == sitesBase.TYPE_ALBUM):
                    dictlist = listAlbum(media_url, title, 'return_dictlist')
                    for d in dictlist:
                        #log('    (S) adding items from album ' + title  +' ' + d.get('DirectoryItem_url') )
                        t2 = d.get('li_label') if d.get('li_label') else title
                        #entries.append([ t2, d.get('DirectoryItem_url'), d.get('width'), d.get('height'), len(entries)])

                        d['li_label'] = t2
                        entries.append(d)
                        #title=''  #only put the title in once.
                else:
                    if addon.getSetting('use_reddit_preview') == 'true':
                        if preview: image = preview
                        elif ld.poster: image = ld.poster
                        #if preview: entries.append([title,preview,preview_w, preview_h,len(entries)]) #log('      (N)added preview:%s %s' %( title,preview) )
                        #elif ld.poster: entries.append([title,ld.poster,preview_w, preview_h,len(entries)])    #log('      (N)added poster:%s %s' % ( title,ld.poster) )
                    else:
                        if ld.poster:
                            image = ld.poster  #entries.append([title,ld.poster,preview_w, preview_h,len(entries)])
                        elif preview:
                            image = preview  #entries.append([title,preview,preview_w, preview_h,len(entries)])
                        #if ld.poster: entries.append([title,ld.poster,preview_w, preview_h,len(entries)])
                        #elif preview: entries.append([title,preview,preview_w, preview_h,len(entries)])

                    append_entry(entries, title, image, preview_w, preview_h,
                                 description)
            else:
                append_entry(entries, title, preview, preview_w, preview_h,
                             description)
                #log('      (N)added preview:%s' % title )

        except Exception as e:
            log('  autoSlideshow exception:' + str(e))

    #log( repr(entries))

    entries = remove_dict_duplicates(entries, 'DirectoryItem_url')

    #     #for i,e in enumerate(entries): log('  e1-%d %s' %(i, e[1]) )
    #     def k2(x): return x[1]
    #     entries=remove_duplicates(entries, k2)
    #     #for i,e in enumerate(entries): log('  e2-%d %s' %(i, e[1]) )

    for i, e in enumerate(entries):
        log('  possible playable items({0}) {1}...{2}x{3}  {4}'.format(
            i, e['li_label'].ljust(15)[:15], repr(e.get('width')),
            repr(e.get('height')), e.get('DirectoryItem_url')))

    if len(entries) == 0:
        log('  Play All: no playable items')
        xbmc.executebuiltin(
            'XBMC.Notification("%s","%s")' %
            (translation(32054),
             translation(32055)))  #Play All     No playable items
        return

    #if type.endswith("_RANDOM"):
    #    random.shuffle(entries)

    #for title, url in entries:
    #    log("  added to playlist:"+ title + "  " + url )

    log("**********playing slideshow*************")

    for e in entries:
        q.put(e)

    #s= HorizontalSlideScreensaver(ev,q)
    s = ScreensaverManager(ev, q)

    try:
        s.start_loop()
    except Exception as e:
        log("  EXCEPTION slideshowAlbum:=" + str(sys.exc_info()[0]) + "  " +
            str(e))

    return
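
remove_dict_duplicates(entries, 'DirectoryItem_url') collapses album expansions and reposts that point at the same media url. A minimal sketch of such a helper: keep the first dict seen for each value of the given key, preserving order.

#sketch of remove_dict_duplicates as used above; the real utils helper may differ
def remove_dict_duplicates(dict_list, key):
    seen = set()
    result = []
    for d in dict_list:
        k = d.get(key)
        if k in seen:
            continue  #a later entry with the same url is dropped
        seen.add(k)
        result.append(d)
    return result
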
Example #5
def listSubReddit(url, subreddit_key, type_):
    from guis import progressBG
    from utils import post_is_filtered_out, build_script, compose_list_item, xbmc_notify,prettify_reddit_query, set_query_field
    from reddit import reddit_request, has_multiple, assemble_reddit_filter_string, subreddit_icoheader_banner

    global GCXM_hasmultiplesubreddit, GCXM_actual_url_used_to_generate_these_posts,GCXM_reddit_query_of_this_gui,GCXM_hasmultipledomain,GCXM_hasmultipleauthor
    #the +'s got removed by url conversion
    title_bar_name=subreddit_key.replace(' ','+')
    if title_bar_name.startswith('?'):
        title_bar_name=prettify_reddit_query(title_bar_name)
    #log("  title_bar_name %s " %(title_bar_name) )

    log("listSubReddit r/%s\n %s" %(title_bar_name,url) )

    currentUrl = url
    icon=banner=header=None
    xbmc_busy()

    loading_indicator=progressBG('Loading...')
    loading_indicator.update(0,'Retrieving '+subreddit_key)
    content = reddit_request(url)
    loading_indicator.update(10,subreddit_key  )

    if not content:
        xbmc_busy(False)
        loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG
        return

    threads = []
    q_liz = Queue()   #output queue (listitem)

    content = json.loads(content)
    #log("query returned %d items " % len(content['data']['children']) )
    posts_count=len(content['data']['children'])
    filtered_out_posts=0

    hms=has_multiple('subreddit', content['data']['children'])

    if hms==False:  #r/random and r/randnsfw return a random subreddit. we need to use the name of this subreddit for the "next page" link.
        try: g=content['data']['children'][0]['data']['subreddit']
        except (KeyError, ValueError): g=""
        except IndexError:
            xbmc_busy(False)
            loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG
            xbmc_notify("List Subreddit",translation(32022))
            return
        if g:
            title_bar_name=g
            #preserve the &after string so that functions like play slideshow and play all videos can 'play' the correct page
            #  extract the &after string from currentUrl -OR- send it with the 'type' argument when calling this function.
            currentUrl=assemble_reddit_filter_string('',g) + '&after=' + type_

        #put subreddit icon/header in the GUI
        icon,banner,header=subreddit_icoheader_banner(g)

    GCXM_hasmultiplesubreddit=hms
    GCXM_hasmultipledomain=has_multiple('domain', content['data']['children'])
    GCXM_hasmultipleauthor=has_multiple('author', content['data']['children'])
    GCXM_actual_url_used_to_generate_these_posts=url
    GCXM_reddit_query_of_this_gui=currentUrl

    for idx, entry in enumerate(content['data']['children']):
        try:
            if post_is_filtered_out( entry.get('data') ):
                filtered_out_posts+=1
                continue

            domain,domain_count=count_links_from_same_domain( entry ) #count how many same domains we're hitting
            delay=compute_anti_dos_delay(domain,domain_count)
            #have threads process each reddit post
            t = threading.Thread(target=reddit_post_worker, args=(idx, entry,q_liz,delay), name='#t%.2d'%idx)
            threads.append(t)
            t.start()

        except Exception as e:
            log(" EXCEPTION:="+ str( sys.exc_info()[0]) + "  " + str(e) )

    log( repr(domains_d) )
    #check the queue to determine progress
    break_counter=0 #to avoid infinite loop
    expected_listitems=(posts_count-filtered_out_posts)
    if expected_listitems>0:
        loading_indicator.set_tick_total(expected_listitems)
        last_queue_size=0
        while q_liz.qsize() < expected_listitems:
            if break_counter>=500:
                #log('break counter reached limit')
                break
            #each change in the queue size gets a tick on our progress track
            if last_queue_size < q_liz.qsize():
                items_added=q_liz.qsize()-last_queue_size
                loading_indicator.tick(items_added)
            else:
                break_counter+=1

            last_queue_size=q_liz.qsize()
            xbmc.sleep(100)

    #wait for all threads to finish before collecting the list items
    for idx, t in enumerate(threads):
        #log('    joining %s' %t.getName())
        t.join(timeout=20) #<-- does not seem to work

    xbmc_busy(False)

    #compare the number of entries to the returned results
    #log( "queue:%d entries:%d" %( q_liz.qsize() , len(content['data']['children'] ) ) )
    if q_liz.qsize() != expected_listitems:
        #some posts might be filtered out.
        log('some threads did not return a listitem')

    #for t in threads: log('isAlive %s %s' %(t.getName(), repr(t.isAlive()) )  )

    #liu=[ qi for qi in sorted(q_liz.queue) ]
    li=[ liz for idx,liz in sorted(q_liz.queue) ]

    #empty the queue.
    with q_liz.mutex:
        q_liz.queue.clear()

    loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG

    try:
        #this part makes sure that you load the next page instead of just the first
        after=content['data']['after']
        o = urlparse.urlparse(currentUrl)
        current_url_query = urlparse.parse_qs(o.query)

        count=current_url_query.get('count')
        if current_url_query.get('count') is None:
            #first time through it is None
            count=itemsPerPage
        else:
            #on subsequent pages it keeps incrementing by itemsPerPage
            try: count=int(current_url_query.get('count')[0]) + int(itemsPerPage)
            except ValueError: count=itemsPerPage

        nextUrl=set_query_field(currentUrl,'count', count, True)
        #log('$$$   nextUrl: ' +nextUrl)

        nextUrl=set_query_field(nextUrl, field='after', value=after, replace=True)  #(url, field, value, replace=False):
        #log('$$$currenturl: ' +currentUrl)
        #log('$$$   nextUrl: ' +nextUrl)

        liz = compose_list_item( translation(32024), "", "DefaultFolderNextSquare.png", "script", build_script("listSubReddit",nextUrl,title_bar_name,after) )

        #for items at the bottom left corner
        liz.setArt({ "clearart": "DefaultFolderNextSquare.png"  })
        liz.setInfo(type='video', infoLabels={"Studio":translation(32024)})
        liz.setProperty('link_url', nextUrl )
        li.append(liz)

    except Exception as e:
        log(" EXCEPTzION:="+ str( sys.exc_info()[0]) + "  " + str(e) )

    xbmc_busy(False)

    title_bar_name=urllib.unquote_plus(title_bar_name)
    ui=skin_launcher('listSubReddit',
                     title_bar_name=title_bar_name,
                     listing=li,
                     subreddits_file=subredditsFile,
                     currentUrl=currentUrl,
                     icon=icon,
                     banner=banner,
                     header=header)
    ui.doModal()
    del ui
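
This variant staggers its worker threads with a per-domain delay (count_links_from_same_domain() and compute_anti_dos_delay() are not shown). Below is a hedged sketch of the delay computation, assuming a simple linear backoff; both the unit (milliseconds, as xbmc.sleep expects) and the formula are assumptions.

#hypothetical sketch: stagger threads that hit the same host so a page full
#of links to one domain does not fire simultaneous requests
def compute_anti_dos_delay(domain, domain_count):
    per_link_delay_ms = 500  #assumed spacing between requests to one host
    if domain_count <= 1:
        return 0  #first link to this domain goes out immediately
    return (domain_count - 1) * per_link_delay_ms
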
Example #6
def listLinksInComment(url, name, type_):
    from guis import progressBG
    from reddit import reddit_request
    from utils import clean_str,remove_duplicates, is_filtered
    from default import comments_link_filter

    log('listLinksInComment:%s:%s' %(type_,url) )

    post_title=''
    global harvest
#    ShowOnlyCommentsWithlink=False
#    if type_=='linksOnly':
#        ShowOnlyCommentsWithlink=True

    #url='https://np.reddit.com/r/videos/comments/64j9x7/doctor_violently_dragged_from_overbooked_cia/dg2pbtj/?st=j1cbxsst&sh=2d5daf4b'
    #url=url.split('?', 1)[0]+'.json'+url.split('?', 1)[1]

    #log(repr(url.split('?', 1)[0]))
    #log(repr(url.split('?', 1)[1]))
    #log(repr(url.split('?', 1)[0]+'.json?'+url.split('?', 1)[1]))

    #url='https://www.reddit.com/r/Music/comments/4k02t1/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/' + '.json'
    #only get up to "https://www.reddit.com/r/Music/comments/4k02t1".
    #   do not include                                            "/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/"
    #   because we'll have problems when it looks like this: "https://www.reddit.com/r/Overwatch/comments/4nx91h/ever_get_that_feeling_déjà_vu/"
    #url=re.findall(r'(.*/comments/[A-Za-z0-9]+)',url)[0]
    #UPDATE you need to convert this: https://www.reddit.com/r/redditviewertesting/comments/4x8v1k/test_test_what_is_déjà_vu/
    #                        to this: https://www.reddit.com/r/redditviewertesting/comments/4x8v1k/test_test_what_is_d%C3%A9j%C3%A0_vu/
    #
    #use the safe=':/?&' argument in quote_plus so that only the unusual characters get encoded
    url=urllib.quote_plus(url,safe=':/?&')

    if '?' in url:
        url=url.split('?', 1)[0]+'.json?'+url.split('?', 1)[1]
    else:
        url+= '.json'

    xbmc_busy()

    loading_indicator=progressBG('Loading...')
    loading_indicator.update(0,'Retrieving comments')
    content = reddit_request(url)
    loading_indicator.update(10,'Parsing')

    if not content:
        loading_indicator.end()
        return

    try:
        xbmc_busy()
        content = json.loads(content)

        #harvest links in the post text (just 1)
        r_linkHunter(content[0]['data']['children'])

        #submitter=content[0]['data']['children'][0]['data']['author']
        submitter=clean_str(content,[0,'data','children',0,'data','author'])

        #the post title is provided in json, we'll just use that instead of messages from addLink()
        #post_title=content[0]['data']['children'][0]['data']['title']
        post_title=clean_str(content,[0,'data','children',0,'data','title'])

        #harvest links in the post itself
        r_linkHunter(content[1]['data']['children'])
        #for i, h in enumerate(harvest):
        #    log( '  %d %s %.4d -%s   link[%s]' % ( i, h[7].ljust(8)[:8], h[0], h[3].ljust(20)[:20],h[2] ) )

        comments_count_orig=len(harvest)
        #log(' len harvest1 '+repr(len(harvest)))
        #remove duplicate links
        def k2(x): return (x[2],x[3])
        harvest=remove_duplicates(harvest,k2)
        comments_count_rd=len(harvest)
        #log(' len harvest2 '+repr(len(harvest)))

        loading_indicator.update(15,'Removed %d duplicates' %(comments_count_orig-comments_count_rd) )

        c_threads=[]
        q_liz=Queue()
        comments_count=len(harvest)
        filtered_posts=0
        for idx, h in enumerate(harvest):
            comment_score=h[0]
            link_url=h[2]
            if comment_score < int_CommentTreshold:
                log('    comment score %d < %d, skipped' %(comment_score,int_CommentTreshold) )
                filtered_posts+=1
                continue

            if is_filtered(comments_link_filter,link_url):
                log('    [{0}] is hidden by comments_link_filter'.format(link_url))
                filtered_posts+=1
                continue

            domain,domain_count=count_links_from_same_domain_comments(link_url) #count how many same domains we're hitting
            delay=compute_anti_dos_delay(domain,domain_count)

            #have threads process each comment post
            t = threading.Thread(target=reddit_comment_worker, args=(idx, h,q_liz,submitter,delay), name='#t%.2d'%idx)
            c_threads.append(t)
            t.start()

        #loading_indicator.update(20,'Filtered %d comments' %(filtered_posts) )
        log(repr(domains_d))
        #check the queue to determine progress
        break_counter=0 #to avoid infinite loop
        expected_listitems=(comments_count-filtered_posts)
        if expected_listitems>0:
            loading_indicator.set_tick_total(expected_listitems)
            last_queue_size=0
            while q_liz.qsize() < expected_listitems:
                if break_counter>=100:
                    break
                #each change in the queue size gets a tick on our progress track
                if last_queue_size < q_liz.qsize():
                    items_added=q_liz.qsize()-last_queue_size
                    loading_indicator.tick(items_added,'Parsing')
                else:
                    break_counter+=1

                last_queue_size=q_liz.qsize()
                xbmc.sleep(50)

        #wait for all threads to finish before collecting the list items
        for idx, t in enumerate(c_threads):
            #log('    joining %s' %t.getName())
            t.join(timeout=20)

        xbmc_busy(False)

        #compare the number of entries to the returned results
        #log( "queue:%d entries:%d" %( q_liz.qsize() , len(content['data']['children'] ) ) )
        if q_liz.qsize() != expected_listitems:
            log('some threads did not return a listitem. total comments:%d expecting(%d) but only got(%d)' %(comments_count, expected_listitems, q_liz.qsize()))

        #for t in threads: log('isAlive %s %s' %(t.getName(), repr(t.isAlive()) )  )
        li=[ liz for idx,liz in sorted(q_liz.queue) ]
        #log(repr(li))

        with q_liz.mutex:
            q_liz.queue.clear()

    except Exception as e:
        log('  EXCEPTION listLinksInComment:' + str(e) )

    loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG
# this portion is abandoned for now. the initial plan was to use a textbox with auto-height in a grouplist to mimic the comment tree, but we cannot figure out how links can be followed.
    from guis import comments_GUI2
    ui = comments_GUI2('view_464_comments_grouplist.xml' , addon_path, defaultSkin='Default', defaultRes='1080i', listing=li, id=55)
    #ui = comments_GUI2('aaa.xml' , addon_path, defaultSkin='Default', defaultRes='1080i', listing=li, id=55)
    ui.title_bar_text=post_title
    ui.doModal()
    del ui
    return
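
remove_duplicates(harvest, k2) above deduplicates comments by the (link_url, text) key built by k2. A minimal sketch of such an order-preserving, key-based dedupe helper; the real utils function may differ in details.

#sketch of utils.remove_duplicates: drop later items whose key(x) was
#already seen, keeping the original order
def remove_duplicates(seq, key):
    seen = set()
    out = []
    for item in seq:
        k = key(item)
        if k not in seen:
            seen.add(k)
            out.append(item)
    return out
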
Example #7
def listLinksInComment(url, name, type_):
    from domains import parse_reddit_link, build_DirectoryItem_url_based_on_media_type
    from utils import markdown_to_bbcode, unescape
    from guis import progressBG

    log('listLinksInComment:%s:%s' %(type_,url) )


    directory_items=[]
    author=""
    ShowOnlyCommentsWithlink=False

    if type_=='linksOnly':
        ShowOnlyCommentsWithlink=True

    url=urllib.quote_plus(url,safe=':/?&')
    if '?' in url:
        url=url.split('?', 1)[0]+'.json?'+url.split('?', 1)[1]
    else:
        url+= '.json'

    loading_indicator=progressBG(translation(30024))
    loading_indicator.update(0,'Retrieving comments')

    content = reddit_request(url)
    if not content:
        loading_indicator.end()
        return

    loading_indicator.update(10,'Parsing')
    content = json.loads(content)

    del harvest[:]

    r_linkHunter(content[0]['data']['children'])

    try: submitter=content[0]['data']['children'][0]['data']['author']
    except (KeyError, IndexError): submitter=''

    try: post_title=content[0]['data']['children'][0]['data']['title']
    except (KeyError, IndexError): post_title=''

    r_linkHunter(content[1]['data']['children'])

    comment_score=0

    loading_indicator.set_tick_total(len(harvest))

    for i, h in enumerate(harvest):
        try:

            comment_score=h[0]

            link_url=h[2]
            desc100=h[3].replace('\n',' ')[0:100] #first 100 characters of description

            kind=h[6] #reddit uses t1 for user comments and t3 for the OP's own text (like a poster describing the post)
            d=h[5]   #depth of the comment

            tab=" "*d if d>0 else "-"

            from urlparse import urlparse
            domain = '{uri.netloc}'.format( uri=urlparse( link_url ) )

            author=h[7]
            DirectoryItem_url=''

            if comment_score < int_CommentTreshold:
                continue


            ld=parse_reddit_link(link_url=link_url, assume_is_video=False, needs_preview=True, get_playable_url=True )

            if kind=='t1':
                list_title=r"[COLOR cadetblue]%3d[/COLOR] %s" %( h[0], tab )
            elif kind=='t3':
                list_title=r"[COLOR cadetblue]Title [/COLOR] %s" %( tab )

            plot=h[3].replace('](', '] (')
            plot= markdown_to_bbcode(plot)
            plot=unescape(plot)  #convert html entities e.g.:(&#39;)

            liz=xbmcgui.ListItem(label=list_title +': '+ desc100)

            liz.setInfo( type="Video", infoLabels={ "Title": h[1], "plot": plot, "studio": domain, "votes": str(comment_score), "director": author  } )
            isFolder=False

            if link_url:
                DirectoryItem_url, setProperty_IsPlayable, isFolder, title_prefix = build_DirectoryItem_url_based_on_media_type(ld, link_url)

                liz.setProperty('IsPlayable', setProperty_IsPlayable)
                liz.setProperty('url', DirectoryItem_url)  #<-- needed by the xml gui skin
                liz.setPath(DirectoryItem_url)

                if domain:
                    plot= "  [COLOR greenyellow][%s] %s"%(domain, plot )  + "[/COLOR]"
                else:
                    plot= "  [COLOR greenyellow][%s]"%( plot ) + "[/COLOR]"
                liz.setLabel(list_title+plot)

                if ld:
                    liz.setArt({"thumb": ld.poster, "poster":ld.poster, "banner":ld.poster, "fanart":ld.poster, "landscape":ld.poster   })

            if DirectoryItem_url:

                directory_items.append( (DirectoryItem_url, liz, isFolder,) )

            else:

                if not ShowOnlyCommentsWithlink:
                    result=h[3].replace('](', '] (')
                    result=markdown_to_bbcode(result)
                    liz=xbmcgui.ListItem(label=list_title + desc100)
                    liz.setInfo( type="Video", infoLabels={ "Title": h[1], "plot": result, "studio": domain, "votes": str(h[0]), "director": author } )
                    liz.setProperty('IsPlayable', 'false')

                    directory_items.append( ("", liz, False,) )

        except Exception as e:
            log('  EXCEPTION:' + str(e) )


        loading_indicator.tick(1, desc100)
    loading_indicator.end()

    xbmcplugin.setContent(pluginhandle, "movies")    #files, songs, artists, albums, movies, tvshows, episodes, musicvideos
    xbmcplugin.setPluginCategory(pluginhandle,'Comments')

    xbmcplugin.addDirectoryItems(handle=pluginhandle, items=directory_items )
    xbmcplugin.endOfDirectory(pluginhandle)

    if comments_viewMode:
        xbmc.executebuiltin('Container.SetViewMode(%s)' %comments_viewMode)
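
markdown_to_bbcode() converts the markdown in comment bodies into Kodi's BBCode-like label markup before it is shown as the plot. An illustrative fragment of what such a converter might cover; the real helper in utils handles more cases.

import re

#sketch only: bold, italics and [text](url) links; other markdown untouched
def markdown_to_bbcode(s):
    s = re.sub(r'\*\*(.*?)\*\*', r'[B]\1[/B]', s)      #**bold**   -> [B]bold[/B]
    s = re.sub(r'\*(.*?)\*', r'[I]\1[/I]', s)          #*italic*   -> [I]italic[/I]
    s = re.sub(r'\[(.*?)\]\((.*?)\)', r'\1 (\2)', s)   #[desc](url) -> desc (url)
    return s
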
def listSubReddit(url, name, subreddit_key):
    from guis import progressBG
    from utils import post_is_filtered_out, set_query_field
    from reddit import has_multiple
    global GCXM_hasmultiplesubreddit, GCXM_hasmultipledomain, GCXM_hasmultipleauthor, GCXM_subreddit_key
    log("listSubReddit subreddit=%s url=%s" % (subreddit_key, url))

    currentUrl = url

    #use the "episodes" content type rather than "movies" so that the Wall and InfoWall views in the default Estuary skin show titles on each item like the Youtube and RedditTV addons.
    #The "movies" type assumes the thumbnails are posters and don't need a title which is definitely not the case for reddit content.  --credit:Kestrel
    xbmcplugin.setContent(
        pluginhandle, "episodes"
    )  #files, songs, artists, albums, movies, tvshows, episodes, musicvideos

    loading_indicator = progressBG('Loading...')
    loading_indicator.update(8, 'Retrieving ' + subreddit_key)

    content = reddit_request(url)
    loading_indicator.update(11, subreddit_key)

    if not content:
        loading_indicator.end()  #it is important to close xbmcgui.DialogProgressBG
        return

    page_title = "[COLOR cadetblue]%s[/COLOR]" % subreddit_key

    #setPluginCategory lets us show text at the top of window, we take advantage of this and put the subreddit name
    xbmcplugin.setPluginCategory(pluginhandle, page_title)

    info_label = {"plot": translation(30013)}  #Automatically play videos
    if autoplayAll:
        addDir("[B]- " + translation(30016) + "[/B]", url, 'autoPlay', "",
               "ALL", info_label)
    if autoplayUnwatched:
        addDir("[B]- " + translation(30017) + "[/B]", url, 'autoPlay', "",
               "UNWATCHED", info_label)

    threads = []
    q_liz = Queue()  #output queue (listitem)

    content = json.loads(content)

    #A modhash is a token that the reddit API requires to help prevent CSRF. Modhashes can be obtained via the /api/me.json call or in response data of listing endpoints.
    #The preferred way to send a modhash is to include an X-Modhash custom HTTP header with your requests.
    #Modhashes are not required when authenticated with OAuth.
    #modhash=content['data']['modhash']
    #log( 'modhash='+repr(modhash) )
    #log("query returned %d items " % len(content['data']['children']) )
    posts_count = len(content['data']['children'])
    filtered_out_posts = 0

    GCXM_hasmultiplesubreddit = has_multiple('subreddit',
                                             content['data']['children'])
    GCXM_hasmultipledomain = has_multiple('domain',
                                          content['data']['children'])
    GCXM_hasmultipleauthor = has_multiple('author',
                                          content['data']['children'])
    GCXM_subreddit_key = subreddit_key
    for idx, entry in enumerate(content['data']['children']):
        try:
            if post_is_filtered_out(entry.get('data')):
                filtered_out_posts += 1
                continue

            #have threads process each reddit post
            t = threading.Thread(target=reddit_post_worker,
                                 args=(idx, entry, q_liz),
                                 name='#t%.2d' % idx)
            threads.append(t)
            t.start()

        except Exception as e:
            log(" EXCEPTION:=" + str(sys.exc_info()[0]) + "  " + str(e))

    #check the queue to determine progress
    break_counter = 0  #to avoid infinite loop
    expected_listitems = (posts_count - filtered_out_posts)
    if expected_listitems > 0:
        loading_indicator.set_tick_total(expected_listitems)
        last_queue_size = 0
        while q_liz.qsize() < expected_listitems:
            if break_counter >= 100:
                break

            #each change in the queue size gets a tick on our progress track
            if last_queue_size < q_liz.qsize():
                items_added = q_liz.qsize() - last_queue_size
                loading_indicator.tick(items_added)
            else:
                break_counter += 1

            last_queue_size = q_liz.qsize()
            xbmc.sleep(50)

    #wait for all threads to finish before collecting the list items
    for idx, t in enumerate(threads):
        #log('    joining %s' %t.getName())
        t.join(timeout=20)

    xbmc_busy(False)

    #compare the number of entries to the returned results
    #log( "queue:%d entries:%d" %( q_liz.qsize() , len(content['data']['children'] ) ) )
    if q_liz.qsize() != expected_listitems:
        log('some threads did not return a listitem')

    #liz is a tuple for addDirectoryItems
    li = [liz for idx, liz in sorted(q_liz.queue)]  #list of (url, listitem[, isFolder]) tuples
    #log(repr(li))

    #empty the queue.
    with q_liz.mutex:
        q_liz.queue.clear()

    xbmcplugin.addDirectoryItems(pluginhandle, li)

    loading_indicator.end()  #it is important to close xbmcgui.DialogProgressBG

    try:
        #this part makes sure that you load the next page instead of just the first
        after = content['data']['after']

        o = urlparse.urlparse(currentUrl)
        current_url_query = urlparse.parse_qs(o.query)

        nextUrl = set_query_field(
            currentUrl, field='after', value=after,
            replace=True)  #(url, field, value, replace=False):
        #log('$$$currenturl: ' +currentUrl)
        #log('$$$   nextUrl: ' +nextUrl)

        count = current_url_query.get('count')
        #log('$$$count   : ' +repr(count))
        if current_url_query.get('count') is None:
            #first time through it is None
            count = itemsPerPage
        else:
            #on subsequent pages it keeps incrementing by itemsPerPage
            try:
                count = int(
                    current_url_query.get('count')[0]) + int(itemsPerPage)
            except ValueError:
                count = itemsPerPage

        nextUrl = set_query_field(nextUrl, 'count', count, True)
        #log('$$$   nextUrl: ' +nextUrl)

        # plot shows up on estuary. etc. ( avoids the "No information available" message on description )
        info_label = {"plot": translation(30004) + '[CR]' + page_title}
        addDir(translation(30004), nextUrl, 'listSubReddit', "", subreddit_key,
               info_label)  #Next Page
    except Exception as e:
        log('    Exception: ' + str(e))

    #the +'s got removed by url conversion
    subreddit_key = subreddit_key.replace(' ', '+')
    viewID = WINDOW.getProperty("viewid-" + subreddit_key)
    #log("  custom viewid %s for %s " %(viewID,subreddit_key) )

    if viewID:
        log("  custom viewid %s for %s " % (viewID, subreddit_key))
        xbmc.executebuiltin('Container.SetViewMode(%s)' % viewID)
    else:
        if forceViewMode:
            xbmc.executebuiltin('Container.SetViewMode(' + viewMode + ')')

    xbmcplugin.endOfDirectory(
        handle=pluginhandle,
        succeeded=True,
        updateListing=False,  #setting this to True causes the ".." entry to quit the plugin
        cacheToDisc=True)
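
has_multiple(field, children) decides whether the listing spans more than one subreddit, domain, or author; the GCXM_* globals cache the answer for the context menu. A plausible sketch of that helper, inferred from how it is called; the real implementation in reddit.py may differ.

#sketch of reddit.has_multiple: True when the listing's posts span more than
#one distinct value for the given field (e.g. a multireddit or search query)
def has_multiple(field, children):
    values = set()
    for child in children:
        values.add(child.get('data', {}).get(field))
        if len(values) > 1:
            return True
    return False
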
Example #9
def autoPlay(url, name, type_):
    from domains import sitesBase, parse_reddit_link, ydtl_get_playable_url
    from utils import unescape, post_is_filtered_out, strip_emoji, xbmc_busy, log, translation
    from reddit import reddit_request, determine_if_video_media_from_reddit_json
    from actions import setting_gif_repeat_count

    #collect a list of title and urls as entries[] from the j_entries obtained from reddit
    #then create a playlist from those entries
    #then play the playlist

    gif_repeat_count = setting_gif_repeat_count()
    entries = []
    watchdog_counter = 0
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    playlist.clear()

    xbmc_busy()

    content = reddit_request(url)
    if not content: return
    #log( str(content) )
    #content = json.loads(content.replace('\\"', '\''))
    content = json.loads(content)

    log("Autoplay %s - Parsing %d items" %
        (type_, len(content['data']['children'])))

    for j_entry in content['data']['children']:
        try:
            title = unescape(j_entry['data']['title'].encode('utf-8'))
            title = strip_emoji(title)

            try:
                media_url = j_entry['data']['url']
            except (KeyError, AttributeError, TypeError):
                media_url = j_entry['data']['media']['oembed']['url']

            is_a_video = determine_if_video_media_from_reddit_json(j_entry)

            log("  %cTITLE:%s" % (("v" if is_a_video else " "), title))

            ld = parse_reddit_link(link_url=media_url,
                                   assume_is_video=False,
                                   needs_preview=False,
                                   get_playable_url=True)

            if ld:
                log('      type:%s %s' % (ld.media_type, ld.link_action))
                if ld.media_type in [
                        sitesBase.TYPE_VIDEO, sitesBase.TYPE_GIF,
                        sitesBase.TYPE_VIDS, sitesBase.TYPE_MIXED
                ]:

                    if ld.media_type == sitesBase.TYPE_GIF:
                        entries.append([
                            title, ld.playable_url,
                            sitesBase.DI_ACTION_PLAYABLE
                        ])
                        for _ in range(0, gif_repeat_count):
                            entries.append([
                                title, ld.playable_url,
                                sitesBase.DI_ACTION_PLAYABLE
                            ])
                    else:
                        entries.append(
                            [title, ld.playable_url, ld.link_action])
            else:
                #log('    checking if ytdl supports %s' %media_url )
                playable_video_url = ydtl_get_playable_url(media_url)
                if playable_video_url:
                    for u in playable_video_url:
                        entries.append(
                            [title, u, sitesBase.DI_ACTION_PLAYABLE])

        except Exception as e:
            log('  autoPlay exception:' + str(e))

    #for i,e in enumerate(entries): log('  e1-%d %s:' %(i, e[1]) )
    #def k2(x): return x[1]
    #entries=remove_duplicates(entries, k2)   #***disable removal of duplicates because it will also remove looping for gif videos
    #for i,e in enumerate(entries): log('  e2-%d %s:' %(i, e[1]) )

    for i, e in enumerate(entries):
        try:
            log('  possible playable items(%.2d) %s...%s (%s)' %
                (i, e[0].ljust(15)[:15], e[1], e[2]))
        except:
            continue

    if len(entries) == 0:
        log('  Play All: no playable items')
        xbmc.executebuiltin(
            'XBMC.Notification("%s","%s")' %
            (translation(32054),
             translation(32055)))  #Play All     No playable items
        return

    entries_to_buffer = 4
    #log('  entries:%d buffer:%d' %( len(entries), entries_to_buffer ) )
    if len(entries) < entries_to_buffer:
        entries_to_buffer = len(entries)
        #log('entries to buffer reduced to %d' %entries_to_buffer )

    #for title, url in entries:
    #    log("  added to playlist:"+ title + "  " + url )

    log("**********autoPlay*************")

    #play_list=[]
    ev = threading.Event()

    t = Worker(entries, q, ev)
    t.daemon = True
    t.start()
    #t.run()

    #wait for worker to finish processing 1st item
    #e.wait(200)

    while True:
        #log( '  c-wait+get buffer(%d) wdt=%d ' %(playlist.size(), watchdog_counter)  )
        try:
            #playable_url = q.get(True, 10)
            playable_entry = q.get(True, 10)
            #playable_url=playable_entry[1]
            q.task_done()
            #play_list.append(playable_entry[1])
            playlist.add(playable_entry[1],
                         xbmcgui.ListItem(playable_entry[0]))
            log('    c-buffered(%d):%s...%s' %
                (playlist.size(), playable_entry[0].ljust(15)[:15],
                 playable_entry[1]))

        except:
            watchdog_counter += 1
            if ev.is_set():  #p is done producing
                break
            #if got 3 empty from queue.
            pass
        watchdog_counter += 1
        #log('  playlist:%d buffer:%d' %( playlist.size(), entries_to_buffer ) )
        if playlist.size() >= entries_to_buffer:  #q.qsize()
            log('  c-buffer count met')
            break
        if watchdog_counter > entries_to_buffer:
            break

    log('  c-buffering done')

    #xbmc_busy(False)

    xbmc.Player().play(playlist)

    watchdog_counter = 0
    while True:
        #log( '  c-get buffer(%d) wdt=%d ' %(playlist.size(), watchdog_counter)  )
        #q.join()
        #log( ' c- join-ed, get... '  )
        try:
            #playable_url = q.get(True,10)
            playable_entry = q.get(True, 10)
            q.task_done()
            #log( '    c- got next item... ' + playable_entry[1] )
            #play_list.append(playable_entry[1])
            playlist.add(playable_entry[1],
                         xbmcgui.ListItem(playable_entry[0]))
            log('    c-got next item(%d):%s...%s' %
                (playlist.size(), playable_entry[0].ljust(15)[:15],
                 playable_entry[1]))
        except:
            watchdog_counter += 1
            if ev.is_set():  #p is done producing
                break

            pass
        #xbmc.PlayList(1).add(playable_url)

        if ev.is_set() and q.empty():
            log(' c- ev is set and q.empty -->  break ')
            break

        if watchdog_counter > 2:
            break

    log(' c-all done ')
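
The Worker producer thread used above is not part of this snippet. Here is a rough sketch under the assumptions visible in the consumer loop: it walks entries ([title, url, action] triplets), resolves each to a playable url (the resolution step is stubbed out below), feeds (title, url) pairs into the queue, and sets the event when production is finished.

import threading

#hypothetical sketch of the Worker producer; the real class resolves urls
#that are not directly playable (e.g. via youtube-dl) before queueing them
class Worker(threading.Thread):
    def __init__(self, entries, out_queue, done_event):
        threading.Thread.__init__(self)
        self.entries = entries
        self.out_queue = out_queue
        self.done_event = done_event

    def run(self):
        for title, url, action in self.entries:
            playable = url  #stub: a real worker would resolve non-playable actions here
            self.out_queue.put((title, playable))
        self.done_event.set()  #tell the consumer that production is finished
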
Example #10
def autoPlay(url, name, type_):
    from domains import sitesBase, parse_reddit_link, ydtl_get_playable_url
    from utils import unescape, post_is_filtered_out, strip_emoji,xbmc_busy, translation, xbmc_notify
    from reddit import reddit_request, determine_if_video_media_from_reddit_json
    from actions import setting_gif_repeat_count

    #collect a list of title and urls as entries[] from the j_entries obtained from reddit
    #then create a playlist from those entries
    #then play the playlist

    gif_repeat_count=setting_gif_repeat_count()
    entries = []
    watchdog_counter=0
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    playlist.clear()

    xbmc_busy()

    content = reddit_request(url)
    if not content: return
    #log( str(content) )
    #content = json.loads(content.replace('\\"', '\''))
    content = json.loads(content)

    log("Autoplay %s - Parsing %d items" %( type_, len(content['data']['children']) )    )

    for j_entry in content['data']['children']:
        try:
            title=unescape(j_entry['data']['title'].encode('utf-8'))
            title=strip_emoji(title)

            try:
                media_url = j_entry['data']['url']
            except (KeyError, AttributeError, TypeError):
                media_url = j_entry['data']['media']['oembed']['url']

            is_a_video = determine_if_video_media_from_reddit_json(j_entry)

            log("  %cTITLE:%s"  %( ("v" if is_a_video else " "), title  ) )

            ld=parse_reddit_link(link_url=media_url, assume_is_video=False, needs_preview=False, get_playable_url=True )

            if ld:
                log('      type:%s %s' %( ld.media_type, ld.link_action)   )
                if ld.media_type in [sitesBase.TYPE_VIDEO, sitesBase.TYPE_GIF, sitesBase.TYPE_VIDS, sitesBase.TYPE_MIXED]:

                    if ld.media_type==sitesBase.TYPE_GIF:
                        entries.append([title,ld.playable_url, sitesBase.DI_ACTION_PLAYABLE])
                        for _ in range( 0, gif_repeat_count ):
                            entries.append([title,ld.playable_url, sitesBase.DI_ACTION_PLAYABLE])
                    else:
                        entries.append([title,ld.playable_url, ld.link_action])
            else:
                #log('    checking if ytdl supports %s' %media_url )
                playable_video_url=ydtl_get_playable_url(media_url)
                if playable_video_url:
                    for u in playable_video_url:
                        entries.append([title, u, sitesBase.DI_ACTION_PLAYABLE])

        except Exception as e:
            log( '  autoPlay exception:' + str(e) )

    #for i,e in enumerate(entries): log('  e1-%d %s:' %(i, e[1]) )
    #def k2(x): return x[1]
    #entries=remove_duplicates(entries, k2)   #***disable removal of duplicates because it will also remove looping for gif videos
    #for i,e in enumerate(entries): log('  e2-%d %s:' %(i, e[1]) )

    for i,e in enumerate(entries):
        try:
            log('  possible playable items(%.2d) %s...%s (%s)' %(i, e[0].ljust(15)[:15], e[1],e[2]) )
        except:
            continue

    if len(entries)==0:
        xbmc_notify(translation(32025), translation(32026))  #Play All     No playable items
        xbmc_busy(False)
        return

    entries_to_buffer=4
    #log('  entries:%d buffer:%d' %( len(entries), entries_to_buffer ) )
    if len(entries) < entries_to_buffer:
        entries_to_buffer=len(entries)
        #log('entries to buffer reduced to %d' %entries_to_buffer )

    #for title, url in entries:
    #    log("  added to playlist:"+ title + "  " + url )

    log("**********autoPlay*************")

    #play_list=[]
    ev = threading.Event()

    t = Worker(entries, q, ev)
    t.daemon = True
    t.start()
    #t.run()

    #wait for worker to finish processing 1st item
    #e.wait(200)

    while True:
        #log( '  c-wait+get buffer(%d) wdt=%d ' %(playlist.size(), watchdog_counter)  )
        try:
            #playable_url = q.get(True, 10)
            playable_entry = q.get(True, 10)
            #playable_url=playable_entry[1]
            q.task_done()
            #play_list.append(playable_entry[1])
            playlist.add(playable_entry[1], xbmcgui.ListItem(playable_entry[0]))
            log( '    c-buffered(%d):%s...%s' %(playlist.size(), playable_entry[0].ljust(15)[:15], playable_entry[1])  )

        except:
            watchdog_counter+=1
            if ev.is_set():#p is done producing
                break
            #queue came up empty; the watchdog check below caps the retries
            pass
        watchdog_counter+=1
        #log('  playlist:%d buffer:%d' %( playlist.size(), entries_to_buffer ) )
        if playlist.size() >= entries_to_buffer:  #q.qsize()
            log('  c-buffer count met')
            break
        if watchdog_counter > entries_to_buffer:
            break

    log('  c-buffering done')

    xbmc_busy(False)

    xbmc.Player().play(playlist)

    watchdog_counter=0
    while True:
        #log( '  c-get buffer(%d) wdt=%d ' %(playlist.size(), watchdog_counter)  )
        #q.join()
        #log( ' c- join-ed, get... '  )
        try:
            #playable_url = q.get(True,10)
            playable_entry = q.get(True,10)
            q.task_done()
            #log( '    c- got next item... ' + playable_entry[1] )
            #play_list.append(playable_entry[1])
            playlist.add(playable_entry[1], xbmcgui.ListItem(playable_entry[0]))
            log( '    c-got next item(%d):%s...%s' %(playlist.size(), playable_entry[0].ljust(15)[:15], playable_entry[1])  )
        except:
            watchdog_counter+=1
            if ev.isSet(): #p is done producing
                break

            pass
        #xbmc.PlayList(1).add(playable_url)

        if ev.isSet() and q.empty():
            log( ' c- ev is set and q.empty -->  break '  )
            break

        if watchdog_counter > 2:
            break

    log( ' c-all done '  )
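
The Worker class is imported from elsewhere in the addon and is not shown here. A minimal sketch of the producer contract that autoPlay() relies on (resolve_playable_url() is a hypothetical stand-in, not the addon's real helper):

import threading

class Worker(threading.Thread):
    #producer: resolves each [title, url, action] entry to a playable
    #url and feeds the queue that the buffering loops above drain
    def __init__(self, entries, q, ev):
        threading.Thread.__init__(self)
        self.entries = entries
        self.q = q
        self.ev = ev

    def run(self):
        for title, url, action in self.entries:
            playable = resolve_playable_url(url, action)  #hypothetical helper
            if playable:
                self.q.put((title, playable))
        self.ev.set()  #tells the consumer that production is finished
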
Example No. 11
def autoSlideshow(url, name, type_):

    log('starting slideshow '+ url)
    ev=threading.Event()

    entries = []

    preview_w=0
    preview_h=0
    image=''

    content = reddit_request(url)
    if not content: return

    content = json.loads(content)

    log("slideshow %s:Parsing %d items: %s" %( type_, len(content['data']['children']), 'random' if random_post_order else 'normal order' )    )

    data_children = content['data']['children']

    if random_post_order:
        random.shuffle(data_children)

    for j_entry in data_children:
        try:
            title = unescape(j_entry['data']['title'].encode('utf-8'))
            log("  TITLE:%s [r/%s]"  %( title, j_entry.get('data').get('subreddit') )  )

            try:    description = unescape(j_entry['data']['media']['oembed']['description'].encode('utf-8'))
            except: description = ''

            try:    post_selftext=unescape(j_entry['data']['selftext'].encode('utf-8'))
            except: post_selftext=''


            description=post_selftext+'[CR]'+description if post_selftext else description

            try:
                media_url = j_entry['data']['url']
            except:
                media_url = j_entry['data']['media']['oembed']['url']

            try:
                preview=j_entry['data']['preview']['images'][0]['source']['url'].encode('utf-8').replace('&amp;','&')
                try:
                    preview_h = float( j_entry['data']['preview']['images'][0]['source']['height'] )
                    preview_w = float( j_entry['data']['preview']['images'][0]['source']['width'] )
                except:
                    preview_w=0
                    preview_h=0

            except Exception as e:

                preview=""


            ld=parse_reddit_link(link_url=media_url, assume_is_video=False, needs_preview=True, get_playable_url=True )
            if ld:
                if not preview:
                    preview = ld.poster

                if (addon.getSetting('include_albums')=='true') and (ld.media_type==sitesBase.TYPE_ALBUM) :
                    dictlist = listAlbum( media_url, title, 'return_dictlist')
                    for d in dictlist:

                        t2=d.get('li_label') if d.get('li_label') else title


                        d['li_label']=t2
                        entries.append( d )

                else:
                    if addon.getSetting('use_reddit_preview')=='true':
                        if preview: image=preview
                        elif ld.poster: image=ld.poster

                    else:
                        if ld.poster:  image=ld.poster #entries.append([title,ld.poster,preview_w, preview_h,len(entries)])
                        elif preview: image=preview  #entries.append([title,preview,preview_w, preview_h,len(entries)])


                    append_entry( entries, title,image,preview_w, preview_h, description )
            else:
                append_entry( entries, title,preview,preview_w, preview_h, description )


        except Exception as e:
            log( '  autoSlideshow exception:' + str(e) )


    entries = remove_dict_duplicates( entries, 'DirectoryItem_url')


    for i, e in enumerate(entries):
        log('  possible playable items({0}) {1}...{2}x{3}  {4}'.format( i, e['li_label'].ljust(15)[:15], repr(e.get('width')),repr(e.get('height')),  e.get('DirectoryItem_url')) )

    if len(entries)==0:
        log('  Play All: no playable items' )
        xbmc.executebuiltin('XBMC.Notification("%s","%s")' %(translation(32054), translation(32055)  ) )  #Play All     No playable items
        return


    log("**********playing slideshow*************")

    for e in entries:
        q.put(e)

    s= ScreensaverManager(ev,q)

    try:
        s.start_loop()
    except Exception as e:
        log("  EXCEPTION slideshowAlbum:="+ str( sys.exc_info()[0]) + "  " + str(e) )


    return
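
append_entry() is defined elsewhere; the dict keys below are inferred from how the entries are consumed above ('li_label', 'DirectoryItem_url', 'width', 'height'), while the 'description' key name is an assumption:

def append_entry(entries, title, image, width, height, description):
    #builds the dict consumed by remove_dict_duplicates() and the
    #slideshow queue; key names match their usage in autoSlideshow,
    #except 'description' which is a guess
    entries.append({
        'li_label': title,
        'DirectoryItem_url': image,
        'width': width,
        'height': height,
        'description': description,
    })
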
def listSubReddit(url, subreddit_key, type_):
    from guis import progressBG
    from utils import post_is_filtered_out, build_script, compose_list_item, xbmc_notify
    from reddit import reddit_request, has_multiple, assemble_reddit_filter_string

    global GCXM_hasmultiplesubreddit, GCXM_actual_url_used_to_generate_these_posts, GCXM_reddit_query_of_this_gui, GCXM_hasmultipledomain, GCXM_hasmultipleauthor
    #the +'s got removed by url conversion
    title_bar_name = subreddit_key.replace(' ', '+')
    #log("  title_bar_name %s " %(title_bar_name) )

    log("listSubReddit r/%s\n %s" % (title_bar_name, url))

    currentUrl = url
    icon = banner = header = None
    xbmc_busy()

    loading_indicator = progressBG('Loading...')
    loading_indicator.update(0, 'Retrieving ' + subreddit_key)
    content = reddit_request(url)
    loading_indicator.update(10, subreddit_key)

    if not content:
        xbmc_busy(False)
        loading_indicator.end(
        )  #it is important to close xbmcgui.DialogProgressBG
        return

    threads = []
    q_liz = Queue()  #output queue (listitem)

    content = json.loads(content)
    #log("query returned %d items " % len(content['data']['children']) )
    posts_count = len(content['data']['children'])
    filtered_out_posts = 0

    hms = has_multiple('subreddit', content['data']['children'])

    if hms == False:  #r/random and r/randnsfw returns a random subreddit. we need to use the name of this subreddit for the "next page" link.
        try:
            g = content['data']['children'][0]['data']['subreddit']
        except KeyError:  #a missing key leaves g empty (the lookup raises KeyError, not ValueError)
            g = ""
        except IndexError:
            xbmc_busy(False)
            loading_indicator.end(
            )  #it is important to close xbmcgui.DialogProgressBG
            xbmc_notify("List Subreddit", translation(32022))
            return
        if g:
            title_bar_name = g
            #preserve the &after string so that functions like play slideshow and play all videos can 'play' the correct page
            #  extract the &after string from currentUrl -OR- send it with the 'type' argument when calling this function.
            currentUrl = assemble_reddit_filter_string('',
                                                       g) + '&after=' + type_

        #put subreddit icon/header in the GUI
        icon, banner, header = subreddit_icoheader_banner(g)

    GCXM_hasmultiplesubreddit = hms
    GCXM_hasmultipledomain = has_multiple('domain',
                                          content['data']['children'])
    GCXM_hasmultipleauthor = has_multiple('author',
                                          content['data']['children'])
    GCXM_actual_url_used_to_generate_these_posts = url
    GCXM_reddit_query_of_this_gui = currentUrl

    for idx, entry in enumerate(content['data']['children']):
        try:
            #if entry.get('kind')!='t3':
            #    filtered_out_posts+=1
            #    continue
            if post_is_filtered_out(entry.get('data')):
                filtered_out_posts += 1
                continue
            #have threads process each reddit post
            t = threading.Thread(target=reddit_post_worker,
                                 args=(idx, entry, q_liz),
                                 name='#t%.2d' % idx)
            threads.append(t)
            t.start()

        except Exception as e:
            log(" EXCEPTION:=" + str(sys.exc_info()[0]) + "  " + str(e))

    #check the queue to determine progress
    break_counter = 0  #to avoid infinite loop
    expected_listitems = (posts_count - filtered_out_posts)
    if expected_listitems > 0:
        loading_indicator.set_tick_total(expected_listitems)
        last_queue_size = 0
        while q_liz.qsize() < expected_listitems:
            if break_counter >= 100:
                break

            #each change in the queue size gets a tick on our progress track
            if last_queue_size < q_liz.qsize():
                items_added = q_liz.qsize() - last_queue_size
                loading_indicator.tick(items_added)
            else:
                break_counter += 1

            last_queue_size = q_liz.qsize()
            xbmc.sleep(100)

    #wait for all threads to finish before collecting the list items
    for idx, t in enumerate(threads):
        #log('    joining %s' %t.getName())
        t.join(timeout=20)

    xbmc_busy(False)

    #compare the number of entries to the returned results
    #log( "queue:%d entries:%d" %( q_liz.qsize() , len(content['data']['children'] ) ) )
    if q_liz.qsize() != expected_listitems:
        #some posts might be filtered out.
        log('some threads did not return a listitem')

    #for t in threads: log('isAlive %s %s' %(t.getName(), repr(t.isAlive()) )  )

    #liu=[ qi for qi in sorted(q_liz.queue) ]
    li = [liz for idx, liz in sorted(q_liz.queue)]

    #empty the queue.
    with q_liz.mutex:
        q_liz.queue.clear()

    loading_indicator.end()  #it is important to close xbmcgui.DialogProgressBG

    try:
        #this part makes sure that you load the next page instead of just the first
        after = ""
        after = content['data']['after']
        if after:
            if "&after=" in currentUrl:
                nextUrl = currentUrl[:currentUrl.find("&after="
                                                      )] + "&after=" + after
            else:
                nextUrl = currentUrl + "&after=" + after

            liz = compose_list_item(
                translation(32004), "", "DefaultFolderNextSquare.png",
                "script",
                build_script("listSubReddit", nextUrl, title_bar_name, after))

            #for items at the bottom left corner
            liz.setArt({"clearart": "DefaultFolderNextSquare.png"})
            liz.setInfo(type='video',
                        infoLabels={"Studio": translation(32004)})
            liz.setProperty('link_url', nextUrl)
            li.append(liz)

    except Exception as e:
        log(" EXCEPTzION:=" + str(sys.exc_info()[0]) + "  " + str(e))

    xbmc_busy(False)

    title_bar_name = urllib.unquote_plus(title_bar_name)
    ui = skin_launcher('listSubReddit',
                       title_bar_name=title_bar_name,
                       listing=li,
                       subreddits_file=subredditsFile,
                       currentUrl=currentUrl,
                       icon=icon,
                       banner=banner,
                       header=header)
    ui.doModal()
    del ui
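
has_multiple() comes from the reddit module; a plausible sketch consistent with how it is called here (it reports whether the listing spans more than one subreddit, domain or author):

def has_multiple(key, children):
    #True when the posts in this listing have more than one distinct
    #value for `key` (e.g. 'subreddit'); used above to detect when
    #r/random resolved to a single subreddit
    values = set()
    for child in children:
        values.add((child.get('data') or {}).get(key))
        if len(values) > 1:
            return True
    return False
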
def listLinksInComment(url, name, type_):
    from guis import progressBG
    from reddit import reddit_request
    from utils import clean_str

    log('listLinksInComment:%s:%s' % (type_, url))

    post_title = ''
    #    ShowOnlyCommentsWithlink=False
    #    if type_=='linksOnly':
    #        ShowOnlyCommentsWithlink=True

    #url='https://np.reddit.com/r/videos/comments/64j9x7/doctor_violently_dragged_from_overbooked_cia/dg2pbtj/?st=j1cbxsst&sh=2d5daf4b'
    #url=url.split('?', 1)[0]+'.json'+url.split('?', 1)[1]

    #log(repr(url.split('?', 1)[0]))
    #log(repr(url.split('?', 1)[1]))
    #log(repr(url.split('?', 1)[0]+'.json?'+url.split('?', 1)[1]))

    #url='https://www.reddit.com/r/Music/comments/4k02t1/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/' + '.json'
    #only get up to "https://www.reddit.com/r/Music/comments/4k02t1".
    #   do not include                                            "/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/"
    #   because we'll have problem when it looks like this: "https://www.reddit.com/r/Overwatch/comments/4nx91h/ever_get_that_feeling_déjà_vu/"
    #url=re.findall(r'(.*/comments/[A-Za-z0-9]+)',url)[0]
    #UPDATE you need to convert this: https://www.reddit.com/r/redditviewertesting/comments/4x8v1k/test_test_what_is_déjà_vu/
    #                        to this: https://www.reddit.com/r/redditviewertesting/comments/4x8v1k/test_test_what_is_d%C3%A9j%C3%A0_vu/
    #
    #use safe='' argument in quoteplus to encode only the weird chars part
    url = urllib.quote_plus(url, safe=':/?&')

    if '?' in url:
        url = url.split('?', 1)[0] + '.json?' + url.split('?', 1)[1]
    else:
        url += '.json'

    xbmc_busy()

    loading_indicator = progressBG('Loading...')
    loading_indicator.update(0, 'Retrieving comments')
    content = reddit_request(url)
    loading_indicator.update(10, 'Parsing')

    if not content:
        loading_indicator.end()
        return

    try:
        xbmc_busy()
        content = json.loads(content)

        del harvest[:]
        #harvest links in the post text (just 1)
        r_linkHunter(content[0]['data']['children'])

        #submitter=content[0]['data']['children'][0]['data']['author']
        submitter = clean_str(content,
                              [0, 'data', 'children', 0, 'data', 'author'])

        #the post title is provided in json, we'll just use that instead of messages from addLink()
        #post_title=content[0]['data']['children'][0]['data']['title']
        post_title = clean_str(content,
                               [0, 'data', 'children', 0, 'data', 'title'])

        #harvest links in the post itself
        r_linkHunter(content[1]['data']['children'])
        #for i, h in enumerate(harvest):
        #    log( '  %d %s %d -%s   link[%s]' % ( i, h[7].ljust(8)[:8], h[0], h[3].ljust(20)[:20],h[2] ) )

        c_threads = []
        q_liz = Queue()
        comments_count = len(harvest)
        filtered_posts = 0
        for idx, h in enumerate(harvest):
            comment_score = h[0]
            if comment_score < int_CommentTreshold:
                log('    comment score %d < %d, skipped' %
                    (comment_score, int_CommentTreshold))
                filtered_posts += 1
                continue

            #have threads process each comment post
            t = threading.Thread(target=reddit_comment_worker,
                                 args=(idx, h, q_liz, submitter),
                                 name='#t%.2d' % idx)
            c_threads.append(t)
            t.start()

        #check the queue to determine progress
        break_counter = 0  #to avoid infinite loop
        expected_listitems = (comments_count - filtered_posts)
        if expected_listitems > 0:
            loading_indicator.set_tick_total(expected_listitems)
            last_queue_size = 0
            while q_liz.qsize() < expected_listitems:
                if break_counter >= 100:
                    break
                #each change in the queue size gets a tick on our progress track
                if last_queue_size < q_liz.qsize():
                    items_added = q_liz.qsize() - last_queue_size
                    loading_indicator.tick(items_added)
                else:
                    break_counter += 1

                last_queue_size = q_liz.qsize()
                xbmc.sleep(50)

        #wait for all threads to finish before collecting the list items
        for idx, t in enumerate(c_threads):
            #log('    joining %s' %t.getName())
            t.join(timeout=20)

        xbmc_busy(False)

        #compare the number of entries to the returned results
        #log( "queue:%d entries:%d" %( q_liz.qsize() , len(content['data']['children'] ) ) )
        if q_liz.qsize() != expected_listitems:
            log('some threads did not return a listitem. total comments:%d expecting(%d) but only got(%d)'
                % (comments_count, expected_listitems, q_liz.qsize()))

        #for t in threads: log('isAlive %s %s' %(t.getName(), repr(t.isAlive()) )  )
        li = [liz for idx, liz in sorted(q_liz.queue)]
        #log(repr(li))

        with q_liz.mutex:
            q_liz.queue.clear()

    except Exception as e:
        log('  ' + str(e))

    loading_indicator.end()  #it is important to close xbmcgui.DialogProgressBG
    # this portion is abandoned for now. initial plan was to textbox with auto-height in a grouplist to mimic the comment tree but cannot figure out how links can be followed.
    #    from guis import comments_GUI2
    #    ui = comments_GUI2('view_464_comments_grouplist.xml' , addon_path, defaultSkin='Default', defaultRes='1080i', listing=li)
    #    ui.doModal()
    #    del ui
    #    return
    from guis import commentsGUI
    #ui = commentsGUI('view_463_comments.xml' , addon_path, defaultSkin='Default', defaultRes='1080i', listing=li, id=55)
    ui = commentsGUI('view_461_comments.xml',
                     addon_path,
                     defaultSkin='Default',
                     defaultRes='1080i',
                     listing=li,
                     id=55)
    #NOTE: the subreddit selection screen and comments screen use the same gui. there is a button that is only for the comments screen
    ui.setProperty(
        'comments', 'yes'
    )  #the links button is visible/hidden in xml by checking for this property

    ui.title_bar_text = post_title
    ui.include_parent_directory_entry = False

    ui.doModal()
    del ui
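
r_linkHunter() and the module-level harvest list are defined elsewhere. The tuple layout below is inferred from the indexing in listLinksInComment (h[0] score, h[1] title, h[2] link url, h[3] text, h[5] depth, h[6] kind, h[7] author; slot 4 is never read here, so its meaning is a guess). A sketch of the recursive walk, not the addon's real implementation:

import re

harvest = []  #module-level in the addon

def r_linkHunter(json_node, d=0):
    #walk the comment tree and append one tuple per node, pulling the
    #first [desc](http...) markdown link out of the body if present
    for e in json_node:
        data = e.get('data') or {}
        body = data.get('body') or data.get('selftext') or ''
        m = re.search(r'\[(.*?)\]\s*\((https?://[^\s)]+)\)', body)
        link_url = m.group(2) if m else ''
        harvest.append((data.get('score', 0), data.get('title', ''),
                        link_url, body, None, d, e.get('kind'),
                        data.get('author', '')))
        replies = ((data.get('replies') or {}).get('data') or {}).get('children') or []
        r_linkHunter(replies, d + 1)
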
Example No. 14
def autoSlideshow(url, name, type_):

    log('starting slideshow ' + url)
    ev = threading.Event()

    entries = []

    preview_w = 0
    preview_h = 0
    image = ''

    content = reddit_request(url)
    if not content: return

    content = json.loads(content)

    log("slideshow %s:Parsing %d items: %s" %
        (type_, len(content['data']['children']),
         'random' if random_post_order else 'normal order'))

    data_children = content['data']['children']

    if random_post_order:
        random.shuffle(data_children)

    for j_entry in data_children:
        try:
            title = unescape(j_entry['data']['title'].encode('utf-8'))
            log("  TITLE:%s [r/%s]" %
                (title, j_entry.get('data').get('subreddit')))

            try:
                description = unescape(j_entry['data']['media']['oembed']
                                       ['description'].encode('utf-8'))
            except:
                description = ''

            try:
                post_selftext = unescape(
                    j_entry['data']['selftext'].encode('utf-8'))
            except:
                post_selftext = ''

            description = post_selftext + '[CR]' + description if post_selftext else description

            try:
                media_url = j_entry['data']['url']
            except:
                media_url = j_entry['data']['media']['oembed']['url']

            try:
                preview = j_entry['data']['preview']['images'][0]['source'][
                    'url'].encode('utf-8').replace('&amp;', '&')
                try:
                    preview_h = float(j_entry['data']['preview']['images'][0]
                                      ['source']['height'])
                    preview_w = float(j_entry['data']['preview']['images'][0]
                                      ['source']['width'])
                except:
                    preview_w = 0
                    preview_h = 0

            except Exception as e:

                preview = ""

            ld = parse_reddit_link(link_url=media_url,
                                   assume_is_video=False,
                                   needs_preview=True,
                                   get_playable_url=True)
            if ld:
                if not preview:
                    preview = ld.poster

                if (addon.getSetting('include_albums')
                        == 'true') and (ld.media_type == sitesBase.TYPE_ALBUM):
                    dictlist = listAlbum(media_url, title, 'return_dictlist')
                    for d in dictlist:

                        t2 = d.get('li_label') if d.get('li_label') else title

                        d['li_label'] = t2
                        entries.append(d)

                else:
                    if addon.getSetting('use_reddit_preview') == 'true':
                        if preview: image = preview
                        elif ld.poster: image = ld.poster

                    else:
                        if ld.poster:
                            image = ld.poster  #entries.append([title,ld.poster,preview_w, preview_h,len(entries)])
                        elif preview:
                            image = preview  #entries.append([title,preview,preview_w, preview_h,len(entries)])

                    append_entry(entries, title, image, preview_w, preview_h,
                                 description)
            else:
                append_entry(entries, title, preview, preview_w, preview_h,
                             description)

        except Exception as e:
            log('  autoSlideshow exception:' + str(e))

    entries = remove_dict_duplicates(entries, 'DirectoryItem_url')

    for i, e in enumerate(entries):
        log('  possible playable items({0}) {1}...{2}x{3}  {4}'.format(
            i, e['li_label'].ljust(15)[:15], repr(e.get('width')),
            repr(e.get('height')), e.get('DirectoryItem_url')))

    if len(entries) == 0:
        log('  Play All: no playable items')
        xbmc.executebuiltin(
            'XBMC.Notification("%s","%s")' %
            (translation(32054),
             translation(32055)))  #Play All     No playable items
        return

    log("**********playing slideshow*************")

    for e in entries:
        q.put(e)

    s = ScreensaverManager(ev, q)

    try:
        s.start_loop()
    except Exception as e:
        log("  EXCEPTION slideshowAlbum:=" + str(sys.exc_info()[0]) + "  " +
            str(e))

    return
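
remove_dict_duplicates() is a small utility from elsewhere in the addon; a sketch consistent with its use on 'DirectoryItem_url' above (keep the first occurrence of each value, preserve order):

def remove_dict_duplicates(dictlist, key):
    #drops later dicts whose `key` value was already seen, keeping
    #the original order of the list
    seen = set()
    result = []
    for d in dictlist:
        value = d.get(key)
        if value not in seen:
            seen.add(value)
            result.append(d)
    return result
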
Example No. 15
def autoPlay(url, name, autoPlay_type):
    import random
    from domains import sitesBase, parse_reddit_link, build_DirectoryItem_url_based_on_media_type
    from utils import unescape, post_is_filtered_out, log, clean_str
    from actions import setting_gif_repeat_count
    from reddit import reddit_request, determine_if_video_media_from_reddit_json

    gif_repeat_count = setting_gif_repeat_count()

    entries = []
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    playlist.clear()
    log("**********autoPlay %s*************" % autoPlay_type)
    content = reddit_request(url)
    if not content: return

    content = json.loads(content.replace('\\"', '\''))

    log("Autoplay %s - Parsing %d items" %
        (autoPlay_type, len(content['data']['children'])))

    for j_entry in content['data']['children']:
        try:
            if post_is_filtered_out(j_entry):
                continue

            title = clean_str(j_entry, ['data', 'title'])

            try:
                media_url = j_entry['data']['url']
            except:
                media_url = j_entry['data']['media']['oembed']['url']

            is_a_video = determine_if_video_media_from_reddit_json(j_entry)

            ld = parse_reddit_link(link_url=media_url,
                                   assume_is_video=is_a_video,
                                   needs_preview=False,
                                   get_playable_url=True)

            DirectoryItem_url, setProperty_IsPlayable, isFolder, title_prefix = build_DirectoryItem_url_based_on_media_type(
                ld, media_url, title, on_autoplay=True)

            if ld:
                if ld.media_type not in [
                        sitesBase.TYPE_VIDEO, sitesBase.TYPE_GIF,
                        sitesBase.TYPE_VIDS, sitesBase.TYPE_MIXED
                ]:
                    continue

            autoPlay_type_entries_append(entries, autoPlay_type, title,
                                         DirectoryItem_url)
            if ld.media_type == sitesBase.TYPE_GIF:
                for _ in range(0, gif_repeat_count):
                    autoPlay_type_entries_append(entries, autoPlay_type, title,
                                                 DirectoryItem_url)

        except Exception as e:
            log("  EXCEPTION Autoplay " + str(sys.exc_info()[0]) + "  " +
                str(e))

    if autoplayRandomize:
        random.shuffle(entries)

    for title, url in entries:
        listitem = xbmcgui.ListItem(title)
        playlist.add(url, listitem)
        log('add to playlist: %s %s' % (title.ljust(25)[:25], url))
    xbmc.Player().play(playlist)
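
autoPlay_type_entries_append() is not shown in these examples. Given the 'ALL'/'UNWATCHED' autoPlay types this addon uses, a minimal sketch could be the following (is_post_watched() is hypothetical, standing in for whatever watched-state check the addon performs):

def autoPlay_type_entries_append(entries, autoPlay_type, title, url):
    #for 'UNWATCHED', skip items already marked as watched;
    #is_post_watched() is a hypothetical stand-in for that check
    if autoPlay_type == 'UNWATCHED' and is_post_watched(url):
        return
    entries.append((title, url))
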
def autoPlay(url, name, autoPlay_type):
    import random
    from domains import sitesBase, parse_reddit_link, build_DirectoryItem_url_based_on_media_type
    from utils import unescape, post_is_filtered_out, log, clean_str
    from actions import setting_gif_repeat_count
    from reddit import reddit_request, determine_if_video_media_from_reddit_json
    #collect a list of titles and urls as entries[] from the j_entries obtained from reddit
    #then create a playlist from those entries
    #then play the playlist

    gif_repeat_count = setting_gif_repeat_count()

    entries = []
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    playlist.clear()
    log("**********autoPlay %s*************" % autoPlay_type)
    content = reddit_request(url)
    if not content: return

    content = json.loads(content.replace('\\"', '\''))

    log("Autoplay %s - Parsing %d items" %
        (autoPlay_type, len(content['data']['children'])))

    for j_entry in content['data']['children']:
        try:
            if post_is_filtered_out(j_entry):
                continue

            title = clean_str(j_entry, ['data', 'title'])

            try:
                media_url = j_entry['data']['url']
            except:
                media_url = j_entry['data']['media']['oembed']['url']

            is_a_video = determine_if_video_media_from_reddit_json(j_entry)

            #log("  Title:%s -%c %s"  %( title, ("v" if is_a_video else " "), media_url ) )
            #hoster, DirectoryItem_url, videoID, mode_type, thumb_url,poster_url, isFolder,setInfo_type, IsPlayable=make_addon_url_from(media_url,is_a_video)
            ld = parse_reddit_link(link_url=media_url,
                                   assume_is_video=is_a_video,
                                   needs_preview=False,
                                   get_playable_url=True)

            DirectoryItem_url, setProperty_IsPlayable, isFolder, title_prefix = build_DirectoryItem_url_based_on_media_type(
                ld, media_url, title, on_autoplay=True)

            if ld:
                if ld.media_type not in [
                        sitesBase.TYPE_VIDEO, sitesBase.TYPE_GIF,
                        sitesBase.TYPE_VIDS, sitesBase.TYPE_MIXED
                ]:
                    continue

            autoPlay_type_entries_append(entries, autoPlay_type, title,
                                         DirectoryItem_url)
            if ld.media_type == sitesBase.TYPE_GIF:
                for _ in range(0, gif_repeat_count):
                    autoPlay_type_entries_append(entries, autoPlay_type, title,
                                                 DirectoryItem_url)

        except Exception as e:
            log("  EXCEPTION Autoplay " + str(sys.exc_info()[0]) + "  " +
                str(e))

    #def k2(x): return x[1]
    #entries=remove_duplicates(entries, k2)

    if autoplayRandomize:
        random.shuffle(entries)

    #for title, url in entries:
    #    log("  added to playlist:"+ title + "  " + urllib.unquote_plus(url) )
    for title, url in entries:
        listitem = xbmcgui.ListItem(title)
        playlist.add(url, listitem)
        log('add to playlist: %s %s' % (title.ljust(25)[:25], url))
    xbmc.Player().play(playlist)
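
setting_gif_repeat_count() just reads an addon setting; a sketch, assuming a setting id of 'gif_repeat_count' (the real id may differ):

def setting_gif_repeat_count():
    #addon.getSetting() returns a string; fall back to 1 so a gif is
    #queued at least once. The setting id here is an assumption.
    try:
        return int(addon.getSetting('gif_repeat_count'))
    except (TypeError, ValueError):
        return 1
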
Example No. 17
def autoPlay(url, name, autoPlay_type):
    import random
    from domains import sitesBase, parse_reddit_link, build_DirectoryItem_url_based_on_media_type
    from utils import unescape, post_is_filtered_out, log, clean_str
    from actions import setting_gif_repeat_count
    from reddit import reddit_request, determine_if_video_media_from_reddit_json
    #collect a list of titles and urls as entries[] from the j_entries obtained from reddit
    #then create a playlist from those entries
    #then play the playlist

    gif_repeat_count=setting_gif_repeat_count()

    entries = []
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    playlist.clear()
    log("**********autoPlay %s*************" %autoPlay_type)
    content = reddit_request(url)
    if not content: return

    content = json.loads(content.replace('\\"', '\''))

    log("Autoplay %s - Parsing %d items" %( autoPlay_type, len(content['data']['children']) )    )

    for j_entry in content['data']['children']:
        try:
            if post_is_filtered_out( j_entry ):
                continue

            title = clean_str(j_entry, ['data','title'])

            try:
                media_url = j_entry['data']['url']
            except:
                media_url = j_entry['data']['media']['oembed']['url']

            is_a_video = determine_if_video_media_from_reddit_json(j_entry)

            #log("  Title:%s -%c %s"  %( title, ("v" if is_a_video else " "), media_url ) )
            #hoster, DirectoryItem_url, videoID, mode_type, thumb_url,poster_url, isFolder,setInfo_type, IsPlayable=make_addon_url_from(media_url,is_a_video)
            ld=parse_reddit_link(link_url=media_url, assume_is_video=is_a_video, needs_preview=False, get_playable_url=True )

            DirectoryItem_url, setProperty_IsPlayable, isFolder, title_prefix = build_DirectoryItem_url_based_on_media_type(ld, media_url, title, on_autoplay=True)

            if ld:
                if ld.media_type not in [sitesBase.TYPE_VIDEO, sitesBase.TYPE_GIF, sitesBase.TYPE_VIDS, sitesBase.TYPE_MIXED]:
                    continue

            autoPlay_type_entries_append( entries, autoPlay_type, title, DirectoryItem_url)
            if ld.media_type == sitesBase.TYPE_GIF:
                for _ in range( 0, gif_repeat_count ):
                    autoPlay_type_entries_append( entries, autoPlay_type, title, DirectoryItem_url)

        except Exception as e:
            log("  EXCEPTION Autoplay "+ str( sys.exc_info()[0]) + "  " + str(e) )

    #def k2(x): return x[1]
    #entries=remove_duplicates(entries, k2)

    if autoplayRandomize:
        random.shuffle(entries)

    #for title, url in entries:
    #    log("  added to playlist:"+ title + "  " + urllib.unquote_plus(url) )
    for title, url in entries:
        listitem = xbmcgui.ListItem(title)
        playlist.add(url, listitem)
        log('add to playlist: %s %s' %(title.ljust(25)[:25],url ))
    xbmc.Player().play(playlist)
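
determine_if_video_media_from_reddit_json() comes from the reddit module; a sketch built on fields reddit actually returns (data.is_video for reddit-hosted video, media.oembed.type for embedded players). The real function may check more:

def determine_if_video_media_from_reddit_json(j_entry):
    data = j_entry.get('data') or {}
    if data.get('is_video'):  #reddit-hosted (v.redd.it) video
        return True
    oembed = (data.get('media') or {}).get('oembed') or {}
    return oembed.get('type') == 'video'  #youtube, vimeo, etc.
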
def autoSlideshow(url, name, type_):

    log('starting slideshow '+ url)
    ev=threading.Event()

    entries = []
    #watchdog_counter=0
    preview_w=0
    preview_h=0
    image=''

    #content = opener.open(url).read()
    content = reddit_request(url)
    if not content: return
    #log( str(content) )
    #content = json.loads(content.replace('\\"', '\''))
    content = json.loads(content)

    log("slideshow %s:Parsing %d items: %s" %( type_, len(content['data']['children']), 'random' if random_post_order else 'normal order' )    )

    data_children = content['data']['children']

    if random_post_order:
        random.shuffle(data_children)

    for j_entry in data_children:
        try:
            title = unescape(j_entry['data']['title'].encode('utf-8'))
            log("  TITLE:%s [r/%s]"  %( title, j_entry.get('data').get('subreddit') )  )

            try:    description = unescape(j_entry['data']['media']['oembed']['description'].encode('utf-8'))
            except: description = ''
            #log('    description  [%s]' %description)
            try:    post_selftext=unescape(j_entry['data']['selftext'].encode('utf-8'))
            except: post_selftext=''
            #log('    post_selftext[%s]' %post_selftext)

            description=post_selftext+'[CR]'+description if post_selftext else description

            try:
                media_url = j_entry['data']['url']
            except:
                media_url = j_entry['data']['media']['oembed']['url']

            try:
                preview=j_entry['data']['preview']['images'][0]['source']['url'].encode('utf-8').replace('&amp;','&')
                try:
                    preview_h = float( j_entry['data']['preview']['images'][0]['source']['height'] )
                    preview_w = float( j_entry['data']['preview']['images'][0]['source']['width'] )
                except:
                    preview_w=0
                    preview_h=0

            except Exception as e:
                #log("   getting preview image EXCEPTION:="+ str( sys.exc_info()[0]) + "  " + str(e) )
                preview=""


            ld=parse_reddit_link(link_url=media_url, assume_is_video=False, needs_preview=True, get_playable_url=True )
            if ld:
                if not preview:
                    preview = ld.poster

                if (addon.getSetting('include_albums')=='true') and (ld.media_type==sitesBase.TYPE_ALBUM) :
                    dictlist = listAlbum( media_url, title, 'return_dictlist')
                    for d in dictlist:
                        #log('    (S) adding items from album ' + title  +' ' + d.get('DirectoryItem_url') )
                        t2=d.get('li_label') if d.get('li_label') else title
                        #entries.append([ t2, d.get('DirectoryItem_url'), d.get('width'), d.get('height'), len(entries)])

                        d['li_label']=t2
                        entries.append( d )
                        #title=''  #only put the title in once.
                else:
                    if addon.getSetting('use_reddit_preview')=='true':
                        if preview: image=preview
                        elif ld.poster: image=ld.poster
                        #if preview: entries.append([title,preview,preview_w, preview_h,len(entries)]) #log('      (N)added preview:%s %s' %( title,preview) )
                        #elif ld.poster: entries.append([title,ld.poster,preview_w, preview_h,len(entries)])    #log('      (N)added poster:%s %s' % ( title,ld.poster) )
                    else:
                        if ld.poster:  image=ld.poster #entries.append([title,ld.poster,preview_w, preview_h,len(entries)])
                        elif preview: image=preview  #entries.append([title,preview,preview_w, preview_h,len(entries)])
                        #if ld.poster: entries.append([title,ld.poster,preview_w, preview_h,len(entries)])
                        #elif preview: entries.append([title,preview,preview_w, preview_h,len(entries)])

                    append_entry( entries, title,image,preview_w, preview_h, description )
            else:
                append_entry( entries, title,preview,preview_w, preview_h, description )
                #log('      (N)added preview:%s' % title )

        except Exception as e:
            log( '  autoSlideshow exception:' + str(e) )

    #log( repr(entries))

    entries = remove_dict_duplicates( entries, 'DirectoryItem_url')

#     #for i,e in enumerate(entries): log('  e1-%d %s' %(i, e[1]) )
#     def k2(x): return x[1]
#     entries=remove_duplicates(entries, k2)
#     #for i,e in enumerate(entries): log('  e2-%d %s' %(i, e[1]) )

    for i, e in enumerate(entries):
        log('  possible playable items({0}) {1}...{2}x{3}  {4}'.format( i, e['li_label'].ljust(15)[:15], repr(e.get('width')),repr(e.get('height')),  e.get('DirectoryItem_url')) )

    if len(entries)==0:
        log('  Play All: no playable items' )
        xbmc.executebuiltin('XBMC.Notification("%s","%s")' %(translation(32054), translation(32055)  ) )  #Play All     No playable items
        return

    #if type.endswith("_RANDOM"):
    #    random.shuffle(entries)

    #for title, url in entries:
    #    log("  added to playlist:"+ title + "  " + url )

    log("**********playing slideshow*************")

    for e in entries:
        q.put(e)

    #s= HorizontalSlideScreensaver(ev,q)
    s= ScreensaverManager(ev,q)

    try:
        s.start_loop()
    except Exception as e:
        log("  EXCEPTION slideshowAlbum:="+ str( sys.exc_info()[0]) + "  " + str(e) )


    return
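
ScreensaverManager is defined elsewhere in the addon; the contract the code above relies on is roughly this skeleton (show_image() is a hypothetical stand-in for the actual slide rendering):

class ScreensaverManager(object):
    #skeleton only: drain the dict entries queued above and show each
    #image until the queue is empty or the event is set
    def __init__(self, ev, q):
        self.ev = ev
        self.q = q

    def start_loop(self):
        while not self.ev.is_set() and not self.q.empty():
            entry = self.q.get()
            show_image(entry.get('DirectoryItem_url'), entry.get('li_label'))  #hypothetical
            self.q.task_done()
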
def listLinksInComment(url, name, type_):
    from domains import parse_reddit_link, build_DirectoryItem_url_based_on_media_type
    from utils import markdown_to_bbcode, unescape
    from guis import progressBG
    #from resources.domains import make_addon_url_from
    #called from context menu
    log('listLinksInComment:%s:%s' % (type_, url))

    #does not work for list comments coz key is the playable url (not reddit comments url)
    #msg=WINDOW.getProperty(url)
    #WINDOW.clearProperty( url )
    #log( '   msg=' + msg )

    directory_items = []
    author = ""
    ShowOnlyCommentsWithlink = False

    if type_ == 'linksOnly':
        ShowOnlyCommentsWithlink = True

    #url='https://www.reddit.com/r/Music/comments/4k02t1/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/' + '.json'
    #only get up to "https://www.reddit.com/r/Music/comments/4k02t1".
    #   do not include                                            "/bonnie_tyler_total_eclipse_of_the_heart_80s_pop/"
    #   because we'll have problem when it looks like this: "https://www.reddit.com/r/Overwatch/comments/4nx91h/ever_get_that_feeling_déjà_vu/"

    #url=re.findall(r'(.*/comments/[A-Za-z0-9]+)',url)[0]

    #use safe='' argument in quoteplus to encode only the weird chars part
    url = urllib.quote_plus(url, safe=':/?&')
    if '?' in url:
        url = url.split('?', 1)[0] + '.json?' + url.split('?', 1)[1]
    else:
        url += '.json'

    loading_indicator = progressBG(translation(30024))
    loading_indicator.update(0, 'Retrieving comments')

    content = reddit_request(url)
    if not content:
        loading_indicator.end()
        return

    loading_indicator.update(10, 'Parsing')
    content = json.loads(content)

    del harvest[:]
    #harvest links in the post text (just 1)
    r_linkHunter(content[0]['data']['children'])

    try:
        submitter = content[0]['data']['children'][0]['data']['author']
    except:
        submitter = ''

    #the post title is provided in json, we'll just use that instead of messages from addLink()
    try:
        post_title = content[0]['data']['children'][0]['data']['title']
    except:
        post_title = ''
    #for i, h in enumerate(harvest):
    #    log("aaaaa first harvest "+h[2])

    #harvest links in the post itself
    r_linkHunter(content[1]['data']['children'])

    comment_score = 0

    loading_indicator.set_tick_total(len(harvest))

    for i, h in enumerate(harvest):
        try:
            #log(str(i)+"  score:"+ str(h[0]).zfill(5)+" "+ h[1] +'|'+ h[3] )
            comment_score = h[0]
            #log("score %d < %d (%s)" %(comment_score,int_CommentTreshold, CommentTreshold) )
            link_url = h[2]
            desc100 = h[3].replace(
                '\n', ' ')[0:100]  #first 100 characters of description

            kind = h[
                6]  #reddit uses t1 for user comments and t3 for OP text of the post. like a poster describing the post.
            d = h[5]  #depth of the comment

            tab = " " * d if d > 0 else "-"

            from urlparse import urlparse
            domain = '{uri.netloc}'.format(uri=urlparse(link_url))

            author = h[7]
            DirectoryItem_url = ''

            if comment_score < int_CommentTreshold:
                continue

            #hoster, DirectoryItem_url, videoID, mode_type, thumb_url,poster_url, isFolder,setInfo_type, setProperty_IsPlayable =make_addon_url_from(h[2])
            #if link_url:
            #    log( '  comment %s TITLE:%s... link[%s]' % ( str(d).zfill(3), desc100.ljust(20)[:20],link_url ) )

            ld = parse_reddit_link(link_url=link_url,
                                   assume_is_video=False,
                                   needs_preview=True,
                                   get_playable_url=True)

            if kind == 't1':
                list_title = r"[COLOR cadetblue]%3d[/COLOR] %s" % (h[0], tab)
            elif kind == 't3':
                list_title = r"[COLOR cadetblue]Title [/COLOR] %s" % (tab)

            #helps the textbox control treat [url description] and (url) as separate words so that they can be split into 2 lines
            plot = h[3].replace('](', '] (')
            plot = markdown_to_bbcode(plot)
            plot = unescape(plot)  #convert html entities e.g.:(&#39;)

            liz = xbmcgui.ListItem(label=list_title + ': ' + desc100)

            liz.setInfo(type="Video",
                        infoLabels={
                            "Title": h[1],
                            "plot": plot,
                            "studio": domain,
                            "votes": str(comment_score),
                            "director": author
                        })
            isFolder = False

            #force all links to ytdl to see if it can be played
            if link_url:
                DirectoryItem_url, setProperty_IsPlayable, isFolder, title_prefix = build_DirectoryItem_url_based_on_media_type(
                    ld, link_url)

                liz.setProperty('IsPlayable', setProperty_IsPlayable)
                liz.setProperty(
                    'url', DirectoryItem_url)  #<-- needed by the xml gui skin
                liz.setPath(DirectoryItem_url)

                if domain:
                    plot = "  [COLOR greenyellow][%s] %s" % (domain,
                                                             plot) + "[/COLOR]"
                else:
                    plot = "  [COLOR greenyellow][%s]" % (plot) + "[/COLOR]"
                liz.setLabel(list_title + plot)

                if ld:
                    liz.setArt({
                        "thumb": ld.poster,
                        "poster": ld.poster,
                        "banner": ld.poster,
                        "fanart": ld.poster,
                        "landscape": ld.poster
                    })

            if DirectoryItem_url:
                #log( 'IsPlayable:'+setProperty_IsPlayable )
                directory_items.append((
                    DirectoryItem_url,
                    liz,
                    isFolder,
                ))
                #xbmcplugin.addDirectoryItem(handle=pluginhandle,url=DirectoryItem_url,listitem=liz,isFolder=isFolder)
            else:
                #this section is for comments that have no links
                if not ShowOnlyCommentsWithlink:
                    result = h[3].replace('](', '] (')
                    result = markdown_to_bbcode(result)
                    liz = xbmcgui.ListItem(label=list_title + desc100)
                    liz.setInfo(type="Video",
                                infoLabels={
                                    "Title": h[1],
                                    "plot": result,
                                    "studio": domain,
                                    "votes": str(h[0]),
                                    "director": author
                                })
                    liz.setProperty('IsPlayable', 'false')

                    directory_items.append((
                        "",
                        liz,
                        False,
                    ))
                    #xbmcplugin.addDirectoryItem(handle=pluginhandle,url="",listitem=liz,isFolder=False)

                #END of section for comments that have no links or unsupported links
        except Exception as e:
            log('  EXCEPTION:' + str(e))

        #for di in directory_items:
        #    log( str(di) )

        loading_indicator.tick(1, desc100)
    loading_indicator.end()

    #log('  comments_view id=%s' %comments_viewMode)

    #xbmcplugin.setContent(pluginhandle, "mixed")  #in estuary, mixed have limited view id's available. it has widelist which is nice for comments but we'll just stick with 'movies'
    xbmcplugin.setContent(
        pluginhandle, "episodes"
    )  #files, songs, artists, albums, movies, tvshows, episodes, musicvideos
    xbmcplugin.setPluginCategory(pluginhandle, 'Comments')

    xbmcplugin.addDirectoryItems(handle=pluginhandle, items=directory_items)
    xbmcplugin.endOfDirectory(pluginhandle)

    if comments_viewMode:
        xbmc.executebuiltin('Container.SetViewMode(%s)' % comments_viewMode)
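
clean_str() from utils appears throughout these examples (e.g. clean_str(j_entry, ['data','title'])); a sketch consistent with those call sites, walking nested json by keys/indexes and never raising:

def clean_str(obj, path, default=''):
    #follows each key/index in `path` into the nested json and returns
    #the value as an unescaped string; returns `default` on any miss,
    #which is why the call sites never guard the lookup
    try:
        for key in path:
            obj = obj[key]
        return unescape(obj)
    except (KeyError, IndexError, TypeError):
        return default
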
Example No. 20
def listSubReddit(url, name, subreddit_key):
    from guis import progressBG
    from utils import post_is_filtered_out, set_query_field
    from reddit import has_multiple
    global GCXM_hasmultiplesubreddit,GCXM_hasmultipledomain,GCXM_hasmultipleauthor,GCXM_subreddit_key
    log("listSubReddit subreddit=%s url=%s" %(subreddit_key,url) )

    currentUrl = url
    xbmcplugin.setContent(pluginhandle, "movies") #files, songs, artists, albums, movies, tvshows, episodes, musicvideos

    loading_indicator=progressBG('Loading...')
    loading_indicator.update(8,'Retrieving '+subreddit_key)

    content = reddit_request(url)
    loading_indicator.update(11,subreddit_key  )

    if not content:
        loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG
        return

    page_title="[COLOR cadetblue]%s[/COLOR]" %subreddit_key

    xbmcplugin.setPluginCategory(pluginhandle, page_title)

    info_label={ "plot": translation(30013) }  #Automatically play videos
    if autoplayAll:       addDir("[B]- "+translation(30016)+"[/B]", url, 'autoPlay', "", "ALL", info_label)
    if autoplayUnwatched: addDir("[B]- "+translation(30017)+"[/B]" , url, 'autoPlay', "", "UNWATCHED", info_label)

    threads = []
    q_liz = Queue()   #output queue (listitem)

    content = json.loads(content)

    posts_count=len(content['data']['children'])
    filtered_out_posts=0

    GCXM_hasmultiplesubreddit=has_multiple('subreddit', content['data']['children'])
    GCXM_hasmultipledomain=has_multiple('domain', content['data']['children'])
    GCXM_hasmultipleauthor=has_multiple('author', content['data']['children'])
    GCXM_subreddit_key=subreddit_key
    for idx, entry in enumerate(content['data']['children']):
        try:
            if post_is_filtered_out( entry.get('data') ):
                filtered_out_posts+=1
                continue

            t = threading.Thread(target=reddit_post_worker, args=(idx, entry,q_liz), name='#t%.2d'%idx)
            threads.append(t)
            t.start()

        except Exception as e:
            log(" EXCEPTION:="+ str( sys.exc_info()[0]) + "  " + str(e) )

    break_counter=0 #to avoid infinite loop
    expected_listitems=(posts_count-filtered_out_posts)
    if expected_listitems>0:
        loading_indicator.set_tick_total(expected_listitems)
        last_queue_size=0
        while q_liz.qsize() < expected_listitems:
            if break_counter>=100:
                break

            if last_queue_size < q_liz.qsize():
                items_added=q_liz.qsize()-last_queue_size
                loading_indicator.tick(items_added)
            else:
                break_counter+=1

            last_queue_size=q_liz.qsize()
            xbmc.sleep(50)

    for idx, t in enumerate(threads):

        t.join(timeout=20)

    xbmc_busy(False)

    if q_liz.qsize() != expected_listitems:
        log('some threads did not return a listitem')

    li=[ liz for idx,liz in sorted(q_liz.queue) ]  #list of (url, listitem[, isFolder]) as a tuple

    with q_liz.mutex:
        q_liz.queue.clear()

    xbmcplugin.addDirectoryItems(pluginhandle, li)

    loading_indicator.end() #it is important to close xbmcgui.DialogProgressBG

    try:

        after=content['data']['after']

        o = urlparse.urlparse(currentUrl)
        current_url_query = urlparse.parse_qs(o.query)

        nextUrl=set_query_field(currentUrl, field='after', value=after, replace=True)  #(url, field, value, replace=False):


        count = current_url_query.get('count')

        if count is None:
            count = itemsPerPage
        else:
            try:
                count = int(count[0]) + int(itemsPerPage)
            except ValueError:
                count = itemsPerPage

        nextUrl=set_query_field(nextUrl,'count', count, True)

        info_label={ "plot": translation(30004) + '[CR]' + page_title}
        addDir(translation(30004), nextUrl, 'listSubReddit', "", subreddit_key,info_label)   #Next Page
    except Exception as e:
        log('    Exception: '+ str(e))

    subreddit_key=subreddit_key.replace(' ','+')
    viewID=WINDOW.getProperty( "viewid-"+subreddit_key )


    if viewID:
        log("  custom viewid %s for %s " %(viewID,subreddit_key) )
        xbmc.executebuiltin('Container.SetViewMode(%s)' %viewID )
    else:
        if forceViewMode:
            xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')

    xbmcplugin.endOfDirectory(handle=pluginhandle,
                              succeeded=True,
                              updateListing=False,   #setting this to True causes the ".." entry to quit the plugin
                              cacheToDisc=True)
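
set_query_field() from utils rebuilds a url query parameter; a sketch consistent with how it is used above for the 'after' and 'count' paging fields (Python 2 urlparse/urllib, as used elsewhere in these examples):

def set_query_field(url, field, value, replace=False):
    #rebuild `url` with field=value in its query string
    import urllib
    import urlparse
    parts = urlparse.urlparse(url)
    query_pairs = urlparse.parse_qsl(parts.query)
    if replace:
        query_pairs = [(f, v) for f, v in query_pairs if f != field]
    query_pairs.append((field, str(value)))
    return urlparse.urlunparse(parts._replace(query=urllib.urlencode(query_pairs)))
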