def grab(url, prettyname, id, cachePath, site9gagfile, cacheE):
    list = []
    # The last fetched page id is persisted in site9gagfile as ::<id>::::<prevPostId>::
    line = basic.readoneline(site9gagfile)
    idpage = re.findall('::' + id + '::::(.+?)::', line, re.DOTALL)
    if not idpage:
        page = basic.open_url('http://9gag.tv')
    else:
        page = basic.open_url(url + idpage[0], '9gag')
    jsondata = re.findall(' postGridPrefetchPosts = (.+?)];', page, re.DOTALL)
    j = json.loads(jsondata[0] + ']')
    size = len(j)
    e = 0
    for data in j:
        e = e + 1
        if e == size:
            # Remember the id of the last post so the next call can page onwards.
            line = basic.readoneline(site9gagfile)
            if not '<' + id + '>' in line:
                basic.writefile(site9gagfile, "a", '::' + str(int(id) + 1) + '::::' + data['prevPostId'] + '::')
        try:
            # videoDuration is ISO 8601 (PT#M#S, PT#M or PT#S); convert to seconds.
            duration = 0
            time = re.findall('PT(\d+)M(\d+)S', data['videoDuration'], re.DOTALL)
            if time:
                for min, sec in time:
                    duration = int(min) * 60 + int(sec)
            else:
                time = re.findall('PT(\d+)M', data['videoDuration'], re.DOTALL)
                if time:
                    duration = int(time[0]) * 60
                else:
                    time = re.findall('PT(\d+)S', data['videoDuration'], re.DOTALL)
                    if time:
                        duration = int(time[0])
        except:
            duration = 60
        title = basic.cleanTitle(data['ogTitle'])
        videocache = os.path.join(cachePath, data['videoExternalId'])
        jsontext = ('{"prettyname":"' + prettyname +
                    '","url":"plugin://plugin.video.youtube/?action=play_video&videoid=' + data['videoExternalId'] +
                    '","title":"' + title.encode('ascii', 'xmlcharrefreplace') +
                    '","duration":"' + str(duration) +
                    '","thumbnail":"' + data['thumbnail_360w'] + '"}')
        jsonloaded = json.loads(jsontext, encoding="utf-8")
        if cacheE == 'true' and not os.path.isfile(videocache):
            basic.writefile(videocache, 'w', jsontext.encode('utf8'))
        list.append(jsonloaded)
    return list
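# A minimal, hypothetical consolidation of the three-branch ISO 8601 parsing
# above (not part of the add-on): one regex with optional minute and second
# groups. Like the original, it does not handle hour components.
import re

def parse_pt_duration(value):
    """Return seconds for strings like PT4M13S, PT4M or PT13S; 0 if unmatched."""
    match = re.match(r'PT(?:(\d+)M)?(?:(\d+)S)?$', value)
    if not match or not any(match.groups()):
        return 0
    minutes, seconds = match.groups()
    return int(minutes or 0) * 60 + int(seconds or 0)

# parse_pt_duration('PT4M13S') -> 253, parse_pt_duration('PT45S') -> 45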
def youtube_resolver(url, prettyname, cachePath):
    match = re.compile('.*?youtube.com/embed/(.+?)\?').findall(url)
    if not match:
        match = re.compile('.*?youtube.com/embed/(.*)').findall(url)
    if match:
        videocache = os.path.join(cachePath, str(match[0]))
        if getSetting("cachesite") == 'true' and os.path.isfile(videocache):
            jsonline = basic.readfiletoJSON(videocache)
            jsonloaded = json.loads(jsonline, encoding="utf-8")
            return jsonline, jsonloaded
        else:
            data = basic.open_url('https://www.googleapis.com/youtube/v3/videos?id=' + str(match[0]) + '&key=AIzaSyCeh7CwOCb-wJQoPDgDX1faEiXntqYfIIA&part=snippet,contentDetails')
            title = re.compile('"title": "(.+?)",').findall(data)[0]
            title2 = ''
            try:
                title2 = title.decode('utf8').encode('ascii', 'xmlcharrefreplace')
            except:
                title2 = title.encode('ascii', 'xmlcharrefreplace')
            if title2 != '':
                title = title2
            # contentDetails.duration is ISO 8601: try M+S, then S, then M only.
            duration = ''
            dur = re.compile('"duration": "PT(.+?)M(.+?)S"').findall(data)
            if dur:
                duration = float(dur[0][0]) * 60 + float(dur[0][1])
            else:
                dur = re.compile('"duration": "PT(.+?)S"').findall(data)
                if dur:
                    duration = dur[0][0]
                else:
                    dur = re.compile('"duration": "PT(.+?)M"').findall(data)
                    if dur:
                        duration = float(dur[0][0]) * 60
            thumbnail = re.compile('"high": {\s+"url": "(.+?)",').findall(data)[0]
            jsontext = '{"prettyname":"' + prettyname + '","url":"plugin://plugin.video.youtube/play/?video_id=' + str(match[0]) + '","title":"' + title + '","duration":"' + str(duration) + '","thumbnail":"' + thumbnail + '"}'
            jsonloaded = json.loads(jsontext, encoding="latin-1")
            if getSetting("cachesite") == 'true':
                basic.writefile(videocache, 'w', jsontext)
            return jsontext, jsonloaded
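# Hand-concatenated JSON like the jsontext strings above breaks as soon as a
# title contains a double quote or backslash. A hypothetical alternative
# sketch (not what the add-on does) that lets the json module handle all
# escaping:
import json

def build_entry(prettyname, url, title, duration, thumbnail):
    # json.dumps escapes quotes, backslashes and non-ASCII for us.
    return json.dumps({
        "prettyname": prettyname,
        "url": url,
        "title": title,
        "duration": str(duration),
        "thumbnail": thumbnail,
    })

# build_entry('9GAG', 'plugin://...', 'He said "wow"', 253, 'http://...')
# round-trips through json.loads() where the concatenated form would raise.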
def listmovies(url):
    basic.log(u"rotten.listmovies url: %s" % url)
    mainlist = []
    sendlist = []
    result = []
    threads = []
    order = 0
    jsonpage = basic.open_url(url)
    j = json.loads(jsonpage)
    for movie in j['movies']:
        order += 1
        try:
            sendlist.append([order, 'tt' + movie['alternate_ids']['imdb']])
        except:
            pass
    # Look up metadata five ids at a time, one thread per chunk.
    chunks = [sendlist[x:x + 5] for x in xrange(0, len(sendlist), 5)]
    for i in range(len(chunks)):
        threads.append(threading.Thread(name='listmovies' + str(i),
                                        target=tmdb.searchmovielist,
                                        args=(chunks[i], result,)))
    [i.start() for i in threads]
    [i.join() for i in threads]
    result = sorted(result, key=basic.getKey)
    for id, lists in result:
        mainlist.append(lists)
    basic.log(u"rotten.listmovies mainlist: %s" % mainlist)
    return mainlist
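# Standalone sketch of the chunk-and-thread fan-out pattern shared by the
# listmovies variants in this file (dummy worker, no Kodi dependencies; the
# names are hypothetical). Each worker appends [order, payload] pairs, and
# sorting by the order key restores listing order after threads interleave.
import threading

def worker(chunk, result):
    for order, imdb_id in chunk:
        result.append([order, 'looked-up:' + imdb_id])

def fan_out(ids, chunk_size=5):
    sendlist = [[i + 1, imdb_id] for i, imdb_id in enumerate(ids)]
    chunks = [sendlist[x:x + chunk_size] for x in range(0, len(sendlist), chunk_size)]
    result = []
    threads = [threading.Thread(target=worker, args=(c, result)) for c in chunks]
    [t.start() for t in threads]
    [t.join() for t in threads]
    return [payload for order, payload in sorted(result)]

# fan_out(['tt0111161', 'tt0068646']) -> ['looked-up:tt0111161', 'looked-up:tt0068646']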
def vimeo_resolver(url, prettyname, cachePath):
    if url.find('?') > -1:
        match = re.compile('vimeo.com/video/(.+?)\?').findall(url)
    else:
        match = re.compile('vimeo.com/video/(.*)').findall(url)
    if match:
        videocache = os.path.join(cachePath, str(match[0]))
        if getSetting("cachesites") == 'true' and os.path.isfile(videocache):
            jsonline = basic.readfiletoJSON(videocache)
            jsonloaded = json.loads(jsonline, encoding="utf-8")
            return jsonline, jsonloaded
        else:
            try:
                data = basic.open_url('http://player.vimeo.com/video/' + str(match[0]) + '/config?type=moogaloop&referrer=&player_url=player.vimeo.com&v=1.0.0&cdn_url=http://a.vimeocdn.com')
                parameters = json.loads(data)
                title = parameters['video']['title']
                title2 = ''
                try:
                    title2 = title.decode('utf8').encode('ascii', 'xmlcharrefreplace')
                except:
                    pass
                if title2 != '':
                    title = title2
                duration = parameters['video']['duration']
                thumbnail = parameters['video']['thumbs']['640']
                # Prefer the HD stream, fall back to SD.
                try:
                    url = parameters['request']['files']['h264']['hd']['url']
                except:
                    url = parameters['request']['files']['h264']['sd']['url']
                jsontext = '{"prettyname":"' + prettyname + '","url":"' + url + '","title":"' + title.encode('ascii', 'xmlcharrefreplace') + '","duration":"' + str(duration) + '","thumbnail":"' + thumbnail + '"}'
                jsonloaded = json.loads(jsontext, encoding="utf-8")
                if getSetting("cachesites") == 'true':
                    basic.writefile(videocache, 'w', jsontext.encode('utf8'))
                return jsontext, jsonloaded
            except BaseException as e:
                print '##ERROR-funvideos:vimeo_resolver: ' + str(match[0]) + ' ' + str(e)
def grablinks(mainURL, prettyname, sectionstart, sectionend, cachePath, mainsite=None):
    html_source_trunk = []
    page = basic.open_url(mainURL)
    try:
        html_source_trunk = re.findall(sectionstart + '(.*?)' + sectionend, page, re.DOTALL)
    except:
        pass
    threads = []
    results = []
    for i in range(0, len(html_source_trunk)):
        print "##funvideos-grablinks: " + html_source_trunk[i]
        pageURL = None
        if mainsite:
            # Derive a flat cache filename from the link (strip site, slashes, dots).
            pageURL = html_source_trunk[i].replace(mainsite, '').replace('/', '').replace('.', '').encode('utf-8')
        threads.append(threading.Thread(name=mainURL + str(i),
                                        target=grabiframes,
                                        args=(html_source_trunk[i], prettyname, cachePath, results, i + 1, pageURL,)))
    [i.start() for i in threads]
    [i.join() for i in threads]
    return results
def listmovies(url):
    basic.log(u"imdb.listmovies url: %s" % url)
    mainlist = []
    sendlist = []
    result = []
    threads = []
    order = 0
    htmlpage = basic.open_url(url)
    found = re.findall('data-tconst="(.+?)"', htmlpage, re.DOTALL)
    # Deduplicate the ids while preserving their first-seen order.
    for imdb_id in sorted(set(found), key=lambda x: found.index(x)):
        order += 1
        sendlist.append([order, imdb_id])
    chunks = [sendlist[x:x + 5] for x in xrange(0, len(sendlist), 5)]
    for i in range(len(chunks)):
        threads.append(threading.Thread(name='listmovies' + str(i),
                                        target=tmdb.searchmovielist,
                                        args=(chunks[i], result,)))
    [i.start() for i in threads]
    [i.join() for i in threads]
    result = sorted(result, key=basic.getKey)
    for id, lists in result:
        mainlist.append(lists)
    basic.log(u"imdb.listmovies mainlist: %s" % mainlist)
    return mainlist
def daily_resolver(url, prettyname, cachePath):
    if url.find('?') > -1:
        match = re.compile('/embed/video/(.+?)\?').findall(url)
    else:
        match = re.compile('/embed/video/(.*)').findall(url)
    if match:
        videocache = os.path.join(cachePath, str(match[0]))
        if getSetting("cachesites") == 'true' and os.path.isfile(videocache):
            jsonline = basic.readfiletoJSON(videocache)
            jsonloaded = json.loads(jsonline, encoding="utf-8")
            return jsonline, jsonloaded
        else:
            try:
                data = basic.open_url('https://api.dailymotion.com/video/' + str(match[0]) + '?fields=title,duration,thumbnail_url,description')
                parameters = json.loads(data)
                title = basic.cleanTitle(parameters['title'])
                title2 = ''
                try:
                    title2 = title.decode('utf8').encode('ascii', 'xmlcharrefreplace')
                except:
                    pass
                if title2 != '':
                    title = title2
                duration = parameters['duration']
                thumbnail = parameters['thumbnail_url']
                jsontext = '{"prettyname":"' + prettyname + '","url":"plugin://plugin.video.dailymotion_com/?mode=playVideo&url=' + str(match[0]) + '","title":"' + title.encode('ascii', 'xmlcharrefreplace') + '","duration":"' + str(duration) + '","thumbnail":"' + thumbnail + '"}'
                jsonloaded = json.loads(jsontext, encoding="utf-8")
                if getSetting("cachesites") == 'true':
                    basic.writefile(videocache, 'w', jsontext.encode('utf8'))
                return jsontext, jsonloaded
            except BaseException as e:
                print '##ERROR-funvideos:daily_resolver: ' + str(match[0]) + ' ' + str(e)
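# The resolvers above all follow the same read-through cache shape. A
# standalone sketch of that pattern (generic names, not the add-on's API):
import json, os

def get_or_fetch(cache_path, key, fetch):
    """Return cached JSON for key, else call fetch() and cache its result."""
    cache_file = os.path.join(cache_path, str(key))
    if os.path.isfile(cache_file):
        with open(cache_file) as f:
            return json.load(f)
    entry = fetch()                      # e.g. hit the remote metadata API
    with open(cache_file, 'w') as f:
        json.dump(entry, f)
    return entry

# get_or_fetch('/tmp', 'x2abcde', lambda: {"title": "clip", "duration": "42"})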
def grab(url, prettyname, cachePath, cacheE):
    list = []
    try:
        content = basic.open_url(url)
        spl = content.split('<div class="videoListItem">')
        for i in range(1, len(spl), 1):
            entry = spl[i]
            match = re.compile('data-youtubeid="(.+?)"', re.DOTALL).findall(entry)
            id = match[0]
            match = re.compile('<div class="duration">(.+?)</div>', re.DOTALL).findall(entry)
            # Durations come as mm:ss; convert to seconds.
            duration = match[0].strip()
            splDuration = duration.split(":")
            duration = str(int(splDuration[0]) * 60 + int(splDuration[1]))
            thumb = "http://img.youtube.com/vi/" + id + "/0.jpg"
            match = re.compile('alt="(.+?)"', re.DOTALL).findall(entry)
            title = basic.cleanTitle(match[0])
            videocache = os.path.join(cachePath, str(id))
            title2 = ''
            try:
                title2 = title.decode('utf8').encode('ascii', 'xmlcharrefreplace')
            except:
                pass
            if title2 != '':
                title = title2
            jsontext = '{"prettyname":"' + prettyname + '","url":"plugin://plugin.video.youtube/?action=play_video&videoid=' + str(id) + '","title":"' + title + '","duration":"' + str(duration) + '","thumbnail":"' + thumb + '"}'
            jsonloaded = json.loads(jsontext, encoding="utf-8")
            if cacheE == 'true' and not os.path.isfile(videocache):
                basic.writefile(videocache, 'w', jsontext.encode('utf8'))
            list.append(jsonloaded)
        if list:
            return list
    except BaseException as e:
        print '##ERROR-funvideos:VitaminL_resolver: ' + url + ' ' + str(e)
def grab(url, prettyname, cachePath, cacheE):
    list = []
    try:
        page = basic.open_url(url)
        j = json.loads(page)
        for vid in j['videos']['video']:
            ids = vid['id']
            videocache = os.path.join(cachePath, str(ids))
            if cacheE == 'true' and os.path.isfile(videocache):
                jsonline = basic.readfiletoJSON(videocache)
                jsonloaded = json.loads(jsonline, encoding="utf-8")
            else:
                title = basic.cleanTitle(vid['title'])
                title2 = ''
                try:
                    title2 = title.decode('utf8').encode('ascii', 'xmlcharrefreplace')
                except:
                    pass
                if title2 != '':
                    title = title2
                # length comes as mm:ss; convert to seconds.
                decomp = re.compile('(\d+):(\d+)', re.DOTALL).findall(vid['length'])
                duration = int(decomp[0][0]) * 60 + int(decomp[0][1])
                thumb = 'http://videos.snotr.com/' + str(ids) + '-large.jpg'
                finalUrl = 'http://videos.snotr.com/' + str(ids) + '.mp4'
                jsontext = '{"prettyname":"' + prettyname + '","url":"' + finalUrl + '","title":"' + title + '","duration":"' + str(duration) + '","thumbnail":"' + thumb + '"}'
                jsonloaded = json.loads(jsontext, encoding="utf-8")
                if cacheE == 'true' and not os.path.isfile(videocache):
                    basic.writefile(videocache, 'w', jsontext.encode('utf8'))
            list.append(jsonloaded)
        return list
    except BaseException as e:
        print '##ERROR-funvideos:Snotr_resolver: ' + url + ' ' + str(e)
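# Hypothetical helper (not in the add-on) for the mm:ss strings parsed by the
# Snotr and VitaminL grabbers above; it also tolerates hh:mm:ss.
def clock_to_seconds(clock):
    """'4:13' -> 253; '1:02:03' -> 3723."""
    seconds = 0
    for part in clock.strip().split(':'):
        seconds = seconds * 60 + int(part)
    return seconds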
def getgenre(url):
    genrechoice = xbmcgui.Dialog().select
    htmlpage = basic.open_url(url)
    found = re.findall('<h3>Top Movies by Genre</h3>.+?</html>', htmlpage, re.DOTALL)
    newfound = re.findall('<a href="/genre/(.+?)\?', found[0], re.DOTALL)
    choose = genrechoice(language(30021).encode('utf-8'), newfound)
    if choose > -1:
        return newfound[choose]
def sapo_resolver(url, prettyname, cachePath):
    match = re.compile('file=http://.+?/(.+?)/mov/').findall(url)
    if match:
        videocache = os.path.join(cachePath, str(match[0]))
        if os.path.isfile(videocache):
            jsonline = basic.readfiletoJSON(videocache)
            jsonloaded = json.loads(jsonline, encoding="utf-8")
            return jsonline, jsonloaded
        else:
            try:
                sapoAPI = basic.open_url('http://rd3.videos.sapo.pt/' + match[0] + '/rss2')
                duration = ''
                # <sapo:time> is hh:mm:ss; convert to seconds.
                times = re.compile('<sapo:time>(\d+):(\d+):(\d+)</sapo:time').findall(sapoAPI)
                for horas, minutos, segundos in times:
                    duration = int(segundos) + int(minutos) * 60 + int(horas) * 3600
                thumbnail = re.compile('img src="(.+?)"').findall(sapoAPI)
                title = re.compile('<title>(.+?)</title>').findall(sapoAPI)[1]
                title2 = ''
                try:
                    title2 = title.decode('utf8').encode('ascii', 'xmlcharrefreplace')
                except:
                    pass
                if title2 != '':
                    title = title2
                urlfinal = re.compile('<sapo:videoFile>(.+?)</sapo:videoFile>').findall(sapoAPI)
                jsontext = '{"prettyname":"' + prettyname + '","url":"' + urlfinal[0] + '","title":"' + title + '","duration":"' + str(duration) + '","thumbnail":"' + thumbnail[0] + '"}'
                jsonloaded = json.loads(jsontext, encoding="utf-8")
                if getSetting("cachesites") == 'true':
                    basic.writefile(videocache, 'w', jsontext.encode('utf8'))
                return jsontext, jsonloaded
            except BaseException as e:
                print '##ERROR-funvideos:sapo_resolver: ' + url + ' ' + str(e)
def youtube_resolver(url, prettyname, cachePath):
    match = re.compile('.*?youtube.com/embed/(.+?)\?').findall(url)
    if not match:
        match = re.compile('.*?youtube.com/embed/(.*)').findall(url)
    if match:
        videocache = os.path.join(cachePath, str(match[0]))
        if getSetting("cachesites") == 'true' and os.path.isfile(videocache):
            jsonline = basic.readfiletoJSON(videocache)
            jsonloaded = json.loads(jsonline, encoding="utf-8")
            return jsonline, jsonloaded
        else:
            try:
                data = basic.open_url('https://gdata.youtube.com/feeds/api/videos/' + str(match[0]) + '?v2&alt=json')
                parameters = json.loads(data)
                title = basic.cleanTitle(parameters['entry']['title']['$t'])
                title2 = ''
                try:
                    title2 = title.decode('utf8').encode('ascii', 'xmlcharrefreplace')
                except:
                    title2 = title.encode('ascii', 'xmlcharrefreplace')
                if title2 != '':
                    title = title2
                duration = parameters['entry']['media$group']['yt$duration']['seconds']
                thumbnail = parameters['entry']['media$group']['media$thumbnail'][0]['url']
                jsontext = '{"prettyname":"' + prettyname + '","url":"plugin://plugin.video.youtube/?action=play_video&videoid=' + str(match[0]) + '","title":"' + title + '","duration":"' + str(duration) + '","thumbnail":"' + thumbnail + '"}'
                jsonloaded = json.loads(jsontext, encoding="latin-1")
                if getSetting("cachesites") == 'true':
                    basic.writefile(videocache, 'w', jsontext)
                return jsontext, jsonloaded
            except BaseException as e:
                print '##ERROR-funvideos:youtube_resolver: ' + str(match[0]) + ' ' + str(e)
def listmovies(url):
    basic.log(u"omdbapi.listmovies url: %s" % url)
    mainlist = []
    sendlist = []
    result = []
    threads = []
    order = 0
    jsonpage = basic.open_url(url)
    j = json.loads(jsonpage)
    for movie in j['results']:
        order += 1
        sendlist.append([order, movie['id']])
    chunks = [sendlist[x:x + 5] for x in xrange(0, len(sendlist), 5)]
    for i in range(len(chunks)):
        threads.append(threading.Thread(name='listmovies' + str(i),
                                        target=searchmovielist,
                                        args=(chunks[i], result,)))
    [i.start() for i in threads]
    [i.join() for i in threads]
    result = sorted(result, key=basic.getKey)
    for id, lists in result:
        mainlist.append(lists)
    basic.log(u"omdbapi.listmovies mainlist: %s" % mainlist)
    return mainlist
def grab(url, prettyname, cachePath, cacheE):
    list = []
    try:
        page = basic.open_url(url)
        page = page.replace("\\", "")
        ids = re.findall('data-content-id="(\d+)"', page, re.DOTALL)
        for videoid in ids:
            videocache = os.path.join(cachePath, str(videoid))
            if cacheE == 'true' and os.path.isfile(videocache):
                jsonline = basic.readfiletoJSON(videocache)
                jsonloaded = json.loads(jsonline, encoding="utf-8")
            else:
                content = basic.open_url("http://www.break.com/embed/" + videoid)
                matchAuth = re.compile('"AuthToken": "(.+?)"', re.DOTALL).findall(content)
                matchURL = re.compile('"uri": "(.+?)".+?"height": (.+?),', re.DOTALL).findall(content)
                matchYT = re.compile('"youtubeId": "(.*?)"', re.DOTALL).findall(content)
                title = re.compile('"contentName": "(.+?)",', re.DOTALL).findall(content)
                title = basic.cleanTitle(title[0])
                title2 = ''
                try:
                    title2 = title.decode('utf8').encode('ascii', 'xmlcharrefreplace')
                except:
                    pass
                if title2 != '':
                    title = title2
                duration = re.compile('"videoLengthInSeconds": "(\d+)",', re.DOTALL).findall(content)
                thumb = re.compile('"thumbUri": "(.+?)",', re.DOTALL).findall(content)
                finalUrl = ""
                if matchYT and matchYT[0] != "":
                    # Video is hosted on YouTube; cache it under the YouTube id too.
                    finalUrl = "plugin://plugin.video.youtube/play/?video_id=" + matchYT[0]
                    jsontext = '{"prettyname":"' + prettyname + '","url":"' + finalUrl + '","title":"' + title + '","duration":"' + str(duration[0]) + '","thumbnail":"' + thumb[0] + '"}'
                    jsonloaded = json.loads(jsontext, encoding="utf-8")
                    videocache2 = os.path.join(cachePath, str(matchYT[0]))
                    if cacheE == 'true' and not os.path.isfile(videocache):
                        basic.writefile(videocache2, 'w', jsontext.encode('utf8'))
                else:
                    # Pick the highest-resolution direct stream.
                    max = 0
                    for streamurl, height in matchURL:
                        height = int(height)
                        if height > max:
                            finalUrl = streamurl.replace(".wmv", ".flv") + "?" + matchAuth[0]
                            max = height
                    jsontext = '{"prettyname":"' + prettyname + '","url":"' + finalUrl + '","title":"' + title + '","duration":"' + str(duration[0]) + '","thumbnail":"' + thumb[0] + '"}'
                    jsonloaded = json.loads(jsontext, encoding="utf-8")
                    if cacheE == 'true' and not os.path.isfile(videocache):
                        basic.writefile(videocache, 'w', jsontext.encode('utf8'))
            list.append(jsonloaded)
        return list
    except BaseException as e:
        print '##ERROR-funvideos:Break_resolver: ' + url + ' ' + str(e)
def getgenre(url):
    genrechoice = xbmcgui.Dialog().select
    htmlpage = basic.open_url(url)
    found = re.findall('<h3>Popular Movies by Genre</h3>.+?</html>', htmlpage, re.DOTALL)
    newfound = re.findall('<a href="/genre/(.+?)\?', found[0], re.DOTALL)
    choose = genrechoice("select", newfound)
    if choose > -1:
        return newfound[choose]
def grab(url, prettyname, cachePath, cacheE):
    list = []
    try:
        page = basic.open_url(url)
        page = page.replace("\\", "")
        ids = re.findall('data-content-id="(\d+)"', page, re.DOTALL)
        for videoid in ids:
            videocache = os.path.join(cachePath, str(videoid))
            if cacheE == 'true' and os.path.isfile(videocache):
                jsonline = basic.readfiletoJSON(videocache)
                jsonloaded = json.loads(jsonline, encoding="utf-8")
            else:
                content = basic.open_url("http://www.break.com/embed/" + videoid)
                matchAuth = re.compile('"AuthToken": "(.+?)"', re.DOTALL).findall(content)
                matchURL = re.compile('"uri": "(.+?)".+?"height": (.+?),', re.DOTALL).findall(content)
                matchYT = re.compile('"youtubeId": "(.*?)"', re.DOTALL).findall(content)
                title = re.compile('"contentName": "(.+?)",', re.DOTALL).findall(content)
                title = basic.cleanTitle(title[0])
                title2 = ''
                try:
                    title2 = title.decode('utf8').encode('ascii', 'xmlcharrefreplace')
                except:
                    pass
                if title2 != '':
                    title = title2
                duration = re.compile('"videoLengthInSeconds": "(\d+)",', re.DOTALL).findall(content)
                thumb = re.compile('"thumbUri": "(.+?)",', re.DOTALL).findall(content)
                finalUrl = ""
                if matchYT and matchYT[0] != "":
                    # Video is hosted on YouTube; cache it under the YouTube id too.
                    finalUrl = "plugin://plugin.video.youtube/?action=play_video&videoid=" + matchYT[0]
                    jsontext = '{"prettyname":"' + prettyname + '","url":"' + finalUrl + '","title":"' + title + '","duration":"' + str(duration[0]) + '","thumbnail":"' + thumb[0] + '"}'
                    jsonloaded = json.loads(jsontext, encoding="utf-8")
                    videocache2 = os.path.join(cachePath, str(matchYT[0]))
                    if cacheE == 'true' and not os.path.isfile(videocache):
                        basic.writefile(videocache2, 'w', jsontext.encode('utf8'))
                else:
                    # Pick the highest-resolution direct stream.
                    max = 0
                    for streamurl, height in matchURL:
                        height = int(height)
                        if height > max:
                            finalUrl = streamurl.replace(".wmv", ".flv") + "?" + matchAuth[0]
                            max = height
                    jsontext = '{"prettyname":"' + prettyname + '","url":"' + finalUrl + '","title":"' + title + '","duration":"' + str(duration[0]) + '","thumbnail":"' + thumb[0] + '"}'
                    jsonloaded = json.loads(jsontext, encoding="utf-8")
                    if cacheE == 'true' and not os.path.isfile(videocache):
                        basic.writefile(videocache, 'w', jsontext.encode('utf8'))
            list.append(jsonloaded)
        return list
    except BaseException as e:
        print '##ERROR-funvideos:Break_resolver: ' + url + ' ' + str(e)
def listmovies(url, tip):
    basic.log(u"cnmg.listmovies url: %s" % url)
    mainlist = []
    sendlist = []
    result = []
    threads = []
    order = 0
    if tip == 'liste':
        htmlpage = basic.open_url(url)
        regex = '''<li class="list_item clearfix">(.+?)</li>'''
        regex2 = '''<a [^>]*href\s*=\s*"[^"]*imdb.com/title/(.*?)/"'''
        for lists in re.compile(regex, re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(htmlpage):
            for imdb_id in re.compile(regex2, re.DOTALL).findall(lists):
                order += 1
                sendlist.append([order, imdb_id])
        target = tmdb.searchmovielist
    elif tip == 'filme':
        headers = {
            'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:51.0) Gecko/20100101 Firefox/51.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Referer': url,
            'Cookie': 'ps=30'
        }
        htmlpage = basic.open_url_headers(url, headers)
        regex = '''<div class="poza">(.+?)</div>\n</li>'''
        regex2 = '''img src="(.+?)".+?<h2>.+?title.+?>(.+?)<.+?\((\d+)\).*(?:^$|<li>(.+?)</li>).*(?:^$|<li>(.+?)</li>).+?Gen.+?">(.+?)</ul>.+?(?:^$|\((.+?)\)).+?body".+?(?:^$|href="(.+?)".+?)(?:^$|<span>(.+?)</span>)'''
        for lists in re.compile(regex, re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(htmlpage):
            for imagine, nume, an, regia, actori, gen, nota, trailer, descriere in re.compile(regex2, re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(lists):
                order += 1
                nume = nume.decode('utf-8')
                sendlist.append([order, imagine, nume, an, regia, actori, gen, nota, trailer, descriere])
        target = omdbapi.searchmovielist
    chunks = [sendlist[x:x + 5] for x in xrange(0, len(sendlist), 5)]
    for i in range(len(chunks)):
        threads.append(threading.Thread(name='listmovies' + str(i),
                                        target=target,
                                        args=(chunks[i], result,)))
    [i.start() for i in threads]
    [i.join() for i in threads]
    result = sorted(result, key=basic.getKey)
    for id, lists in result:
        mainlist.append(lists)
    basic.log(u"cnmg.listmovies mainlist: %s" % mainlist)
    return mainlist
def grab(url, prettyname, cachePath, cacheE):
    list = []
    try:
        page = basic.open_url(url)
        links = re.compile('<article class="video-preview" data-viewkey=".+?"><a title=".+?" href="(.+?)">', re.DOTALL).findall(page)
        for link in links:
            spage = basic.open_url('http://www.funnyordie.com' + link)
            title = basic.cleanTitle(re.compile('<meta property="og:title" content="(.+?)">', re.DOTALL).findall(spage)[0])
            thumb = re.compile('<meta property="og:image" content="(.+?)">', re.DOTALL).findall(spage)[0]
            title2 = ''
            try:
                title2 = title.decode('utf8').encode('ascii', 'xmlcharrefreplace')
            except:
                pass
            if title2 != '':
                title = title2
            duration = re.compile('<meta property="video:duration" content="(\d+)"/>', re.DOTALL).findall(spage)[0]
            finalUrl = 'http://' + re.compile('<source src="//(.+?)" type=\'video/mp4\'>', re.DOTALL).findall(spage)[0]
            jsontext = '{"prettyname":"' + prettyname + '","url":"' + finalUrl + '","title":"' + title + '","duration":"' + str(duration) + '","thumbnail":"' + thumb + '"}'
            jsonloaded = json.loads(jsontext, encoding="utf-8")
            list.append(jsonloaded)
        return list
    except BaseException as e:
        print '##ERROR-funvideos:funnyordie_resolver: ' + url + ' ' + str(e)
def gettari(url, tip=''):
    htmlpage = basic.open_url(url)
    tarisoara = []
    order = 0
    regex = '''class="filters_list">(.+?)</div'''
    regex2 = '''<a href="(.+?)".+?>(.+?)<'''
    tari = re.compile(regex, re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(htmlpage)
    if tip == 'tari':
        search = tari[2]
    elif tip == 'gen':
        search = tari[0]
    for link, nume in re.compile(regex2, re.DOTALL).findall(search):
        order += 1
        nume = nume.decode('utf-8')
        tarisoara.append([order, link, nume])
    return tarisoara
def listmovies(url):
    basic.log(u"imdb.listmovies url: %s" % url)
    mainlist = []
    sendlist = []
    result = []
    threads = []
    order = 0
    htmlpage = basic.open_url(url)
    found = re.findall('data-tconst="(.+?)"', htmlpage, re.DOTALL)
    for imdb_id in found:
        order += 1
        sendlist.append([order, imdb_id])
    chunks = [sendlist[x:x + 5] for x in xrange(0, len(sendlist), 5)]
    for i in range(len(chunks)):
        threads.append(threading.Thread(name='listmovies' + str(i),
                                        target=tmdb.searchmovielist,
                                        args=(chunks[i], result,)))
    [i.start() for i in threads]
    [i.join() for i in threads]
    result = sorted(result, key=basic.getKey)
    for id, lists in result:
        mainlist.append(lists)
    basic.log(u"imdb.listmovies mainlist: %s" % mainlist)
    return mainlist
def listmovies(url): basic.log(u"omdbapi.listmovies url: %s" % url) mainlist = [] sendlist = [] result = [] threads = [] order = 0 jsonpage = basic.open_url(url) j = json.loads(jsonpage) for list in j['results']: order += 1 sendlist.append([order,list['id']]) chunks=[sendlist[x:x+5] for x in xrange(0, len(sendlist), 5)] for i in range(len(chunks)): threads.append(threading.Thread(name='listmovies'+str(i),target=searchmovielist,args=(chunks[i],result, ))) [i.start() for i in threads] [i.join() for i in threads] result = sorted(result, key=basic.getKey) for id,lists in result: mainlist.append(lists) basic.log(u"omdbapi.listmovies mainlist: %s" % mainlist) return mainlist
def getliste(url):
    htmlpage = basic.open_url(url)
    liste = []
    order = 0
    regex = '''<div class="list_preview clearfix">(.+?)<div class="list_meta">(.+?)</div>'''
    regex2 = '''img src="(.+?)".+?"up">(.+?)<.+?"down">(.+?)<.+?list_name.+?<a href="(.+?)">(.+?)</a>'''
    for lists in re.compile(regex, re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(htmlpage):
        for imagine, aprecieri, deprecieri, link, nume in re.compile(regex2, re.DOTALL).findall(lists[0]):
            order += 1
            nume = nume.decode('utf-8').strip()
            nume += ' (%s filme) ' % (re.findall('cu.+?(\d+)', lists[1])[0])
            info = (striphtml(lists[1]).strip()).decode('utf-8')
            info += ' | Cu %s aprecieri si %s deprecieri' % (aprecieri, deprecieri)
            descriere = {'plot': info, 'title': nume}
            liste.append([order, imagine, link, nume, descriere])
    return liste
def getlinks(url, results, order, Source=None):
    basic.log(u"imdb.getlinks url: %s" % url)
    try:
        html_page = basic.open_url(url)
        if html_page:
            soup = BeautifulSoup(html_page)
            if Source == 'IMDB':
                for link in soup.findAll('a', attrs={'href': re.compile("^/title/.+?/\?ref_=.+?_ov_tt")}):
                    if '?' in link.get('href'):
                        cleanlink = link.get('href').split("?")[0].split("title")[1].replace('/', '').replace('awards', '').replace('videogallery', '')
                    else:
                        cleanlink = link.get('href').split("title")[1].replace('/', '').replace('awards', '').replace('videogallery', '')
                    results.append([order, cleanlink])
                    order += 1
            else:
                for link in soup.findAll('a', attrs={'href': re.compile("^http://.+?/title/")}):
                    if '?' in link.get('href'):
                        cleanlink = link.get('href').split("?")[0].split("/title/")[1].replace('/', '').replace('awards', '').replace('videogallery', '')
                    else:
                        cleanlink = link.get('href').split("title")[1].replace('/', '').replace('awards', '').replace('videogallery', '')
                    results.append([order, cleanlink])
                    order += 1
        basic.log(u"imdb.getlinks results: %s" % results)
        return results
    except BaseException as e:
        basic.log(u"imdb.getlinks ERROR: %s - %s" % (str(url), str(e)))
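# Quick standalone check of the href cleaning above, using bs4 instead of the
# legacy BeautifulSoup import; the sample HTML is made up:
from bs4 import BeautifulSoup
import re

sample = '<a href="/title/tt0111161/?ref_=adv_li_tt">Shawshank</a>'
soup = BeautifulSoup(sample, 'html.parser')
for link in soup.findAll('a', attrs={'href': re.compile('^/title/')}):
    href = link.get('href').split('?')[0]           # '/title/tt0111161/'
    print(href.split('title')[1].replace('/', ''))  # -> tt0111161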
def listmovies(url): basic.log(u"rotten.listmovies url: %s" % url) mainlist = [] sendlist = [] result = [] threads = [] order = 0 jsonpage = basic.open_url(url) print 'jsonpage %s' % jsonpage j = json.loads(jsonpage) for list in j['movies']: order += 1 try: sendlist.append([order,'tt'+list['alternate_ids']['imdb']]) except: pass chunks=[sendlist[x:x+5] for x in xrange(0, len(sendlist), 5)] for i in range(len(chunks)): threads.append(threading.Thread(name='listmovies'+str(i),target=tmdb.searchmovielist,args=(chunks[i],result, ))) [i.start() for i in threads] [i.join() for i in threads] result = sorted(result, key=basic.getKey) for id,lists in result: mainlist.append(lists) basic.log(u"rotten.listmovies mainlist: %s" % mainlist) return mainlist
def grabiframes(mainURL, prettyname, cachePath, results=None, index=None, pageURL=None):
    list = []
    if pageURL:
        pagecache = os.path.join(cachePath, pageURL)
    if pageURL and getSetting("cachesites") == 'true' and os.path.isfile(pagecache):
        jsonline = basic.readfiletoJSON(pagecache)
        jsonloaded = json.loads(jsonline, encoding="utf-8")
        if index:
            results.append(jsonloaded)
        else:
            list.append(jsonloaded)
    else:
        try:
            page = basic.open_url(mainURL)
        except:
            page = ' '
        # Some pages carry the video id in a data attribute instead of an
        # iframe; synthesize fake YouTube iframes so one code path handles both.
        blocker = re.findall('data-videoid="(.+?)"', page, re.DOTALL)
        if blocker:
            html = ['<iframe src="http://www.youtube.com/embed/' + videoid + '"></iframe>' for videoid in blocker]
        else:
            html = re.findall('<iframe(.*?)</iframe>', page, re.DOTALL)
        blacklist = ['ad120m.com', 'facebook', 'metaffiliation', 'banner600',
                     'engine.adbooth.com', 'www.lolx2.com', 'jetpack.wordpress.com']
        resolvers = [('youtube', youtube_resolver), ('dailymotion', daily_resolver),
                     ('vimeo', vimeo_resolver), ('sapo', sapo_resolver),
                     ('videolog', videolog_resolver)]
        for trunk in html:
            iframe = ''
            for pattern in ('src="(.+?)"', "src='(.+?)'", 'data-src="(.+?)"'):
                found = re.compile(pattern).findall(trunk)
                if found:
                    iframe = found[0]
                    break
            if not iframe:
                continue
            if any(bad in iframe for bad in blacklist):
                continue
            print "##filmes-ondemand: " + iframe
            resolver = None
            for marker, func in resolvers:
                if marker in iframe:
                    resolver = func
                    break
            if not resolver:
                print '##ERROR-filmes:frame on server not supported: ' + iframe
                continue
            try:
                if resolver is youtube_resolver:
                    iframe = iframe.replace('-nocookie', '')
                textR, resolver_iframe = resolver(iframe, prettyname, cachePath)
                if resolver_iframe:
                    if index:
                        results.append(resolver_iframe)
                    else:
                        list.append(resolver_iframe)
                    if pageURL and getSetting("cachesites") == 'true':
                        basic.writefile(pagecache, 'w', textR)
            except BaseException as e:
                print '##ERROR-##filmes-ondemand: ' + iframe + ' ' + str(e)
    if not index:
        return list
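# Standalone illustration of the iframe-src extraction used above (sample
# HTML is made up; only the stdlib is used):
import re

page = '<iframe width="480" src="http://www.youtube.com/embed/abc123"></iframe>'
for trunk in re.findall('<iframe(.*?)</iframe>', page, re.DOTALL):
    for pattern in ('src="(.+?)"', "src='(.+?)'", 'data-src="(.+?)"'):
        found = re.findall(pattern, trunk)
        if found:
            print(found[0])  # -> http://www.youtube.com/embed/abc123
            break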
def searchmovie(id):
    basic.log(u"tmdb.searchmovie id: %s" % id)
    listgenre = []
    listcast = []
    listcastr = []
    genre = ''
    title = ''
    plot = ''
    tagline = ''
    director = ''
    writer = ''
    credits = ''
    poster = ''
    fanart = ''
    if getSetting("cachesites") == 'true':
        cached = localdb.get_cache(id)
        if cached:
            response = {
                "label": cached[2],
                "originallabel": cached[3],
                "poster": cached[4],
                "fanart_image": cached[5],
                "imdbid": cached[0],
                "year": cached[6],
                "info": json.loads(cached[7])
            }
            return response
    jsonpage = basic.open_url(links.link().tmdb_info_default % (id))
    try:
        jdef = json.loads(jsonpage)
    except:
        # TMDb gave no usable JSON; for IMDb-style ids fall back to OMDb.
        if 'tt' in str(id):
            try:
                return omdbapi.searchmovie(str(id))
            except:
                return False
        else:
            return False
    if LANG != 'en':
        # Prefer localized title, artwork and plot when available.
        try:
            jsonpage = basic.open_url(links.link().tmdb_info % (id, LANG))
            j = json.loads(jsonpage)
            temptitle = j['title'].encode('ascii', 'ignore').replace(' ', '')
            if temptitle != '':
                title = j['title']
            for g in j['genres']:
                listgenre.append(g['name'])
            genre = ', '.join(listgenre)
            try:
                plot = j['overview']
            except:
                pass
            try:
                tagline = j['tagline']
            except:
                pass
            # Keep the raw paths; the shared code below applies the base URLs.
            fanart = j["backdrop_path"]
            poster = j["poster_path"]
        except:
            pass
    temptitle = jdef['title'].encode('ascii', 'ignore').replace(' ', '')
    if temptitle != '':
        if not title:
            title = jdef['title']
    temporiginaltitle = jdef['original_title'].encode('ascii', 'ignore')
    if temptitle == '':
        originaltitle = jdef['title']
    if temporiginaltitle == '':
        originaltitle = jdef['title']
    else:
        originaltitle = jdef['original_title']
    if not poster:
        poster = jdef['poster_path']
    if not fanart:
        fanart = jdef['backdrop_path']
    if not fanart:
        fanart = poster
    if fanart:
        fanart = links.link().tmdb_backdropbase % (fanart)
    if poster:
        poster = links.link().tmdb_posterbase % (poster)
    if genre == '':
        for g in jdef['genres']:
            listgenre.append(g['name'])
        genre = ', '.join(listgenre)
    if not plot:
        plot = jdef['overview']
    if not tagline:
        tagline = jdef['tagline']
    try:
        trailer = links.link().youtube_plugin % (jdef['trailers']['youtube'][0]['source'])
    except:
        trailer = ''
    try:
        year = jdef["release_date"].split("-")[0]
    except:
        year = ''
    try:
        studio = jdef['production_companies'][0]['name']
    except:
        studio = ''
    for listc in jdef['credits']['cast']:
        listcastr.append(listc['name'] + '|' + listc['character'])
        listcast.append(listc['name'])
    for crew in jdef['credits']['crew']:
        if crew['job'] == 'Director':
            director = crew['name']
            break
    for crew in jdef['credits']['crew']:
        if crew['job'] == 'Story':
            credits = crew['name']
            break
    for crew in jdef['credits']['crew']:
        if crew['job'] in ('Writer', 'Novel', 'Screenplay'):
            writer = crew['name']
            break
    duration = jdef['runtime']
    # Fill whatever TMDb left empty from OMDb; that needs an IMDb id.
    if (not poster or duration == 0) and jdef['imdb_id']:
        altsearch = omdbapi.searchmovie(jdef['imdb_id'], False)
        if not poster:
            poster = altsearch['poster']
        if not fanart:
            fanart = poster
        if not plot:
            plot = altsearch['info']['plot']
        if not tagline:
            tagline = altsearch['info']['plot']
        if not listcast:
            listcast = altsearch['info']['cast']
            listcastr = []
        if not duration:
            duration = altsearch['info']['duration']
        if not writer:
            writer = altsearch['info']['writer']
        if not director:
            director = altsearch['info']['director']
        if not genre:
            genre = altsearch['info']['genre']
    info = {
        "genre": genre,
        "year": year,
        "rating": jdef['vote_average'],
        "cast": listcast,
        "castandrole": listcastr,
        "director": director,
        "plot": plot,
        "plotoutline": plot,
        "title": title,
        "originaltitle": originaltitle,
        "duration": duration,
        "studio": studio,
        "tagline": tagline,
        "writer": writer,
        "premiered": jdef['release_date'],
        "code": jdef['imdb_id'],
        "credits": credits,
        "votes": jdef['vote_count'],
        "trailer": trailer
    }
    response = {
        "label": '%s (%s)' % (title, year),
        "originallabel": '%s (%s)' % (originaltitle, year),
        "poster": poster,
        "fanart_image": fanart,
        "imdbid": jdef['imdb_id'],
        "year": year,
        "info": info
    }
    if getSetting("cachesites") == 'true':
        if not str(id).startswith('tt'):
            tmdbid = id
        else:
            tmdbid = jdef['id']
        localdb.save_cache(jdef['imdb_id'], tmdbid,
                           '%s (%s)' % (title, year),
                           '%s (%s)' % (originaltitle, year),
                           poster, fanart, year, json.dumps(info))
    return response
def grabiframes(mainURL, prettyname, cachePath, results=None, index=None, pageURL=None):
    list = []
    if pageURL:
        pagecache = os.path.join(cachePath, pageURL)
    if pageURL and getSetting("cachesites") == 'true' and os.path.isfile(pagecache):
        jsonline = basic.readfiletoJSON(pagecache)
        jsonloaded = json.loads(jsonline, encoding="utf-8")
        if index:
            results.append(jsonloaded)
        else:
            list.append(jsonloaded)
    else:
        try:
            page = basic.open_url(mainURL)
        except:
            page = ' '
        # Some pages carry the video id in a data attribute instead of an
        # iframe; synthesize fake YouTube iframes so one code path handles both.
        blocker = re.findall('data-videoid="(.+?)"', page, re.DOTALL)
        if blocker:
            html = ['<iframe src="http://www.youtube.com/embed/' + videoid + '"></iframe>' for videoid in blocker]
        else:
            html = re.findall('<iframe(.*?)</iframe>', page, re.DOTALL)
        blacklist = ['ad120m.com', 'facebook', 'metaffiliation', 'banner600',
                     'engine.adbooth.com', 'www.lolx2.com', 'jetpack.wordpress.com']
        resolvers = [('youtube', youtube_resolver), ('dailymotion', daily_resolver),
                     ('vimeo', vimeo_resolver), ('sapo', sapo_resolver),
                     ('videolog', videolog_resolver)]
        for trunk in html:
            iframe = ''
            for pattern in ('src="(.+?)"', "src='(.+?)'", 'data-src="(.+?)"'):
                found = re.compile(pattern).findall(trunk)
                if found:
                    iframe = found[0]
                    break
            if not iframe:
                continue
            if any(bad in iframe for bad in blacklist):
                continue
            print "##funvideos-grabiframes: " + iframe
            resolver = None
            for marker, func in resolvers:
                if marker in iframe:
                    resolver = func
                    break
            if not resolver:
                print '##ERROR-funvideos:frame on server not supported: ' + iframe
                continue
            try:
                if resolver is youtube_resolver:
                    iframe = iframe.replace('-nocookie', '')
                textR, resolver_iframe = resolver(iframe, prettyname, cachePath)
                if resolver_iframe:
                    if index:
                        results.append(resolver_iframe)
                    else:
                        list.append(resolver_iframe)
                    if pageURL and getSetting("cachesites") == 'true':
                        basic.writefile(pagecache, 'w', textR)
            except BaseException as e:
                print '##ERROR-##funvideos-grabiframes: ' + iframe + ' ' + str(e)
    if not index:
        return list
def searchmovie(id, an=None, cache=True):
    basic.log(u"omdbapi.searchmovie id: %s" % id)
    genre = ''
    title = ''
    plot = ''
    tagline = ''
    director = ''
    writer = ''
    poster = ''
    fanart = ''
    trailer = ''
    year = ''
    dur = 0
    if cache:
        if getSetting("cachesites") == 'true':
            cached = localdb.get_cache(id, an)
            if cached:
                response = {
                    "label": cached[2],
                    "originallabel": cached[3],
                    "poster": cached[4],
                    "fanart_image": cached[5],
                    "imdbid": cached[0],
                    "year": cached[6],
                    "info": json.loads(cached[7])
                }
                return response
    if an:
        # Scraped-site mode: id is the whole record, not an IMDb id.
        ordine = id[0]
        imagine = id[1]
        nume = id[2]
        an = id[3]
        regia = id[4]
        actori = id[5]
        gen = id[6]
        nota = id[7]
        trailer = id[8]
        descriere = id[9]
        id = '1'
        #jsonpage = basic.open_url(links.link().omdbapi_byname % (nume.encode('ascii','xmlcharrefreplace'), an))
        jsonpage = {}
    else:
        jsonpage = basic.open_url(links.link().omdbapi_info % (id))
    try:
        jdef = json.loads(jsonpage)
    except:
        # No OMDb data; build the record from the scraped fields instead.
        try:
            nume = nume.decode('utf-8')
        except:
            pass
        jdef = {
            'Title': nume,
            'Poster': imagine,
            'Genre': striphtml(gen),
            'Plot': descriere,
            'Year': an,
            'Actors': re.sub('Cu: ', '', striphtml(actori)),
            'Director': re.sub('Regia: ', '', striphtml(regia)),
            'Writer': '',
            'Runtime': '',
            'imdbRating': re.sub('IMDB: ', '', nota),
            'imdbVotes': '',
            'trailer': trailer
        }
    try:
        title = jdef['Title']
    except:
        title = nume
    try:
        poster = jdef['Poster']
    except:
        poster = imagine
    fanart = poster
    try:
        genre = jdef['Genre']
    except:
        genre = striphtml(gen)
    try:
        plot = jdef['Plot']
    except:
        plot = descriere
    tagline = plot
    try:
        year = re.findall('(\d+)', jdef['Year'], re.DOTALL)[0]
    except:
        try:
            year = jdef['Year']
        except:
            year = an
    try:
        listcast = jdef['Actors'].split(', ')
    except:
        listcast = re.sub('Cu: ', '', striphtml(actori)).split(', ')
    try:
        director = jdef['Director']
    except:
        director = re.sub('Regia: ', '', striphtml(regia))
    try:
        writer = jdef['Writer']
    except:
        writer = ''
    try:
        # Runtime is either "NN min" or "N h".
        duration = re.findall('(\d+) min', jdef['Runtime'], re.DOTALL)
        if duration:
            dur = int(duration[0])
        else:
            duration = re.findall('(\d) h', jdef['Runtime'], re.DOTALL)
            if duration:
                dur = int(duration[0]) * 60
    except:
        duration = ''
    try:
        rating = jdef['imdbRating']
    except:
        rating = re.sub('IMDB: ', '', nota)
    try:
        votes = jdef['imdbVotes']
    except:
        votes = ''
    try:
        trailer = jdef['trailer']
    except:
        trailer = ''
    info = {
        "genre": genre,
        "year": year,
        "rating": rating,
        "cast": listcast,
        "castandrole": listcast,
        "director": director,
        "plot": plot,
        "plotoutline": plot,
        "title": title,
        "originaltitle": title,
        "duration": dur,
        "studio": '',
        "tagline": tagline,
        "writer": writer,
        "premiered": '',
        "code": id,
        "credits": '',
        "votes": votes,
        "trailer": trailer
    }
    response = {
        "label": '%s (%s)' % (title, year),
        "originallabel": '%s (%s)' % (title, year),
        "poster": poster,
        "fanart_image": fanart,
        "imdbid": id,
        "year": year,
        "info": info
    }
    if cache:
        if getSetting("cachesites") == 'true':
            localdb.save_cache(id, '', '%s (%s)' % (title, year),
                               '%s (%s)' % (title, year),
                               poster, fanart, year, json.dumps(info), an)
    return response
def searchmovie(id, cache=True):
    basic.log(u"omdbapi.searchmovie id: %s" % id)
    dur = 0
    if cache:
        if getSetting("cachesites") == 'true':
            cached = localdb.get_cache(id)
            if cached:
                response = {
                    "label": cached[2],
                    "originallabel": cached[3],
                    "poster": cached[4],
                    "fanart_image": cached[5],
                    "imdbid": cached[0],
                    "year": cached[6],
                    "info": json.loads(cached[7])
                }
                return response
    jsonpage = basic.open_url(links.link().omdbapi_info % (id))
    jdef = json.loads(jsonpage)
    title = jdef['Title']
    poster = jdef['Poster']
    fanart = poster
    genre = jdef['Genre']
    plot = jdef['Plot']
    tagline = plot
    try:
        year = re.findall('(\d+)', jdef['Year'], re.DOTALL)[0]
    except:
        year = jdef['Year']
    listcast = jdef['Actors'].split(', ')
    director = jdef['Director']
    writer = jdef['Writer']
    # Runtime is either "NN min" or "N h".
    duration = re.findall('(\d+) min', jdef['Runtime'], re.DOTALL)
    if duration:
        dur = int(duration[0])
    else:
        duration = re.findall('(\d) h', jdef['Runtime'], re.DOTALL)
        if duration:
            dur = int(duration[0]) * 60
    info = {
        "genre": genre,
        "year": year,
        "rating": jdef['imdbRating'],
        "cast": listcast,
        "castandrole": listcast,
        "director": director,
        "plot": plot,
        "plotoutline": plot,
        "title": title,
        "originaltitle": title,
        "duration": dur,
        "studio": '',
        "tagline": tagline,
        "writer": writer,
        "premiered": '',
        "code": id,
        "credits": '',
        "votes": jdef['imdbVotes'],
        "trailer": ''
    }
    response = {
        "label": '%s (%s)' % (title, year),
        "originallabel": '%s (%s)' % (title, year),
        "poster": poster,
        "fanart_image": fanart,
        "imdbid": id,
        "year": year,
        "info": info
    }
    if cache:
        if getSetting("cachesites") == 'true':
            localdb.save_cache(id, '', '%s (%s)' % (title, year),
                               '%s (%s)' % (title, year),
                               poster, fanart, year, json.dumps(info))
    return response