def createstrm(name, imdbid, year, url):
    addon_id = links.link().muchm_id
    addon_path = os.path.join(links.link().installfolder, addon_id)
    addon_getsettings = links.link().getSetting("muchm_enabled")
    addon_pos = links.link().getSetting("muchm_pos")
    if len(addon_pos) == 1:
        addon_pos = '0' + addon_pos
    strmBasePath = links.link().strmPath
    addonplay = links.link().muchm_play
    # Disable the setting when the target addon is not installed.
    if not os.path.exists(addon_path) and addon_getsettings == 'true':
        links.link().setSetting("muchm_enabled", 'false')
    if addon_getsettings == 'true':
        name = name + ' (' + year + ')'
        origname = name
        strmPath = os.path.join(strmBasePath, addon_pos + '.' + addon_id + '.strm')
        # Parentheses are escaped because the title is embedded in a regex.
        searchresponse = '<a href="(.+?)">%s</a>' % name.replace('(', '\(').replace(')', '\)')
        name = name.replace('(', '').replace(')', '').replace(' ', '-')
        url = links.link().muchm_base % search.basic_search(
            links.link().muchm_search, name, imdbid, year, searchresponse, 'Name')
        if url:
            playurl = addonplay % (urllib.quote_plus(origname), urllib.quote_plus(url))
            basic.writefile(strmPath, 'w', playurl)
def icesearch(title):
    # Strip a leading "The " so titles are looked up under their A-Z index letter.
    if title.lower().startswith('the '):
        title2 = title.lower().replace('the ', '')
    else:
        title2 = title
    if title2[0].isalpha():
        url = links.link().ice_base + "/movies/a-z/" + title2[0].upper()
    else:
        # Titles starting with a digit are indexed under "1".
        url = links.link().ice_base + "/movies/a-z/1"
    html = basic.open_url(url)
    soup = BeautifulSoup(html)
    link = soup.find("a", href=re.compile("ip.php"), text=title)
    if link:
        return links.link().ice_base + link.parent["href"]
    else:
        return None
def listmovies(url, index):
    basic.log(u"trakt.listmovies url: %s" % url)
    mainlist = []
    sendlist = []
    result = []
    threads = []
    order = 0
    # The 'popular' and 'trending' endpoints take identical headers, so build
    # them once; this also avoids an undefined 'headers' for any other URL.
    headers = {
        'Content-Type': 'application/json',
        'trakt-api-version': '2',
        'trakt-api-key': links.link().trakt_apikey,
        'page': index,
        'limit': '25'
    }
    jsonpage = basic.open_url_headers(url, headers)
    j = json.loads(jsonpage)
    for entry in j:
        order += 1
        if 'trending' in url:
            sendlist.append([order, entry['movie']['ids']['tmdb']])
        elif 'popular' in url:
            sendlist.append([order, entry['ids']['tmdb']])
    # Resolve TMDb details five movies at a time, one worker thread per chunk.
    chunks = [sendlist[x:x + 5] for x in xrange(0, len(sendlist), 5)]
    for i in range(len(chunks)):
        threads.append(threading.Thread(name='listmovies' + str(i),
                                        target=tmdb.searchmovielist,
                                        args=(chunks[i], result)))
    [i.start() for i in threads]
    [i.join() for i in threads]
    # Workers append out of order; restore the original ranking.
    result = sorted(result, key=basic.getKey)
    for id, lists in result:
        mainlist.append(lists)
    basic.log(u"trakt.listmovies mainlist: %s" % mainlist)
    return mainlist
def ytssearch(imdb_id):
    try:
        quality = []
        magnet = []
        try:
            yts = basic.open_url(links.link().yts_search % (imdb_id))
            jtys = json.loads(yts)
        except:
            return '', ''
        if 'No movies found' in str(jtys):
            return '', ''
        for j in jtys["data"]["movies"]:
            for i in j["torrents"]:
                #quality.append(j["Quality"]+'_'+j["Size"])
                quality.append(i["quality"])
                magnet.append(links.link().yts_magnet % (i["hash"], j["title_long"]))
        return quality, magnet
    except BaseException as e:
        print '##ERROR-addonsresolver:ytssearch: ' + str(imdb_id) + ' ' + str(e)
        # Callers unpack two values, so fail with the same empty shape.
        return '', ''
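
# A minimal usage sketch of ytssearch, mirroring how the .strm writers below
# consume it; 'tt0111161' is only an illustrative IMDb id.
# qual, magnet = ytssearch('tt0111161')
# for q, m in zip(qual, magnet):
#     print q, m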
def createstrm(name, imdbid, year, url):
    addon_id = links.link().rato_id
    addon_path = os.path.join(links.link().installfolder, addon_id)
    addon_getsettings = links.link().getSetting("rato_enabled")
    addon_pos = links.link().getSetting("rato_pos")
    if len(addon_pos) == 1:
        addon_pos = '0' + addon_pos
    strmBasePath = links.link().strmPath
    addonplay = links.link().rato_play
    # getSetting returns the string 'true'/'false', so compare explicitly;
    # a bare truth test would also match 'false'.
    if not os.path.exists(addon_path) and addon_getsettings == 'true':
        links.link().setSetting("rato_enabled", 'false')
    if addon_getsettings == 'true':
        strmPath = os.path.join(strmBasePath, addon_pos + '.' + addon_id + '.strm')
        url = search.basic_search(links.link().rato_search, name, imdbid, year,
                                  '<span class="more-btn"><a href="(.+?)" >Ver Agora</a>',
                                  'IMDB')
        if url:
            playurl = addonplay % (url, urllib.quote_plus(name))
            basic.writefile(strmPath, 'w', playurl)
def createstrm(name, imdbid, year, url):
    addon_id = links.link().wt_id
    addon_path = os.path.join(links.link().installfolder, addon_id)
    addon_getsettings = links.link().getSetting("wt_enabled")
    addon_pos = links.link().getSetting("wt_pos")
    if len(addon_pos) == 1:
        addon_pos = '0' + addon_pos
    strmBasePath = links.link().strmPath
    addonplay = links.link().wt_play
    if not os.path.exists(addon_path) and addon_getsettings == 'true':
        links.link().setSetting("wt_enabled", 'false')
    if addon_getsettings == 'true':
        strmPath = os.path.join(strmBasePath, addon_pos + '.' + addon_id + '.strm')
        url = search.basic_search(links.link().wt_search, name, imdbid, year,
                                  '<a href="(.+?)" class="movie-name">', 'NameYear')
        if url:
            playurl = addonplay % (urllib.quote_plus(links.link().wt_base % url),
                                   urllib.quote_plus(name))
            basic.writefile(strmPath, 'w', playurl)
def results(url, auth=True, post=None):
    try:
        trakt_key = links.link().trakt_apikey
        headers = {
            'Content-Type': 'application/json',
            'trakt-api-key': trakt_key,
            'trakt-api-version': '2'
        }
        if post is not None:
            post = json.dumps(post)
        # Only attach user credentials when requested and configured.
        if links.link().trakt_user == '' or links.link().trakt_password == '':
            pass
        elif not auth:
            pass
        else:
            token = auth_token(links.link().trakt_user, links.link().trakt_password)
            headers.update({
                'trakt-user-login': links.link().trakt_user,
                'trakt-user-token': token
            })
        request = urllib2.Request(url, data=post, headers=headers)
        response = urllib2.urlopen(request, timeout=30)
        result = response.read()
        response.close()
        return result
    except BaseException as e:
        basic.log(u"trakt.results ##Error: %s" % str(e))
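
# A hedged usage sketch: fetch one page of trending movies without user auth.
# The exact endpoint path is an assumption; only the api.trakt.tv host appears
# elsewhere in this module (see auth_token below).
# raw = results('https://api.trakt.tv/movies/trending?page=1&limit=25', auth=False)
# if raw:
#     print json.loads(raw)[0]['movie']['title']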
def createstrm(name, imdbid, year, url):
    addon_id = links.link().abelhas_id
    addon_path = os.path.join(links.link().installfolder, addon_id)
    addon_getsettings = links.link().getSetting("abelhas_enabled")
    addon_pos = links.link().getSetting("abelhas_pos")
    if len(addon_pos) == 1:
        addon_pos = '0' + addon_pos
    strmBasePath = links.link().strmPath
    addonplay = links.link().abelhas_search
    if not os.path.exists(addon_path) and addon_getsettings == 'true':
        links.link().setSetting("abelhas_enabled", 'false')
    if addon_getsettings == 'true':
        strmPath = os.path.join(strmBasePath, addon_pos + '.' + addon_id + '.strm')
        playurl = addonplay % (urllib.quote_plus(name + ' (' + year + ')'))
        basic.writefile(strmPath, 'w', playurl)
def createstrm(name, imdbid, year, url):
    addon_id = links.link().salts_id
    addon_path = os.path.join(links.link().installfolder, addon_id)
    addon_getsettings = links.link().getSetting("salts_enabled")
    addon_pos = links.link().getSetting("salts_pos")
    if len(addon_pos) == 1:
        addon_pos = '0' + addon_pos
    strmBasePath = links.link().strmPath
    addonplay = links.link().salts_play
    if not os.path.exists(addon_path) and addon_getsettings == 'true':
        links.link().setSetting("salts_enabled", 'false')
    if addon_getsettings == 'true':
        strmPath = os.path.join(strmBasePath, addon_pos + '.' + addon_id + '.strm')
        playurl = addonplay % (urllib.quote_plus(name), year,
                               urllib.quote_plus(name).replace('+', '-') + '-' + year)
        basic.writefile(strmPath, 'w', playurl)
def auth_token(trakt_user, trakt_password):
    try:
        trakt_key = links.link().trakt_apikey
        headers = {
            'Content-Type': 'application/json',
            'trakt-api-key': trakt_key,
            'trakt-api-version': '2'
        }
        # Note: do not log 'post' here; it contains the plaintext password.
        post = json.dumps({'login': trakt_user, 'password': trakt_password})
        request = urllib2.Request('https://api.trakt.tv/auth/login',
                                  data=post, headers=headers)
        response = urllib2.urlopen(request, timeout=10)
        result = response.read()
        result = json.loads(result)
        auth = result['token']
        response.close()
        return auth
    except BaseException as e:
        basic.log(u"trakt.auth ##Error: %s" % str(e))
def createstrm(name, imdbid, year, url):
    addon_id = links.link().stream_id
    addon_path = os.path.join(links.link().installfolder, addon_id)
    addon_getsettings = links.link().getSetting("stream_enabled")
    addon_pos = links.link().getSetting("stream_pos")
    if len(addon_pos) == 1:
        addon_pos = '0' + addon_pos
    strmBasePath = links.link().strmPath
    addonplay = links.link().stream_play
    if not os.path.exists(addon_path) and addon_getsettings == 'true':
        links.link().setSetting("stream_enabled", 'false')
    if addon_getsettings == 'true':
        qual, magnet = search.ytssearch(imdbid)
        if magnet:
            # One .strm file per available quality.
            for i in range(0, len(magnet)):
                strmPath = os.path.join(strmBasePath,
                                        addon_pos + '.' + addon_id + '.' + qual[i] + '.strm')
                playurl = addonplay % (urllib.quote_plus(magnet[i]))
                basic.writefile(strmPath, 'w', playurl)
def createstrm(name, imdbid, year, url):
    addon_id = links.link().ice_id
    addon_path = os.path.join(links.link().installfolder, addon_id)
    addon_getsettings = links.link().getSetting("ice_enabled")
    addon_pos = links.link().getSetting("ice_pos")
    if len(addon_pos) == 1:
        addon_pos = '0' + addon_pos
    strmBasePath = links.link().strmPath
    addonplay = links.link().ice_play
    if not os.path.exists(addon_path) and addon_getsettings == 'true':
        links.link().setSetting("ice_enabled", 'false')
    if addon_getsettings == 'true':
        url = search.icesearch(name + ' (' + year + ')')
        if url:
            strmPath = os.path.join(strmBasePath, addon_pos + '.' + addon_id + '.strm')
            playurl = addonplay % (urllib.quote_plus(url))
            basic.writefile(strmPath, 'w', playurl)
def createstrm(name, imdbid, year, url):
    addon_id = links.link().yify_id
    addon_path = os.path.join(links.link().installfolder, addon_id)
    addon_getsettings = links.link().getSetting("yify_enabled")
    addon_pos = links.link().getSetting("yify_pos")
    addonplay = links.link().yify_play
    if len(addon_pos) == 1:
        addon_pos = '0' + addon_pos
    strmBasePath = links.link().strmPath
    if not os.path.exists(addon_path) and addon_getsettings == 'true':
        links.link().setSetting("yify_enabled", 'false')
    if addon_getsettings == 'true':
        strmPath = os.path.join(strmBasePath, addon_pos + '.' + addon_id + '.strm')
        # Match the title/year pair inside the site's JSON search response.
        searchresponse = ('"title":"%s","link":"(.+?)","post_content":".+?",'
                          '"image":".+?","year":"%s"') % (name, year)
        url = search.basic_search(links.link().yify_search, name, imdbid, year,
                                  searchresponse, 'Name')
        if url:
            playurl = addonplay % (urllib.quote_plus(name + ' (' + year + ')'),
                                   urllib.quote_plus(url))
            basic.writefile(strmPath, 'w', playurl)
def createstrm(name, imdbid, year, url):
    addon_id = links.link().kmediatorrent_id
    addon_path = os.path.join(links.link().installfolder, addon_id)
    addon_getsettings = links.link().getSetting("kmediatorrent_enabled")
    addon_pos = links.link().getSetting("kmediatorrent_pos")
    if len(addon_pos) == 1:
        addon_pos = '0' + addon_pos
    strmBasePath = links.link().strmPath
    addonplay = links.link().kmediatorrent_play
    if not os.path.exists(addon_path) and addon_getsettings == 'true':
        links.link().setSetting("kmediatorrent_enabled", 'false')
    if addon_getsettings == 'true':
        qual, magnet = search.ytssearch(imdbid)
        if magnet:
            # One .strm file per available quality.
            for i in range(0, len(magnet)):
                strmPath = os.path.join(strmBasePath,
                                        addon_pos + '.' + addon_id + '.' + qual[i] + '.strm')
                playurl = addonplay % (urllib.quote_plus(magnet[i]))
                basic.writefile(strmPath, 'w', playurl)
def createstrm(name, imdbid, year, url):
    addon_id = links.link().sdp_id
    addon_path = os.path.join(links.link().installfolder, addon_id)
    addon_getsettings = links.link().getSetting("sdp_enabled")
    addon_getsettingspref = links.link().getSetting("pref_sdp_source")
    addon_pos = links.link().getSetting("sdp_pos")
    if len(addon_pos) == 1:
        addon_pos = '0' + addon_pos
    strmBasePath = links.link().strmPath
    addonplay = links.link().sdp_search
    if not os.path.exists(addon_path) and addon_getsettings == 'true':
        links.link().setSetting("sdp_enabled", 'false')
    if addon_getsettings == 'true':
        strmPath = os.path.join(strmBasePath, addon_pos + '.' + addon_id + '.strm')
        url = search.sdpsearch(name, imdbid)
        if url == 'MATCH':
            # Map the source preference onto the addon's "automatic" flag.
            if addon_getsettingspref == 'All':
                automatic = ''
            elif addon_getsettingspref == 'Any':
                automatic = 'sim'
            else:
                automatic = addon_getsettingspref
            playurl = addonplay % (imdbid,
                                   urllib.quote_plus(name.replace(' (' + year + ')', '')),
                                   automatic)
            basic.writefile(strmPath, 'w', playurl)
def searchtrailer(name):
    ytpage = open_url(links.link().youtube_trailer_search % (urllib.unquote_plus(name)))
    youtubeid = re.compile('"videoId": "(.+?)"').findall(ytpage)
    # Guard against an empty result page instead of raising IndexError.
    return youtubeid[0] if youtubeid else None
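
# Hedged usage sketch tying searchtrailer to playtrailer (shown further
# below); the title here is purely illustrative.
# videoid = searchtrailer(urllib.quote_plus('Blade Runner'))
# if videoid:
#     playtrailer(videoid, 'Blade Runner')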
def searchmovie(id):
    basic.log(u"tmdb.searchmovie id: %s" % id)
    listgenre = []
    listcast = []
    listcastr = []
    genre = ''
    title = ''
    plot = ''
    tagline = ''
    director = ''
    writer = ''
    credits = ''
    poster = ''
    fanart = ''
    temptitle = ''
    originaltitle = ''
    # Serve from the local cache when enabled.
    if getSetting("cachesites") == 'true':
        cached = localdb.get_cache(id)
        if cached:
            response = {
                "label": cached[2],
                "originallabel": cached[3],
                "poster": cached[4],
                "fanart_image": cached[5],
                "imdbid": cached[0],
                "year": cached[6],
                "info": json.loads(cached[7])
            }
            return response
    jsonpage = basic.open_url(links.link().tmdb_info_default % (id))
    try:
        jdef = json.loads(jsonpage)
    except:
        # TMDb failed; fall back to OMDb when we were given an IMDb id.
        if 'tt' in str(id):
            try:
                jdef = omdbapi.searchmovie(str(id))
                return jdef
            except:
                return False
        else:
            return False
    # Prefer localized metadata when the interface language is not English.
    if LANG != 'en':
        try:
            jsonpage = basic.open_url(links.link().tmdb_info % (id, LANG))
            j = json.loads(jsonpage)
            temptitle = j['title'].encode('ascii', 'ignore').replace(' ', '')
            if temptitle != '':
                title = j['title']
                fanart = links.link().tmdb_backdropbase % (j["backdrop_path"])
                poster = links.link().tmdb_posterbase % (j["poster_path"])
                for g in j['genres']:
                    listgenre.append(g['name'])
                genre = ', '.join(listgenre)
                try:
                    plot = j['overview']
                except:
                    pass
                try:
                    tagline = j['tagline']
                except:
                    pass
                fanart = j["backdrop_path"]
                poster = j["poster_path"]
        except:
            pass
    temptitle = jdef['title'].encode('ascii', 'ignore').replace(' ', '')
    if temptitle != '':
        if not title:
            title = jdef['title']
    temporiginaltitle = jdef['original_title'].encode('ascii', 'ignore')
    if temptitle == '':
        originaltitle = jdef['title']
    if temporiginaltitle == '':
        originaltitle = jdef['title']
    else:
        originaltitle = jdef['original_title']
    if not poster:
        poster = jdef['poster_path']
    if not fanart:
        fanart = jdef['backdrop_path']
    if not fanart:
        fanart = poster
    if fanart:
        fanart = links.link().tmdb_backdropbase % (fanart)
    if poster:
        poster = links.link().tmdb_posterbase % (poster)
    if genre == '':
        for g in jdef['genres']:
            listgenre.append(g['name'])
        genre = ', '.join(listgenre)
    if not plot:
        plot = jdef['overview']
    if not tagline:
        tagline = jdef['tagline']
    try:
        trailer = links.link().youtube_plugin % (jdef['trailers']['youtube'][0]['source'])
    except:
        trailer = ''
    try:
        year = jdef["release_date"].split("-")[0]
    except:
        year = ''
    try:
        studio = jdef['production_companies'][0]['name']
    except:
        studio = ''
    for listc in jdef['credits']['cast']:
        listcastr.append(listc['name'] + '|' + listc['character'])
        listcast.append(listc['name'])
    for crew in jdef['credits']['crew']:
        if crew['job'] == 'Director':
            director = crew['name']
            break
    for crew in jdef['credits']['crew']:
        if crew['job'] == 'Story':
            credits = crew['name']
            break
    for crew in jdef['credits']['crew']:
        if crew['job'] == 'Writer':
            writer = crew['name']
            break
        if crew['job'] == 'Novel':
            writer = crew['name']
            break
        if crew['job'] == 'Screenplay':
            writer = crew['name']
            break
    duration = jdef['runtime']
    # Fill any holes from OMDb when the poster or runtime is missing and an
    # IMDb id is available.
    if (not poster or duration == 0) and jdef['imdb_id']:
        altsearch = omdbapi.searchmovie(jdef['imdb_id'], False)
        if not poster:
            poster = altsearch['poster']
        if not fanart:
            fanart = poster
        if not plot:
            plot = altsearch['info']['plot']
        if not tagline:
            tagline = altsearch['info']['plot']
        if not listcast:
            listcast = altsearch['info']['cast']
            listcastr = []
        if not duration:
            duration = altsearch['info']['duration']
        if not writer:
            writer = altsearch['info']['writer']
        if not director:
            director = altsearch['info']['director']
        if not genre:
            genre = altsearch['info']['genre']
    info = {
        "genre": genre,
        "year": year,
        "rating": jdef['vote_average'],
        "cast": listcast,
        "castandrole": listcastr,
        "director": director,
        "plot": plot,
        "plotoutline": plot,
        "title": title,
        "originaltitle": originaltitle,
        "duration": duration,
        "studio": studio,
        "tagline": tagline,
        "writer": writer,
        "premiered": jdef['release_date'],
        "code": jdef['imdb_id'],
        "credits": credits,
        "votes": jdef['vote_count'],
        "trailer": trailer
    }
    response = {
        "label": '%s (%s)' % (title, year),
        "originallabel": '%s (%s)' % (originaltitle, year),
        "poster": poster,
        "fanart_image": fanart,
        "imdbid": jdef['imdb_id'],
        "year": year,
        "info": info
    }
    if getSetting("cachesites") == 'true':
        if not str(id).startswith('tt'):
            tmdbid = id
        else:
            tmdbid = jdef['id']
        localdb.save_cache(jdef['imdb_id'], tmdbid,
                           '%s (%s)' % (title, year),
                           '%s (%s)' % (originaltitle, year),
                           poster, fanart, year, json.dumps(info))
    return response
def sdpsearch(name, imdb):
    threads = []
    result = []
    # Query the seven mirrors in parallel; each worker appends to result.
    for i in range(7):
        threads.append(threading.Thread(name=name + str(i),
                                        target=_sdpsearch,
                                        args=(name, links.link().sdp_search_add[i], result)))
    [i.start() for i in threads]
    [i.join() for i in threads]
    if result:
        for res in result:
            if 'MATCH' in res:
                return res
server_config = {
    'server.socket_host': '0.0.0.0',
    'server.socket_port': 443,
    'server.ssl_module': 'pyopenssl',
    'server.ssl_certificate': 'ssl/server.crt',
    'server.ssl_private_key': 'ssl/server.key',
    'tools.sessions.on': True,
    'tools.sessions.storage_type': "file",
    'tools.sessions.storage_path': "session_files",
    'tools.sessions.timeout': 180,
    #'server.ssl_certificate_chain':'gd_bundle.crt'
}

#cherrypy.tree.mount(main.shortener(),'/',config = funcs.conf)
#cherrypy.tree.mount(ut.utils(),'/ut',config = funcs.conf_ut)
cherrypy.tree.mount(unstatic.unstatic(), '/display', config=conf)
cherrypy.tree.mount(links.link(), '/', config=conf)
cherrypy.tree.mount(owners.owner(), '/owner', config=conf)
cherrypy.tree.mount(other.main(), '/other', config=conf)
#cherrypy.config.update({'error_page.404': error_page_404})
#cherrypy.server.socket_host = socket.gethostbyname(
#    socket.gethostname()  # set to own ip, so other computers can access
#)  # May not work on Linux
cherrypy.config.update(server_config)
cherrypy.engine.start()
cherrypy.engine.block()
#cherrypy.quickstart(shortener(), '/', conf)
def user(userID, userIDA, per, perA, andriod, ios, andA, iosA):
    # stopwords = stopwords.words('english')
    extractor = urlextract.URLExtract()
    translator = Translator()
    # user_id=[]
    # with open('users.csv', 'rb') as fil:
    #     user = csv.reader(fil)
    #     for row in user:
    #         user_id.append(row[2])

    # initialization
    # total_posts=0
    # total_postsA=0
    total_posts_per_year = 0
    total_posts_per_yearA = 0
    shared = 0
    added = 0
    posted = 0
    updated = 0
    sharedA = 0
    addedA = 0
    postedA = 0
    updatedA = 0
    langu = []
    languA = []
    months = [0] * 12
    monthsA = [0] * 12
    daysX_monthsY = np.zeros([12, 31])
    daysX_monthsYA = np.zeros([12, 31])
    userOSAND = 0
    UserOSIOS = 0
    userAOSAND = 0
    UserAOSIOS = 0
    season = [0] * 4   # winter spring summer autumn
    seasonA = [0] * 4  # winter spring summer autumn
    hashTags = []      # store hashtags used
    number_hash = 0
    weekends = 0
    postsWeekends52 = 0
    postsWeekends51 = 0
    postsWeekends50 = 0
    weekendsA = 0
    postsWeekends52A = 0
    postsWeekends51A = 0
    postsWeekends50A = 0
    average_posts_weekEnd = 0
    average_posts_summer = 0
    average_posts_winter = 0
    average_posts_spring = 0
    average_posts_autumn = 0
    tophash = []
    tags = []
    tagnbr = []
    tagsA = []
    tagnbrA = []
    taggedposts = 0
    taggedpostsA = 0
    weekposts52 = 0
    weekposts51 = 0
    weekposts50 = 0
    weekposts52A = 0
    weekposts51A = 0
    weekposts50A = 0
    daysweek = np.zeros([3, 7])
    daysweekA = np.zeros([3, 7])
    activityM52 = 0
    activityN52 = 0
    activityM51 = 0
    activityN51 = 0
    activityM50 = 0
    activityN50 = 0
    activityM52A = 0
    activityN52A = 0
    activityM51A = 0
    activityN51A = 0
    activityM50A = 0
    activityN50A = 0
    url = []
    urlA = []
    urlSize = 0
    urlSizeA = 0

    # whether they post in the morning M(1), at night N(0), or both (2)
    personalityActivityTime = [
        1, 1, 0, 1, 0, 1, 2, 2, 1, 1, 1, 1, 0, 0, 0, 2, 0, 1, 2, 1, 1, 0, 1,
        1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 2, 1, 0, 0, 2, 2, 2, 1, 2,
        1, 1
    ]
    # preferred post type: none 0, add 1, share 2, update 3, post 4
    personalityTypePost = [
        2, 0, 2, 2, 0, 2, 0, 0, 4, 0, 2, 4, 2, 2, 4, 2, 2, 2, 2, 0, 2, 0, 0,
        2, 2, 2, 2, 4, 2, 2, 2, 4, 4, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2,
        4, 2
    ]
    # posting frequency: none 0, hourly 1, a couple of times a day 2,
    # once per day 3, rarely 4
    personalityDay = [
        4, 4, 3, 4, 2, 4, 1, 4, 2, 4, 4, 4, 4, 4, 2, 4, 3, 4, 4, 4, 4, 4, 2,
        2, 2, 2, 2, 2, 2, 2, 4, 2, 2, 2, 4, 4, 4, 4, 4, 4, 2, 2, 2, 4, 2, 4,
        4
    ]
    # Raw strings so the backslashes survive as Windows path separators.
    path = r'dataset\user_posts_' + userID + '.csv'
    pathA = r'dataset\user_posts_' + userIDA + '.csv'

    def get_wordnet_pos(treebank_tag):
        if treebank_tag.startswith('J'):
            return wordnet.ADJ
        elif treebank_tag.startswith('V'):
            return wordnet.VERB
        elif treebank_tag.startswith('N'):
            return wordnet.NOUN
        elif treebank_tag.startswith('R'):
            return wordnet.ADV
        else:
            return wordnet.NOUN

    def lima(word, words):
        lemmatiser = WordNetLemmatizer()
        words_tag = dict(pos_tag(words))
        return lemmatiser.lemmatize(word, get_wordnet_pos(words_tag.get(word)))

    def clean(words):
        # Tokenize, drop punctuation/stopwords/digits, then lemmatize.
        tknzr = TweetTokenizer()
        words = tknzr.tokenize(words)
        exclude = set(string.punctuation)
        words2 = [word for word in words if not word in exclude]
        words_tag = dict(pos_tag(words))
        words = [
            word.lower() for word in words2
            if not word in nltk.corpus.stopwords.words('english')
            and not word.isdigit()
        ]
        words = [lima(word, words) for word in words]
        words = ' '.join(words)
        return words

    def display_topics(model, feature_names, no_top_words):
        for topic_idx, topic in enumerate(model.components_):
            l = "Topic %d:" % (topic_idx)
            l = " ".join([
                feature_names[i].encode("utf-8")
                for i in topic.argsort()[:-no_top_words - 1:-1]
            ])
            tophash.append(feature_names[i].encode("utf-8"))

    def topic_hash(hashtags):
        vectorizer = TfidfVectorizer(min_df=0.2, stop_words='english')
        X = vectorizer.fit_transform(hashtags)
        no_topics = min(10, len(hashtags))
        nmf = NMF(n_components=no_topics, random_state=1, alpha=.1,
                  l1_ratio=.5, init='nndsvd').fit(X)
        display_topics(nmf, vectorizer.get_feature_names(), 1)

    def extract_hash(word):
        # Collect every '#tag' substring into hashTags.
        i = 0
        h = ''
        while i < len(word):
            s = word[i]
            if s == '#':
                i += 1
                while (i < len(word) and
                       not (word[i] == '#' or word[i] == ' ' or word[i] == "\n")):
                    s = word[i]
                    h += s
                    i += 1
                hashTags.append(h)
                h = ''
            else:
                i += 1

    def get_season(now):
        if isinstance(now, datetime):
            now = now.date()
        return next(s for s, (start, end) in seasons if start <= now <= end)

    def get_tags(tag):
        # The tag column is a stringified list of unicode ids; strip the
        # repr noise and keep the unique integer ids.
        tag = tag.replace('u', '')
        tag = tag.replace('[', '')
        tag = tag.replace(']', '')
        tag = tag.replace('\'', '')
        tag = tag.split(',')
        tagnbr.append(len(tag))
        i = 0
        while i < len(tag):
            if int(tag[i]) not in tags:
                tags.append(int(tag[i]))
            i += 1
        return tags

    def get_tags_anoamly(tagA):
        tagL = tagA.replace('u', '')
        tagL = tagL.replace('[', '')
        tagL = tagL.replace(']', '')
        tagL = tagL.replace('\'', '')
        tagL = tagL.split(',')
        tagnbrA.append(len(tagL))
        l = 0
        while l < len(tagL):
            if int(tagL[l]) not in tagsA:
                tagsA.append(int(tagL[l]))
            l += 1
        return tagsA

    with open(path, 'rb') as f:
        posts = csv.reader(f)
        for items in posts:
            # parse the post timestamp
            datetime_object = datetime.strptime(items[3], "%Y-%m-%d %H:%M:%S")
            hour = datetime_object.hour
            month = datetime_object.month
            year = datetime_object.year
            day = datetime_object.day
            dates = datetime.date(datetime_object)
            seasons = [('winter', (date(year, 1, 1), date(year, 3, 20))),
                       ('spring', (date(year, 3, 21), date(year, 6, 20))),
                       ('summer', (date(year, 6, 21), date(year, 9, 22))),
                       ('autumn', (date(year, 9, 23), date(year, 12, 20))),
                       ('winter', (date(year, 12, 21), date(year, 12, 31)))]
            # activity in 2017
            if year == 2017:
                total_posts_per_year += 1
                # posts/month
                months[month - 1] += 1
                # posts/day
                daysX_monthsY[month - 1][day - 1] += 1
                # week number
                weekNumber = dates.isocalendar()[1]
                # activity over the last 3 weeks of 2017: total posts per
                # week, posts per weekday, and morning/night activity
                if weekNumber == 52:
                    weekend = datetime_object.weekday()
                    if weekend == 4 or weekend == 5:
                        postsWeekends52 += 1
                    weekposts52 += 1
                    daysweek[0][(dates.weekday()) - 1] += 1
                    if hour >= 6 and hour < 18:
                        activityM52 += 1
                    elif hour >= 18 and hour < 24:
                        activityN52 += 1
                    elif hour >= 0 and hour < 6:
                        activityN52 += 1
                if weekNumber == 51:
                    weekend = datetime_object.weekday()
                    if weekend == 4 or weekend == 5:
                        postsWeekends51 += 1
                    weekposts51 += 1
                    if hour >= 6 and hour < 18:
                        activityM51 += 1
                    elif hour >= 18 and hour < 24:
                        activityN51 += 1
                    elif hour >= 0 and hour < 6:
                        activityN51 += 1
                    daysweek[1][(dates.weekday()) - 1] += 1
                if weekNumber == 50:
                    weekend = datetime_object.weekday()
                    if weekend == 4 or weekend == 5:
                        postsWeekends50 += 1
                    weekposts50 += 1
                    if hour >= 6 and hour < 18:
                        activityM50 += 1
                    elif hour >= 18 and hour < 24:
                        activityN50 += 1
                    elif hour >= 0 and hour < 6:
                        activityN50 += 1
                    daysweek[2][(dates.weekday()) - 1] += 1
                # which post type they use most: share/add/update/post
                if items[4] == 'added':
                    added += 1
                elif items[4] == 'updated':
                    updated += 1
                elif items[4] == 'posted':
                    posted += 1
                else:
                    shared += 1
                # season
                if get_season(datetime_object) == 'winter':
                    season[0] += 1
                elif get_season(datetime_object) == 'spring':
                    season[1] += 1
                elif get_season(datetime_object) == 'summer':
                    season[2] += 1
                elif get_season(datetime_object) == 'autumn':
                    season[3] += 1
                # weekend days (Fri/Sat columns) in December 2017
                weekends = len(
                    [1 for i in calendar.monthcalendar(2017, 12) if i[5] != 0])
                weekends += len(
                    [1 for i in calendar.monthcalendar(2017, 12) if i[4] != 0])
                # language detection
                if items[0]:
                    t = translator.detect(json.dumps(items[0].decode('utf-8')))
                    langu.append(t.lang)
                    # msg=items[0]
                    # if(t.lang=="en"):
                    #     message.append(clean(msg).encode('utf-8'))
                # tags
                k = items[5]
                if len(k) > 2:
                    taggedposts += 1
                    get_tags(k)
                # urls
                urls = extractor.find_urls(items[0])
                url.append(urls)
                urlSize += len(urls)
                # extract and count hashtags
                extract_hash(items[0])
                number_hash += items[0].count('#')

    # anomaly user
    with open(pathA, 'rb') as files:
        postsA = csv.reader(files)
        for anomaly in postsA:
            # parse the post timestamp
            datetime_object = datetime.strptime(anomaly[3], "%Y-%m-%d %H:%M:%S")
            hour = datetime_object.hour
            month = datetime_object.month
            year = datetime_object.year
            day = datetime_object.day
            dates = datetime.date(datetime_object)
            seasons = [('winter', (date(year, 1, 1), date(year, 3, 20))),
                       ('spring', (date(year, 3, 21), date(year, 6, 20))),
                       ('summer', (date(year, 6, 21), date(year, 9, 22))),
                       ('autumn', (date(year, 9, 23), date(year, 12, 20))),
                       ('winter', (date(year, 12, 21), date(year, 12, 31)))]
            # activity in 2017
            if year == 2017:
                total_posts_per_yearA += 1
                # posts/month
                monthsA[month - 1] += 1
                # posts/day
                daysX_monthsYA[month - 1][day - 1] += 1
                # week number
                weekNumber = dates.isocalendar()[1]
                # activity over the last 3 weeks of 2017
                if weekNumber == 52:
                    weekend = datetime_object.weekday()
                    if weekend == 4 or weekend == 5:
                        postsWeekends52A += 1
                    weekposts52A += 1
                    daysweekA[0][(dates.weekday()) - 1] += 1
                    if hour >= 6 and hour < 18:
                        activityM52A += 1
                    elif hour >= 18 and hour < 24:
                        activityN52A += 1
                    elif hour >= 0 and hour < 6:
                        activityN52A += 1
                if weekNumber == 51:
                    weekend = datetime_object.weekday()
                    if weekend == 4 or weekend == 5:
                        postsWeekends51A += 1
                    weekposts51A += 1
                    if hour >= 6 and hour < 18:
                        activityM51A += 1
                    elif hour >= 18 and hour < 24:
                        activityN51A += 1
                    elif hour >= 0 and hour < 6:
                        activityN51A += 1
                    daysweekA[1][(dates.weekday()) - 1] += 1
                if weekNumber == 50:
                    weekend = datetime_object.weekday()
                    if weekend == 4 or weekend == 5:
                        postsWeekends50A += 1
                    weekposts50A += 1
                    if hour >= 6 and hour < 18:
                        activityM50A += 1
                    elif hour >= 18 and hour < 24:
                        activityN50A += 1
                    elif hour >= 0 and hour < 6:
                        activityN50A += 1
                    daysweekA[2][(dates.weekday()) - 1] += 1
                # which post type they use most: share/add/update/post
                if anomaly[4] == 'added':
                    addedA += 1
                elif anomaly[4] == 'updated':
                    updatedA += 1
                elif anomaly[4] == 'posted':
                    postedA += 1
                else:
                    sharedA += 1
                # season
                if get_season(datetime_object) == 'winter':
                    seasonA[0] += 1
                elif get_season(datetime_object) == 'spring':
                    seasonA[1] += 1
                elif get_season(datetime_object) == 'summer':
                    seasonA[2] += 1
                elif get_season(datetime_object) == 'autumn':
                    seasonA[3] += 1
                weekendsA = len(
                    [1 for i in calendar.monthcalendar(2017, 12) if i[5] != 0])
                weekendsA += len(
                    [1 for i in calendar.monthcalendar(2017, 12) if i[4] != 0])
                # tags
                kA = anomaly[5]
                if len(kA) > 2:
                    taggedpostsA += 1
                    get_tags_anoamly(kA)
                # language detection
                tA = translator.detect(json.dumps(anomaly[0].decode('utf-8')))
                languA.append(tA.lang)
                # msgA=anomaly[0]
                # if(tA.lang=='en'):
                #     messageA.append(clean(msgA).encode('utf-8'))
                # urls in the anomaly user's own post text
                urlsA = extractor.find_urls(anomaly[0])
                urlA.append(urlsA)
                urlSizeA += len(urlsA)
                # extract and count hashtags
                extract_hash(anomaly[0])
                number_hash += anomaly[0].count('#')

    # average metrics kept from an earlier pass:
    # average_nbr_words = sum(postLength) / len(postLength)
    # average_nbr_posts_month = sum(months) / 12.0
    # average_nbr_links = urls_Size / float(total_posts)
    # average_posts_weekEnd = postsWeekends / float(total_posts)
    # average_posts_autumn = season[3] / float(total_posts_per_year)
    # average_posts_summer = season[2] / float(total_posts_per_year)
    # average_posts_spring = season[1] / float(total_posts_per_year)
    # average_posts_winter = season[0] / float(total_posts_per_year)

    scoreA1 = 2
    scoreB1 = 2
    # if(andriod==andA): scoreB1+=1
    # else: scoreA1+=1
    # if(ios==iosA): scoreB1+=1
    # else: scoreA1+=1

    import topicModel
    scoreTA, scoreTB = topicModel.topic(userID, userIDA, scoreA1, scoreB1, 1)

    import lang
    scoreLA, scoreLB = lang.language(langu, languA, total_posts_per_year,
                                     total_posts_per_yearA, scoreA1, scoreB1, 1)

    import typeOfPost
    scorePA, scorePB = typeOfPost.type_post(added, shared, updated, posted,
                                            total_posts_per_year, addedA,
                                            sharedA, updatedA, postedA,
                                            total_posts_per_yearA,
                                            personalityTypePost[per],
                                            scoreA1, scoreB1, 1)

    import tagged
    scoreTagA, scoreTagB = tagged.tagged(tags, tagnbr, taggedposts,
                                         total_posts_per_year, tagsA, tagnbrA,
                                         taggedpostsA, total_posts_per_yearA,
                                         scoreA1, scoreB1, 1)

    import freq
    scoreFA, scoreFB = freq.frequency(
        postsWeekends52, postsWeekends51, postsWeekends50, weekends,
        postsWeekends52A, postsWeekends51A, postsWeekends50A, weekendsA,
        activityM52, activityM51, activityM50, activityN52, activityN51,
        activityN50, activityM52A, activityM51A, activityM50A, activityN52A,
        activityN51A, activityN50A, personalityActivityTime[per], daysweek,
        daysweekA, season, total_posts_per_year, seasonA,
        total_posts_per_yearA, weekposts52, weekposts51, weekposts50,
        weekposts52A, weekposts51A, weekposts50A, personalityDay[per],
        scoreA1, scoreB1, 1)

    import links
    links.link(url, urlSize, total_posts_per_year, urlA, urlSizeA,
               total_posts_per_yearA)
    return (scoreTA, scoreTB, scoreLA, scoreLB, scorePA, scorePB,
            scoreTagA, scoreTagB, scoreFA, scoreFB)
def playtrailer(url, name):
    if url is None:
        return
    url = links.link().youtube_plugin % (url)
    item = xbmcgui.ListItem(path=url)
    item.setProperty("IsPlayable", "true")
    xbmc.Player().play(url, item)
import math

import numpy as np

from euler_angles import eulerAnglesXYXToRotationMatrix, RMatrixXYXToEulerAngles
from kinematics import forward_kinematics, inverse_kinematics
from joints import joint
from links import link
from robot import robot
from util import deg_to_rad

# set up the robot: six links (offset vector in mm plus link index)...
links = [
    link(np.array([0, 0, 287]), 0),
    link(np.array([-180, 0, 70]), 1),
    link(np.array([0, 0, 370]), 2),
    link(np.array([0, 0, 179.5]), 3),
    link(np.array([0, 0, 256]), 4),
    link(np.array([0, 0, 124]), 5)
]
# ...and six joints (initial angle, rotation axis, limits in degrees,
# connected link indices; -1 marks the end effector)
joints = [
    joint(0, np.array([0, 0, 1]), [-180, 180], [0, 1]),
    joint(0, np.array([0, 1, 0]), [-138, 64], [1, 2]),
    joint(0, np.array([0, 1, 0]), [-124, 124], [2, 3]),
    joint(0, np.array([0, 0, 1]), [-180, 180], [3, 4]),
    joint(0, np.array([0, 1, 0]), [-140, 141], [4, 5]),
    joint(0, np.array([0, 0, 1]), [-math.inf, math.inf], [5, -1])
]
r6Robot = robot(links, joints)

# forward
forward_kinematics
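
# A hedged sketch of driving the robot defined above; the
# forward_kinematics signature used here is hypothetical, not taken
# from kinematics.py.
# target_angles = [deg_to_rad(a) for a in [0, -30, 45, 0, 60, 0]]  # hypothetical joint targets
# end_effector_pose = forward_kinematics(r6Robot, target_angles)   # assumed signature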
def searchmovie(id, an=None, cache=True):
    basic.log(u"omdbapi.searchmovie id: %s" % id)
    listgenre = []
    listcast = []
    listcastr = []
    genre = ''
    title = ''
    plot = ''
    tagline = ''
    director = ''
    writer = ''
    credits = ''
    poster = ''
    fanart = ''
    trailer = ''
    year = ''
    dur = 0
    if cache:
        if getSetting("cachesites") == 'true':
            cached = localdb.get_cache(id, an)
            if cached:
                response = {
                    "label": cached[2],
                    "originallabel": cached[3],
                    "poster": cached[4],
                    "fanart_image": cached[5],
                    "imdbid": cached[0],
                    "year": cached[6],
                    "info": json.loads(cached[7])
                }
                return response
    if an:
        # When 'an' (the year) is given, id is a scraped record rather than
        # an IMDb id; unpack its fields and skip the OMDb lookup.
        ordine = id[0]
        imagine = id[1]
        nume = id[2]
        an = id[3]
        regia = id[4]
        actori = id[5]
        gen = id[6]
        nota = id[7]
        trailer = id[8]
        descriere = id[9]
        id = '1'
        #jsonpage = basic.open_url(links.link().omdbapi_byname % (nume.encode('ascii','xmlcharrefreplace'), an))
        jsonpage = {}  # makes json.loads below fail over to the scraped fields
    else:
        jsonpage = basic.open_url(links.link().omdbapi_info % (id))
    try:
        jdef = json.loads(jsonpage)
    except:
        try:
            nume = nume.decode('utf-8')
        except:
            nume = nume
        jdef = {
            'Title': nume,
            'Poster': imagine,
            'Genre': striphtml(gen),
            'Plot': descriere,
            'Year': an,
            'Actors': re.sub('Cu: ', '', striphtml(actori)),
            'Director': re.sub('Regia: ', '', striphtml(regia)),
            'Writer': '',
            'Runtime': '',
            'imdbRating': re.sub('IMDB: ', '', nota),
            'imdbVotes': '',
            'trailer': trailer
        }
    try:
        title = jdef['Title']
    except:
        title = nume
    try:
        poster = jdef['Poster']
    except:
        poster = imagine
    fanart = poster
    try:
        genre = jdef['Genre']
    except:
        genre = striphtml(gen)
    try:
        plot = jdef['Plot']
    except:
        plot = descriere
    tagline = plot
    try:
        year = re.findall('(\d+)', jdef['Year'], re.DOTALL)[0]
    except:
        try:
            year = jdef['Year']
        except:
            year = an
    try:
        listcast = jdef['Actors'].split(', ')
    except:
        listcast = re.sub('Cu: ', '', striphtml(actori)).split(', ')
    try:
        director = jdef['Director']
    except:
        director = re.sub('Regia: ', '', striphtml(regia))
    try:
        writer = jdef['Writer']
    except:
        writer = ''
    try:
        # Runtime arrives as "NN min" or "N h"; normalize to minutes.
        duration = re.findall('(\d+) min', jdef['Runtime'], re.DOTALL)
        if duration:
            dur = int(duration[0])
        else:
            duration = re.findall('(\d) h', jdef['Runtime'], re.DOTALL)
            if duration:
                dur = int(duration[0]) * 60
    except:
        duration = ''
    try:
        rating = jdef['imdbRating']
    except:
        rating = re.sub('IMDB: ', '', nota)
    try:
        votes = jdef['imdbVotes']
    except:
        votes = ''
    try:
        trailer = jdef['trailer']
    except:
        trailer = ''
    info = {
        "genre": genre,
        "year": year,
        "rating": rating,
        "cast": listcast,
        "castandrole": listcast,
        "director": director,
        "plot": plot,
        "plotoutline": plot,
        "title": title,
        "originaltitle": title,
        "duration": dur,
        "studio": '',
        "tagline": tagline,
        "writer": writer,
        "premiered": '',
        "code": id,
        "credits": '',
        "votes": votes,
        "trailer": trailer
    }
    response = {
        "label": '%s (%s)' % (title, year),
        "originallabel": '%s (%s)' % (title, year),
        "poster": poster,
        "fanart_image": fanart,
        "imdbid": id,
        "year": year,
        "info": info
    }
    if cache:
        if getSetting("cachesites") == 'true':
            localdb.save_cache(id, '', '%s (%s)' % (title, year),
                               '%s (%s)' % (title, year), poster, fanart,
                               year, json.dumps(info), an)
    return response
def searchmovie(id, cache=True):
    basic.log(u"omdbapi.searchmovie id: %s" % id)
    listgenre = []
    listcast = []
    listcastr = []
    genre = ''
    title = ''
    plot = ''
    tagline = ''
    director = ''
    writer = ''
    credits = ''
    poster = ''
    fanart = ''
    trailer = ''
    year = ''
    dur = 0
    if cache:
        if getSetting("cachesites") == 'true':
            cached = localdb.get_cache(id)
            if cached:
                response = {
                    "label": cached[2],
                    "originallabel": cached[3],
                    "poster": cached[4],
                    "fanart_image": cached[5],
                    "imdbid": cached[0],
                    "year": cached[6],
                    "info": json.loads(cached[7])
                }
                return response
    jsonpage = basic.open_url(links.link().omdbapi_info % (id))
    jdef = json.loads(jsonpage)
    title = jdef['Title']
    poster = jdef['Poster']
    fanart = poster
    genre = jdef['Genre']
    plot = jdef['Plot']
    tagline = plot
    try:
        year = re.findall('(\d+)', jdef['Year'], re.DOTALL)[0]
    except:
        year = jdef['Year']
    listcast = jdef['Actors'].split(', ')
    director = jdef['Director']
    writer = jdef['Writer']
    # Runtime arrives as "NN min" or "N h"; normalize to minutes.
    duration = re.findall('(\d+) min', jdef['Runtime'], re.DOTALL)
    if duration:
        dur = int(duration[0])
    else:
        duration = re.findall('(\d) h', jdef['Runtime'], re.DOTALL)
        if duration:
            dur = int(duration[0]) * 60
    info = {
        "genre": genre,
        "year": year,
        "rating": jdef['imdbRating'],
        "cast": listcast,
        "castandrole": listcast,
        "director": director,
        "plot": plot,
        "plotoutline": plot,
        "title": title,
        "originaltitle": title,
        "duration": dur,
        "studio": '',
        "tagline": tagline,
        "writer": writer,
        "premiered": '',
        "code": id,
        "credits": '',
        "votes": jdef['imdbVotes'],
        "trailer": ''
    }
    response = {
        "label": '%s (%s)' % (title, year),
        "originallabel": '%s (%s)' % (title, year),
        "poster": poster,
        "fanart_image": fanart,
        "imdbid": id,
        "year": year,
        "info": info
    }
    if cache:
        if getSetting("cachesites") == 'true':
            # OMDb has no separate original title, so the same label is
            # cached for both slots.
            localdb.save_cache(id, '', '%s (%s)' % (title, year),
                               '%s (%s)' % (title, year), poster, fanart,
                               year, json.dumps(info))
    return response