def _reset_screen(self, name, widget):
    """Remove any existing screen called `name`, install `widget` in its
    place, and make it the current screen."""
    if name in self.manager.screen_names:
        self.manager.clear_widgets(screens=[self.manager.get_screen(name)])
    self.manager.add_widget(widget)
    self.manager.current = name

def onChange(self, types, menu, text):
    """Dispatch a navigation-menu selection.

    Rebuilds the screen matching `menu` (discover / info / list / player /
    param) and switches to it, or handles TMDB account actions when
    `menu == "tmdb"`.  Unless an early return is taken in the tmdb branch,
    the navigation drawer is toggled at the end, as in the original flow.
    """
    EXlog('types %s / menu %s / Text %s' % (types, menu, text))
    if menu == "discover":
        self._reset_screen('discover',
                           ListDiscover(name="discover", types=types, menu=text))
    elif menu == "info":
        self._reset_screen('info', ListInfo(name="info"))
    elif menu == "list":
        self._reset_screen('list',
                           ListDiscover(name="list", types=types, menu=text))
    elif menu == "player":
        # BUG(review): `url` is not defined anywhere in this scope in the
        # original code (NameError at runtime).  Kept as-is rather than
        # guessing the intended source (possibly `text`) — TODO confirm.
        self._reset_screen('player', VideoAlan(name="player", path=url))
    elif menu == "param":
        self._reset_screen('param', ListParam(name="param"))
    elif menu == "tmdb":
        # Account actions return immediately and skip the drawer toggle,
        # exactly like the original.
        if text == 'account':
            return tmdb().connect()
        if text == 'decount':
            return tmdb().deconnect()
        if self.tmdb_username:
            self._reset_screen('list',
                               ListDiscover(name="list", types=types, menu=text))
        else:
            return tmdb().connect()
    self.ids.nav_layout.toggle_nav_drawer()
def process(self, args): # Perform any base class processing. if not super(IdProcess, self).process(args): return # Set up the configuration. self.configure() # Combine the title elements together since we allow both a # single variable or combined together. Once we have that, # normalize the name. title = " ".join(args.title) title = self.normalize_title(title) # Do a search for the title using the TMDB library. try: movie = tmdb.tmdb(title) if movie: print args.format % { "id": movie.getId(0), "title": movie.getName(0), } except: # We don't do anything in exceptions so it doesn't print data. pass
def __init__(self, **kwargs):
    """Build the info screen: fetch details from TMDB and copy every field
    of the first result onto this instance.

    Expects 'types' and 'tmdbid' in kwargs (forwarded to getByid).
    """
    super(ListInfo, self).__init__(**kwargs)
    # Renamed from `json`, which shadowed the stdlib module name.
    data = tmdb().getByid(kwargs['types'], kwargs['tmdbid'])
    # Blanket-copy the result dict onto self.
    # TODO(review): "to revisit, this is ugly" (original author note) —
    # consider explicit attributes instead of dynamic setattr.
    for key, value in data[0].items():
        setattr(self, key, value)
    #self.ids.cicle_l.add_widget(MyCircle(num = self.vote_average))
    self.ids.list_label.text = self.title
def coding_challenge():
    """Run the full pipeline: scrape, fetch TMDB data, then combine."""
    pipeline = (scraper, tmdb, combine)
    for step in pipeline:
        step()
#!/usr/bin/python2
"""Fetch a movie's TMDB metadata by title and insert it into the `filmer`
MySQL database.

Usage: script.py <title> <url>
"""
import tmdb
import _mysql
import sys

# SECURITY(review): hard-coded API key — move to a config file or env var.
tmdb.configure("0ed82318c3b01c2800a6fcffdc6d2d34")

titel = sys.argv[1]
url = sys.argv[2]
movie = tmdb.tmdb(titel)


def GetTitle(movie):
    """Return the title of the first search result."""
    return movie.getName(0)


def GetRating(movie):
    """Return the rating of the first search result."""
    return movie.getRating(0)


def GetImage(movie):
    """Return the cover-size poster URL of the first search result."""
    return movie.getPoster(0, "c")


def GetDescription(movie):
    """Return the overview of the first search result.

    Escaping is now centralized in _esc() below instead of the old ad-hoc
    quote replacement, which left other metacharacters unescaped.
    """
    return movie.getOverview(0)


# SECURITY(review): hard-coded DB credentials — load from config instead.
db = _mysql.connect(host="localhost", user="******", passwd=":-)", db="filmer")


def _esc(value):
    """Escape a value for safe inclusion inside a single-quoted SQL literal.

    Fixes the SQL injection in the original, which concatenated raw values
    into the statement (only the description had its quotes escaped).
    """
    return db.escape_string(str(value))


query = ("INSERT INTO film (titel,bild,beskrivning,rating,url) VALUES('"
         + _esc(GetTitle(movie)) + "', '" + _esc(GetImage(movie)) + "','"
         + _esc(GetDescription(movie)) + "','" + _esc(GetRating(movie))
         + "', '" + _esc(url) + "')")
db.query(query)
def updateDaumMovie(cate, metadata):
    """Populate a Plex `metadata` object for one movie.

    Sources, in order: the Daum detail/cast/photo pages, a TMDB collection
    lookup, Watcha reviews and user collections, and Naver critic reviews.
    `metadata` is mutated in place (cast, posters, art, reviews,
    collections).  `cate` is unused in this body — TODO confirm caller
    contract.
    """
    # (1) from detail page
    poster_url = None
    # metadata.id looks like "<daum_id>_...": keep only the Daum movie id.
    metadata_id = metadata.id.split('_')[0]
    update_movie_by_web(metadata, metadata_id)
    """
    try:
        data = JSON.ObjectFromURL(url=DAUM_MOVIE_DETAIL % metadata_id)
        info = data['data']
        metadata.title = info['titleKo']
        metadata.title_sort = unicodedata.normalize('NFKD', metadata.title)
        metadata.original_title = info['titleEn']
        metadata.genres.clear()
        metadata.year = int(info['prodYear'])
        try: metadata.content_rating = String.DecodeHTMLEntities(String.StripTags(info['admissionDesc']).strip())
        except: pass
        try: metadata.rating = float(info['moviePoint']['inspectPointAvg'])
        except: pass
        for item in info['genres']:
            metadata.genres.add(item['genreName'])
        try: metadata.duration = int(info['showtime'])*60
        except: pass
        try: metadata.originally_available_at = Datetime.ParseDate(info['releaseDate']).date()
        except: pass
        try: metadata.summary = String.DecodeHTMLEntities(String.StripTags(info['plot']).strip())
        except: pass
        metadata.countries.clear()
        for item in info['countries']:
            metadata.countries.add(item['countryKo'])
    except:
        update_movie_by_web(metadata, metadata_id)
    """
    # NOTE(review): `info` is only defined inside the commented-out block
    # above, so this always raises NameError and poster_url stays None.
    try:
        poster_url = info['photo']['fullname']
    except:
        pass

    # (2) cast crew — bucket Daum cast entries by their Korean role label.
    directors = list()
    producers = list()
    writers = list()
    roles = list()
    data = JSON.ObjectFromURL(url=DAUM_MOVIE_CAST % metadata_id)
    for item in data['data']:
        cast = item['castcrew']
        if cast['castcrewCastName'] in [u'감독', u'연출']:
            # director / showrunner
            director = dict()
            director['name'] = item['nameKo'] if item['nameKo'] else item['nameEn']
            if item['photo']['fullname']:
                director['photo'] = item['photo']['fullname']
            directors.append(director)
        elif cast['castcrewCastName'] == u'제작':
            # producer
            producer = dict()
            producer['name'] = item['nameKo'] if item['nameKo'] else item['nameEn']
            if item['photo']['fullname']:
                producer['photo'] = item['photo']['fullname']
            producers.append(producer)
        elif cast['castcrewCastName'] in [u'극본', u'각본']:
            # screenwriter
            writer = dict()
            writer['name'] = item['nameKo'] if item['nameKo'] else item['nameEn']
            if item['photo']['fullname']:
                writer['photo'] = item['photo']['fullname']
            writers.append(writer)
        elif cast['castcrewCastName'] in [u'주연', u'조연', u'출연', u'진행']:
            # lead / supporting / appearance / host
            role = dict()
            role['role'] = cast['castcrewTitleKo']
            role['name'] = item['nameKo'] if item['nameKo'] else item['nameEn']
            if item['photo']['fullname']:
                role['photo'] = item['photo']['fullname']
            roles.append(role)
        # else:
        #     Log.Debug("unknown role: castcrewCastName=%s" % cast['castcrewCastName'])

    # Only clear-and-replace each metadata list when we actually found entries.
    if directors:
        metadata.directors.clear()
        for director in directors:
            meta_director = metadata.directors.new()
            if 'name' in director:
                meta_director.name = director['name']
            if 'photo' in director:
                meta_director.photo = director['photo']
    if producers:
        metadata.producers.clear()
        for producer in producers:
            meta_producer = metadata.producers.new()
            if 'name' in producer:
                meta_producer.name = producer['name']
            if 'photo' in producer:
                meta_producer.photo = producer['photo']
    if writers:
        metadata.writers.clear()
        for writer in writers:
            meta_writer = metadata.writers.new()
            if 'name' in writer:
                meta_writer.name = writer['name']
            if 'photo' in writer:
                meta_writer.photo = writer['photo']
    if roles:
        metadata.roles.clear()
        for role in roles:
            meta_role = metadata.roles.new()
            if 'role' in role:
                meta_role.role = role['role']
            if 'name' in role:
                meta_role.name = role['name']
            if 'photo' in role:
                meta_role.photo = role['photo']

    # (3) from photo page — category '1' = posters, '2'/'50' = artwork.
    url_tmpl = DAUM_MOVIE_PHOTO
    data = JSON.ObjectFromURL(url=url_tmpl % metadata_id)
    max_poster = int(Prefs['max_num_posters'])
    max_art = int(Prefs['max_num_arts'])
    idx_poster = 0
    idx_art = 0
    for item in data['data']:
        if item['photoCategory'] == '1' and idx_poster < max_poster:
            art_url = item['fullname']
            if not art_url:
                continue
            #art_url = RE_PHOTO_SIZE.sub("/image/", art_url)
            idx_poster += 1
            try:
                metadata.posters[art_url] = Proxy.Preview(
                    HTTP.Request(item['thumbnail']), sort_order=idx_poster)
            except:
                pass
        elif item['photoCategory'] in ['2', '50'] and idx_art < max_art:
            art_url = item['fullname']
            if not art_url:
                continue
            #art_url = RE_PHOTO_SIZE.sub("/image/", art_url)
            idx_art += 1
            try:
                metadata.art[art_url] = Proxy.Preview(
                    HTTP.Request(item['thumbnail']), sort_order=idx_art)
            except:
                pass
    Log.Debug('Total %d posters, %d artworks' % (idx_poster, idx_art))
    # Fall back to the detail-page poster when the photo page yielded none.
    # (Dead in practice: poster_url is always None — see NOTE above.)
    if idx_poster == 0:
        if poster_url:
            poster = HTTP.Request(poster_url)
            try:
                metadata.posters[poster_url] = Proxy.Media(poster)
            except:
                pass

    ################ LifeForWhat additions (reviews & collections)
    watcha_headers = {
        'accept': 'application/vnd.frograms+json;version=20',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',
        'origin': 'https://watcha.com',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-site',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
        'x-watcha-client': 'watcha-WebApp',
        'x-watcha-client-language': 'ko',
        'x-watcha-client-region': 'KR',
        'x-watcha-client-version': '1.0.0'
    }
    """movie_name = unicodedata.normalize('NFKC', unicode("기생충")).strip()
    page = HTTP.Request('https://api.watcha.com/api/searches?query=%s' % (movie_name), headers = watcha_headers)
    Log.Info(str(page))"""
    # Clear existing reviews.
    metadata.reviews.clear()
    # Clear existing collections.
    metadata.collections.clear()

    # Look up the TMDB collection first.
    tmdb_title_for_search = metadata.original_title
    tmdb_year = metadata.year
    try:
        j, c = tmdb.tmdb().search(name=tmdb_title_for_search, year=tmdb_year)
        try:
            tmdb_collection = c['name']
            if tmdb_collection != "":
                metadata.collections.add('💿 ' + tmdb_collection)
        except Exception as e:
            Log.Info(str(e))
            pass
    except Exception as e:
        Log.Info(str(e))
        pass

    # Watcha: official-user comments become reviews; user collections
    # become Plex collections after heavy filtering.
    try:
        Log.Info('WATCHA SEARCHING TITLE : ' + metadata.title)
        Log.Info('WATCHA SEARCHING YEAR : ' + str(metadata.year))
        w = watcha.watcha(keyword=metadata.title,
                          year=int(metadata.year),
                          media_type='movies')
        w2 = w.info
        if Prefs['w_collection_by_flavor'] == True and Prefs['w_cookie'] != "":
            try:
                # Personalized predicted rating, rescaled from 10 to 5 stars.
                predicted_point = w.predicted_rating
                temp_string = "⭐ 왓챠 예상 별점 : %s" % str(
                    round((predicted_point / 2), 1))
                metadata.collections.add(temp_string)
                Log.Info("예상별점")
                Log.Info(temp_string)
            except Exception as e:
                import traceback
                # NOTE(review): traceback.print_exc is passed uncalled —
                # this logs the function object, not the traceback.
                Log.Info(str(e))
                Log.Info(str(traceback.print_exc))
        Log.Info('WATCHA SEARCHED TITLE : ' + str(w2['API_INFO']['title']))
        Log.Info('WATCHA SEARCHED YEAR : ' + str(w2['API_INFO']['year']))
        # w2['코멘트'] ("comments"): keep only official users' comments.
        for item in w2['코멘트']:
            # ⭐
            wname = ''
            wsource = u'왓챠'
            wtext = ''
            wline = ''
            wimage = ''
            offiYN = item['user']['official_user']
            if offiYN == True:
                wname = item['user']['name']
                if wname in Prefs['black_critic']:
                    continue
                wtext = item['text']
                wimage = item['user_content_action']['rating']
                if wname != "" and wtext != "" and wimage != "":
                    meta_review = metadata.reviews.new()
                    meta_review.author = wname
                    meta_review.source = u'왓챠'
                    # Angle brackets are swapped for lookalikes so Plex
                    # doesn't treat them as markup.
                    meta_review.text = '⭐ ' + str(
                        wimage) + ' | ' + wtext.replace('<', '〈').replace(
                            '>', '〉')
                    meta_review.link = 'https://www.watcha.com/'
                    if float(wimage) >= float(Prefs['thresh_hold_point']):
                        meta_review.image = 'rottentomatoes://image.review.fresh'
                    else:
                        meta_review.image = 'rottentomatoes://image.review.rotten'
        # Now the collection part.  Whitelist: awards/festival keywords;
        # blacklist: personal-list style keywords (my picks, favorites, ...).
        whitelist = ['수상', '아카데미', '영화제']
        blacklist_keyword = [
            '여성', '여자', '페미', '소장', '메모', '소장', '베스트', '내가', '나의', '최고',
            '본 영화', '보물', '볼 영화', '관람', '감상', '본것', '내 영화'
        ]
        blacklist_user = ['유정']
        try:
            d = {'watcha': w2}  # leftover indirection from copy-paste
            temp_list = d['watcha']['컬렉션']  # "collections"
        except:
            temp_list = []
        collections = []
        # Apply the various inclusion/exclusion rules.  Original author
        # note: Watcha collections are dominated by personal theme lists,
        # so filter aggressively.
        # Pass 1: always keep whitelisted or very popular collections.
        for coll in temp_list:
            for white in whitelist:
                if white in coll['title'] or coll['likes_count'] > 8000:
                    collections.append(coll['title'])
                    break
        # Pass 2: keep remaining collections that survive all filters.
        for coll in temp_list:
            if coll['likes_count'] < 100:
                continue  # drop collections with fewer than 100 likes
            keep_going = False
            years_list = re.findall('\d{4}', coll['title'])
            years_list = [
                item for item in years_list
                if int(item) > 1890 and int(item) < 2030
            ]
            if len(years_list) > 0:
                continue  # drop titles that contain a plausible year
            if keep_going == False:
                for black in blacklist_keyword:
                    if black in coll['title'].replace(' ', ' '):
                        keep_going = True
                        break
            if keep_going == False:
                for blackuser in blacklist_user:
                    if blackuser in coll['user']['name']:
                        keep_going = True
                        break
            if keep_going == False and coll['title'] not in collections:
                collections.append(coll['title'])
        #Log.Error(str(collections))
        final_black_list_keyword_list = Prefs[
            'collection_black_keyword'].split('|')
        for collection in collections:
            temp_string = collection
            # Prefix award-related titles with 🏆, everything else with 🎬.
            if temp_string.count('수상') > 0:
                temp_string = "🏆 " + temp_string
            elif temp_string.count('후보') > 0:
                temp_string = "🏆 " + temp_string
            elif temp_string.count('대상') > 0:
                temp_string = "🏆 " + temp_string
            elif temp_string.count('주연상') > 0:
                # NOTE(review): this try/except guards a plain string
                # concatenation that cannot raise — effectively a no-op.
                try:
                    temp_string = "🏆 " + temp_string
                except:
                    #Log.Info(str(temp_string))
                    pass
            else:
                temp_string = "🎬 " + temp_string
            # Finally, drop anything matching the user-configured blacklist.
            for item in final_black_list_keyword_list:
                if item in temp_string:
                    Log.Info(temp_string)
                    Log.Info(item)
                    temp_string = ""
                    # NOTE(review): `continue` only advances this inner
                    # loop; `break` was likely intended (net effect same).
                    continue
            if temp_string == "":
                continue
            metadata.collections.add(temp_string)
    except Exception as e:
        import traceback
        Log.Info(str(e))
        # NOTE(review): traceback.print_exc is passed uncalled here too.
        Log.Info(traceback.print_exc)

    # Naver part: critic reviews, same shaping as the Watcha reviews above.
    naver_result = naver.search(keyword=metadata.title,
                                year=int(metadata.year))
    crtics_naver = naver.critics(naver_result['code'])
    for item in crtics_naver:
        # ⭐
        wname = ''
        wsource = u'네이버'
        wtext = ''
        wline = ''
        wimage = ''
        wname = item['name']
        if wname in Prefs['black_critic']:
            continue
        wtext = item['text']
        wimage = item['score']
        if wname != "" and wtext != "" and wimage != "":
            meta_review = metadata.reviews.new()
            meta_review.author = wname
            meta_review.source = u'네이버'
            Log.Info(str(wtext))
            meta_review.text = '⭐ ' + str(wimage) + ' | ' + wtext.replace(
                '<', '〈').replace('>', '〉')
            # NOTE(review): link points to Watcha even for Naver reviews —
            # possibly intentional copy-paste, confirm.
            meta_review.link = 'https://www.watcha.com/'
            if float(wimage) >= float(Prefs['thresh_hold_point']):
                meta_review.image = 'rottentomatoes://image.review.fresh'
            else:
                meta_review.image = 'rottentomatoes://image.review.rotten'
def _jsonload(types, menu, NextPage):
    """Log the request parameters, then return the TMDB payload for the
    given (types, menu, NextPage) triple."""
    EXlog(("json", types, menu))
    client = tmdb()
    return client.getTmdb(types, menu, NextPage)
def callback(self, *args):
    """Handle a dialog answer: when the first argument is 'Yes', fetch a
    TMDB token and print it; otherwise do nothing."""
    answer = args[0]
    if answer != 'Yes':
        return
    token = tmdb().getToken()
    print(token)