async def crew_embed(input_name, cmd_alias):
    """Build an embed for a film-crew member searched by name.

    Resolves the person via Letterboxd (honoring any fixed-search override),
    then enriches the description with TMDb birth/death dates and a picture.
    """
    fixed_id = __check_if_fixed_search(input_name)
    person_json = await __search_letterboxd(input_name, cmd_alias, fixed_id)
    description, name, url, tmdb_id = __get_details(person_json)
    # TMDb person endpoint supplies dates and the profile picture.
    api_url = 'https://api.themoviedb.org/3/person/{}'.format(tmdb_id)
    description += await __get_dates(api_url)
    picture = await __get_picture(api_url)
    return create_embed(name, url, description, picture)
async def user_embed(username):
    """Build an embed summarizing a Letterboxd member's profile."""
    username = username.lower()
    url = 'https://letterboxd.com/{}'.format(username)
    # A fixed-search override skips the profile lookup entirely.
    member_id = __check_if_fixed_search(username)
    if not member_id:
        member_id = await __search_profile(username)
    member_json = await __get_userjson(member_id)
    display_name, avatar_url, description = await __get_infos(member_json, member_id)
    # Only the text part of the favourites is appended; poster links are unused here.
    fav_text, _ = __get_favs(member_json)
    description += fav_text
    return create_embed(display_name, url, description, avatar_url)
def get_embedding(cache, args, name):
    """Return the embedding registered under *name*, creating and caching it on first use.

    The embedding filename is taken from ``args.<name>``; when that attribute
    is None, the corresponding required 'train' argument is used as a fallback.

    Args:
        cache: mapping from name to a previously built embedding (None if absent).
        args: parsed argument namespace holding filename attributes.
        name: attribute name of the embedding argument (contains 'embedding').

    Returns:
        The (possibly newly created) embedding object.
    """
    res = cache[name]
    if res is None:
        # Bug fix: the original called get_required_arg(args, name) and then
        # immediately discarded the result via getattr — the dead call would
        # also raise before the intended fallback below could ever run.
        embedding_filename = getattr(args, name)
        if embedding_filename is None:
            # Fall back to the matching 'train' argument, which must exist.
            embedding_filename = get_required_arg(args, name.replace('embedding', 'train'))
        res = helpers.create_embed(embedding_filename)
        min_count = 1
        log("Created token dictionary of size {0} ({1} words, {2} special tokens) from {3} (min_count = {4})".format(res.token_count, res.word_count, res.special_token_count, embedding_filename, min_count))
        cache[name] = res
    return res
async def film_embed(keywords, with_mkdb=False):
    """Build an embed for the film best matching *keywords*.

    Keyword args:
        with_mkdb: when True, append an mkdb rating line to the description.
    """
    input_year = __check_year(keywords)
    fixed_id = __check_if_fixed_search(keywords)
    film_json = await __search_request(keywords, input_year, fixed_id)
    film_id = film_json['id']
    title = film_json['name']
    year = film_json.get('releaseYear')
    lbxd_url, tmdb_id, poster_path = __get_links(film_json)
    description = await __create_description(film_id, tmdb_id, title)
    if with_mkdb:
        description += await __get_mkdb_rating(lbxd_url)
    description += await __get_stats(film_id)
    # Append the release year to the title when it is known.
    if year:
        title += ' (' + str(year) + ')'
    return create_embed(title, lbxd_url, description, poster_path)
def get_embedding(cache, args, name):
    """Return the embedding registered under *name*, creating and caching it on first use.

    The embedding filename is taken from ``args.<name>``; when that attribute
    is None, the corresponding required 'train' argument is used as a fallback.

    NOTE(review): this file defines get_embedding twice with identical bodies;
    the later definition wins at import time — consider removing one.

    Args:
        cache: mapping from name to a previously built embedding (None if absent).
        args: parsed argument namespace holding filename attributes.
        name: attribute name of the embedding argument (contains 'embedding').

    Returns:
        The (possibly newly created) embedding object.
    """
    res = cache[name]
    if res is None:
        # Bug fix: the original called get_required_arg(args, name) and then
        # immediately discarded the result via getattr — the dead call would
        # also raise before the intended fallback below could ever run.
        embedding_filename = getattr(args, name)
        if embedding_filename is None:
            # Fall back to the matching 'train' argument, which must exist.
            embedding_filename = get_required_arg(
                args, name.replace('embedding', 'train'))
        res = helpers.create_embed(embedding_filename)
        min_count = 1
        log("Created token dictionary of size {0} ({1} words, {2} special tokens) from {3} (min_count = {4})"
            .format(res.token_count, res.word_count, res.special_token_count, embedding_filename, min_count))
        cache[name] = res
    return res
async def review_embed(username, film_search):
    """Build an embed of a member's logged entries for a given film."""
    username, display_name, user_id, __ = await user_details(username)
    film_id, film_title, film_year, poster_path, film_url = await film_details(film_search)
    # Member-scoped activity page for this film, e.g. letterboxd.com/<user>/film/...activity
    activity_url = film_url.replace('.com/', '.com/{}/'.format(username)) + 'activity'
    response, entry_count = await __find_entries(
        user_id, display_name, film_id, film_title, film_year)
    description, embed_url = __create_description(response, activity_url)
    # With several entries, link the whole activity page instead of one entry.
    if entry_count > 1:
        embed_url = activity_url
    noun = 'entries' if entry_count > 1 else 'entry'
    title = '{0} {1} of {2} ({3})'.format(display_name, noun, film_title, film_year)
    return create_embed(title, embed_url, description, poster_path)
async def list_embed(username, keywords):
    """Build an embed for one of a member's Letterboxd lists matching *keywords*."""
    # Only the member's Letterboxd id is needed here.
    _, _, owner_id, _ = await user_details(username)
    list_id = await __find_list(keywords, owner_id)
    description, url, poster_url, name = await __get_infos(list_id)
    return create_embed(name, url, description, poster_url)
async def diary_embed(username):
    """Build an embed of a member's recent diary activity."""
    username, display_name, user_id, avatar_url = await user_details(username)
    diary_url = 'https://letterboxd.com/{}/films/diary'.format(username)
    embed_title = 'Recent diary activity from {}'.format(display_name)
    activity_text = await __get_activity(user_id)
    return create_embed(embed_title, diary_url, activity_text, avatar_url)