def searchOMDB(title, year=None, doSearch=False, ver=None):
    try:
        c = 0
        res = None
        if doSearch:
            if Prefs["use_debug"]:
                Log("OMDB Search: Title:%s Year:%s" % (title, year))
            while res == None and c < 3:
                try:
                    if year == None:
                        res = omdb.search(title, c=Prefs['ca_api_key'], ver=ver)
                    else:
                        res = omdb.search('%s %s' % (title, year), c=Prefs['ca_api_key'], ver=ver)
                except:
                    c += 1
                    time.sleep(1.0)
        else:
            if Prefs["use_debug"]:
                Log("OMDB Request: Title:%s Year:%s" % (title, year))
            while res == None and c < 3:
                try:
                    if year == None:
                        res = omdb.request(t=title, r='json', c=Prefs['ca_api_key'], ver=ver, timeout=10)
                    else:
                        res = omdb.request(t=title, y=int(year), c=Prefs['ca_api_key'], ver=ver, r='json', timeout=10)
                except:
                    c += 1
                    time.sleep(1.0)
        return res
    except Exception as e:
        Log("interface.py>searchOMDB() >> : >>> %s" % (e))
        return None
def test_request(self):
    self.assertEqual(
        omdb.request(t='True Grit').json()['Title'], 'True Grit')
    self.assertEqual(
        omdb.request(i='tt0065126').json()['imdbID'], 'tt0065126')
    self.assertEqual(
        omdb.request(s='True Grit').json()['Search'][0]['Title'], 'True Grit')
def test_request():
    res = omdb.request(t='True Grit')
    assert res.json()['Title'] == 'True Grit'

    res = omdb.request(i='tt0065126')
    assert res.json()['imdbID'] == 'tt0065126'

    res = omdb.request(s='True Grit')
    assert res.json()['Search'][0]['Title'] == 'True Grit'
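The tests above exercise the raw omdb.request helper, which returns a requests.Response object. For reference, here is a minimal sketch of the request-and-parse pattern that most of the snippets below reimplement; the function name fetch_movie, the placeholder YOUR_OMDB_KEY, and the retry count are illustrative assumptions, not taken from any of the original sources.

import time

import omdb  # https://github.com/dgilland/omdb.py


def fetch_movie(title, year=None, api_key='YOUR_OMDB_KEY', retries=3):
    """Request one title from OMDb and return the decoded JSON, or None."""
    for _ in range(retries):
        try:
            res = omdb.request(t=title, y=year, r='json',
                               apikey=api_key, timeout=10)
            data = res.json()
            # OMDb signals lookup failures in-band via 'Response': 'False'
            return data if data.get('Response') == 'True' else None
        except Exception:
            time.sleep(1.0)  # transient network error: back off and retry
    return None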
def get_movies_info(movie_data):
    with open(movie_data.replace('movies.dat', 'movies_info.csv'), 'a') as f_w:
        with open(movie_data, 'r') as f_r:
            count = 0
            genres = [
                'Animation', "Children's", 'Comedy', 'Adventure', 'Fantasy',
                'Romance', 'Drama', 'Action', 'Crime', 'Thriller', 'Horror',
                'Sci-Fi', 'Documentary', 'War', 'Musical', 'Mystery',
                'Film-Noir', 'Western'
            ]
            for line in f_r.readlines():
                count += 1
                if count <= 2998:
                    continue
                line = line.split("::")
                title = line[1]
                year = title[-5:-1]
                title = title[:-7]
                genre = line[2].strip().split("|")
                for g in genre:
                    if g not in genres:
                        genres.append(g)
                        print("----------genres:", genres)
                res = omdb.request(t=title, y=year, r='json', apikey='432549e')
                content = res.content
                line = line[0] + ',' + str(content).replace('b\'', '').replace('\'', '')
                print(line)
                f_w.write(line + '\n')
def get_films():
    _raw = omdb.request(s="The H", plot="short", r="json").json()
    if "Search" in _raw:
        films = _raw["Search"]
        for f in films:
            film = omdb.imdbid(f["imdbID"])
            yield dict(
                (k.lower(), (None if v == 'N/A' else v))
                for k, v in film.items()
            )
def request_movies(movies):
    if 'omdb_movies_requests' not in CACHE_DICTION:
        CACHE_DICTION['omdb_movies_requests'] = [
            json.loads(omdb.request(t=m).text) for m in movies
        ]
        f = open(CACHE_FNAME, 'w')
        f.write(json.dumps(CACHE_DICTION))
        f.close()
    return [
        Movie(c['imdbID'], c['Title'].lower(), c['imdbRating'], c['Year'],
              c['Production'], c['Actors'])
        for c in CACHE_DICTION['omdb_movies_requests']
        if c['Response'] == 'True'
    ]
def get_actor_rating(movie):
    # Get imdb movie id
    options = omdb.search(movie)
    imdb_id = 0
    year = 0
    # simply find most recent movie with the title
    for option in options:
        if int(option["year"]) > year:
            year = int(option["year"])
            imdb_id = option["imdb_id"]
    res = omdb.request(i=imdb_id)
    data = res.json()
    if data["Response"] == "False":
        print "Movie not found"
        exit()
    actors = [a.strip() for a in data["Actors"].split(',')]
    directors = [d.strip() for d in data["Director"].split(',')]
    actor_scores = []
    director_scores = []
    for i in xrange(len(actors)):
        actor_scores.append((actors[i], get_score(actors[i], 100)))
    for i in xrange(len(directors)):
        director_scores.append((directors[i], get_score(directors[i], 100)))
    return actor_scores, director_scores, data["Ratings"]
def imdb(dict_response):
    message = ""
    try:
        query_text = dict_response['_text'].lower()
        if query_text.find("imdb ") != -1:
            query_text = query_text[5:]
        response = omdb.request(t='' + query_text + '', r='json')
        data = json.loads(response.text)
        mediatype = data["Type"]
        year = data["Year"]
        title = data["Title"]
        if mediatype == "movie":
            message += "Found a Movie, \"" + title + "\" (" + year + ")\n"
        elif mediatype == "series":
            message += "Found a TV show, \"" + title + "\" (" + year + ")\n"
        for key in data:
            if key in ["Rated", "Runtime", "Genre", "Director", "Writer"]:
                if data[key] != "N/A":
                    message += key + ": " + data[key] + "\n"
            if key == "imdbRating":
                message += "IMDB: " + data[key] + "\n"
        if data["Plot"] != "N/A":
            message += "Plot: " + data["Plot"]
    except:
        message = technical_issues()
    return message
def requestOMDB(t, y, Season, i, ver=None):
    try:
        if Prefs["use_debug"]:
            Log("OMDB Request: Title:%s Year:%s Season:%s imdb:%s" % (t, y, Season, i))
        c = 0
        res = None
        while res == None and c < 3:
            try:
                res = omdb.request(t=t, y=int(y), Season=str(Season), i=i,
                                   c=Prefs['ca_api_key'], ver=ver, r='json',
                                   timeout=10)
            except Exception as e:
                c += 1
                time.sleep(1.0)
        return res
    except Exception as e:
        Log("interface.py>requestOMDB() >> : >>> %s" % (e))
        return None
def populate_movie_from_OMDB(movie_info):
    # query API for movie title using OMDB API parameters
    title = movie_info.title  # urllib.quote_plus(movie_info.title)
    res = omdb.request(t=title, y=movie_info.year, r='JSON',
                       apikey="e5b6d27b", tomatoes="true")
    print "fetching [%s]" % title
    # Exception Handler: do this where you expect a failure
    try:
        json_content = json.loads(res.content)
    # do this if failure
    except:
        print res.content
        return
    # updates a column with datetime stamp
    movie_info.omdbLoad = datetime.datetime.now()
    # fetch attributes of json content to pass to movie_info object
    poster = check_api_result(json_content, 'Poster')
    if poster:
        movie_info.poster = poster
    imdbRating = check_api_result(json_content, 'imdbRating')
    if imdbRating:
        movie_info.imdbRating = float(imdbRating)
    imdbID = check_api_result(json_content, 'imdbID')
    if imdbID:
        movie_info.imdbID = imdbID
        movie_info.imdbURL = "http://www.imdb.com/title/%s" % imdbID
    runtime = check_api_result(json_content, 'Runtime')
    if runtime:
        movie_info.runtime = runtime.replace(' min', '')
    director = check_api_result(json_content, 'Director')
    if director:
        movie_info.director = director
    actors = check_api_result(json_content, 'Actors')
    if actors:
        movie_info.actors = actors
    tomatoMeter = check_api_result(json_content, 'tomatoMeter')
    if tomatoMeter:
        movie_info.tomatoMeter = int(tomatoMeter)
    tomatoUserRating = check_api_result(json_content, 'tomatoUserRating')
    if tomatoUserRating:
        movie_info.tomatoUserRating = float(tomatoUserRating)
    tomatoUserMeter = check_api_result(json_content, 'tomatoUserMeter')
    if tomatoUserMeter:
        movie_info.tomatoUserMeter = int(tomatoUserMeter)
    mpaa_rating = check_api_result(json_content, 'Rated')
    if mpaa_rating:
        movie_info.mpaa_rating = mpaa_rating
    metascore = check_api_result(json_content, 'Metascore')
    if metascore:
        movie_info.metascore = int(metascore)
    shortPlot = check_api_result(json_content, 'Plot')
    if shortPlot:
        movie_info.shortPlot = shortPlot
    dbsession.add(movie_info)
    dbsession.commit()
def scrape_omdb_id(omdb_id):
    """
    Given a single OMDB movie id (IMDB), return JSON movie metadata (or a
    failure message). The worker pool runs many processes for this function
    given our target id list.
    """
    time.sleep(0.1)  # Adjustable sleep
    try:
        # This is where we request data from OMDBAPI.com
        current_target = omdb.request(
            i=omdb_id, r='json', plot='full', apikey=api_key, timeout=5)
    except:
        # Failed to contact OMDB
        try:
            time.sleep(1)  # Sleeping
            # 2nd attempt w/ greater timeout
            current_target = omdb.request(
                i=omdb_id, r='json', plot='full', apikey=api_key, timeout=10)
        except:
            # Failed to contact OMDB twice
            current_target = None
    if (current_target != None):
        # We succeeded in contacting OMDB.
        try:
            # Current target JSON is valid
            current_target = current_target.json()
            if current_target['Response'] == "False":
                current_target['imdb_id'] = str(omdb_id)
            return current_target
        except:
            # Failed to parse current target JSON. Return failure.
            print("Failed to parse: {}".format(omdb_id))
            return {
                "Response": "False",
                "Error_Message": "Parse Error",
                "imdb_id": str(omdb_id)
            }
    else:
        # We failed to contact OMDB, return a failure
        print("Failed to connect: {}".format(omdb_id))
        return {
            "Response": "False",
            "Error_Message": "Connection Error",
            "imdb_id": str(omdb_id)
        }
def movie_from_title(movie_title):
    try:
        om_result = omdb.request(t=movie_title).json()
        img_url = om_result['Poster']
        year = om_result['Year']
        om_id = om_result['imdbID']
        return MovieShort(om_id, movie_title, year, img_url)
    except Exception as e:
        return None
def scrape_omdb(omdb_id_list):
    """
    When called, will scrape omdbapi for the range of imdb ids we generated
    during the generate_imdb_tt_list() step. Will display a progress bar so
    you know the script is still functioning.
    WARNING: If you set the range to 1 million records, expect the script to
    take up to 2-3 days.
    """
    imdb_json_data = {}  # Empty data field
    imdb_json_data['items'] = []  # Empty data list titled 'items'
    progress_iterator = 0  # For the progress bar
    # Every json_batch_iterator iterations we will revert to 0, limiting the
    # size of each json file.
    json_batch_iterator = 0
    # Create a new fileformat
    current_filename = str(omdb_id_list[0]) + "_" + str(omdb_id_list[-1]) + '.json'
    # for loop iterate over the list of imdb tags we generated
    for current_tag in omdb_id_list:
        # Sleeping 100 milliseconds, to attempt to mitigate cloudflare 524 errors
        sleep(0.1)
        try:
            # This is where we request data from OMDBAPI.com
            current_target = omdb.request(
                i=current_tag, r='json', plot='full', apikey=api_key, timeout=10)
        except:
            # We want to keep track of the IDs which were skipped due to timeout errors!
            skipped_ids.append(current_tag)
            continue
        # Check if the scraped data contains an error (such as exceeding the
        # quantity of their database's contents)
        if (current_target.status_code != 200):
            skipped_ids.append(current_tag)
            continue
        else:
            # If false: Something has gone wrong!
            if (current_target.json()['Response'] == "True"):
                try:
                    # Scrape succeeded. Store JSON.
                    imdb_json_data['items'].append(current_target.json())
                    pbar.update(progress_iterator + 1)  # Display incremented progress
                    progress_iterator += 1  # Iterate the progress bar for next iteration
                    json_batch_iterator += 1  # Iterate the json batch number iterator
                except:
                    skipped_ids.append(current_tag)  # Write failure to disk!
            else:
                skipped_ids.append(current_tag)  # Write failure to disk!
    # Final output, likely not triggered by the batch limit if statement above
    write_json_to_disk(current_filename, imdb_json_data['items'])
def handle_movie_details(movie_id):
    resp = omdb.request(i=movie_id).json()
    description = resp['Plot']
    title = resp["Title"]
    cover_url = resp["Poster"]
    year = resp["Year"]
    director = resp["Director"]
    data = requests.get(endpoint_url + "?$select=locations&title={0}".format(title)).json()
    print(data)
    return "HELLO"
def get_api(imdb_id):
    # try / except in case of miscellaneous errors
    try:
        # Get all movie information & reviews
        movie = db.execute("SELECT * FROM movies WHERE imdbid = :id", {
            "id": imdb_id
        }).fetchall()
        reviews = db.execute("SELECT * FROM reviews WHERE imdbid = :id", {
            "id": imdb_id
        }).fetchall()
        avgS = 0
        c = 0
        # Retrieve movie information from omdb
        imdb_id = movie[0][3]
        attrlist = [
            "Title", "Year", "Rated", "Released", "Runtime", "Genre",
            "Director", "Writer", "Actors", "Plot", "Language", "Country",
            "Awards", "Poster", "Metascore", "imdbRating", "imdbVotes",
            "imdbID", "Type", "DVD", "BoxOffice", "Production", "Website",
            "Response"
        ]
        omdbResults = omdb.request(i=imdb_id)
        omdbData = omdbResults.json()
        movieInfo = {}
        for attr in attrlist:
            movieInfo[attr] = omdbData[attr]
        # Outputs average score
        for review in reviews:
            avgS += review[2]
            c += 1
        avgS = avgS / c
        # Round to 1 decimal place
        avgS = round(avgS, 1)
        # Prepare results to be jsonified, ordered in terms of time added
        finalRes = {
            'title': movieInfo['Title'],
            'year': movieInfo['Year'],
            'imdb_id': movieInfo['imdbID'],
            'director': movieInfo['Director'],
            'actors': movieInfo['Actors'],
            'imdb_rating': movieInfo['imdbRating'],
            'review_count': len(reviews),
            'average_score': avgS
        }
        # Gives a jsonified output
        return jsonify(finalRes)
    except:
        abort(404)
def update_sched(request):
    '''
    Error checking for if user doesn't put all fields in
    '''
    if (request.POST.get('title') == '' or request.POST.get('year') == ''
            or request.POST.get('showdate') == ''
            or request.POST.get('showtimes') == ''):
        messages.info(request, "You're missing some fields!")
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    else:
        movie_title = request.POST.get('title')
        movie_year = request.POST.get('year')
        '''
        Check to see if movie exists in database already
        '''
        movie = Movie.objects.all().filter(title=movie_title).filter(
            year=movie_year).first()
        '''
        If movie does not exist in database...
        '''
        if movie == None:
            r = omdb.request(t=movie_title, y=movie_year, plot='short',
                             r='json', apikey='7e685318')
            result = json.loads(r.content)
            if result['Response'] == 'False':
                messages.info(request, "Movie not found in database...")
                return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
            score = '-1'
            for rating in result['Ratings']:
                if rating['Source'] == 'Rotten Tomatoes':
                    score = rating['Value']
            movie = Movie(imdb_id=result['imdbID'],
                          title=movie_title,
                          year=movie_year,
                          mpaa_rating=result['Rated'],
                          runtime=result['Runtime'],
                          genre=result['Genre'],
                          actors=result['Actors'],
                          writers=result['Writer'],
                          directors=result['Director'],
                          plot=result['Plot'],
                          poster_url=result['Poster'],
                          trailer_url='None',
                          rotten_tomatoes_rating=score,
                          image_path='None')
            movie.save()
        showtimes = request.POST.get('showtimes').split(',')
        id = Showings.objects.all().count()
        '''
        make a showing database entry then save it
        '''
        showtime = Showings(date=request.POST.get('showdate'),
                            time=request.POST.get('showtimes'),
                            attendance=-1,
                            showing_id=id,
                            movie=movie)
        showtime.save()
        return HttpResponseRedirect('/backend')
def _fetch_omdb(self):
    try:
        logger.debug('Fetching IMDB ID {id} ...'.format(id=self._imdb_id))
        res = omdb.request(i=self._imdb_id, r='json')
        raw_json = res.content.decode('utf-8')
        content = json.loads(raw_json)
        self._set_from_omdb(content)
    except (HTTPError, KeyError):
        pass
    else:
        # save to cache
        with open(self._cache_fpath, "wt") as fd:
            fd.write(raw_json)
def get_movie(title):
    if title in CACHE_DICTION:
        response = CACHE_DICTION[title]
    else:
        res = omdb.request(t=title)
        response = res.content.decode('ascii')
        CACHE_DICTION[title] = response
        cache_file = open(CACHE_FNAME, 'w', encoding='utf-8')
        cache_file.write(json.dumps(CACHE_DICTION))
        cache_file.close()
    response = json.loads(response)
    return response
def media_database_builder(file, API_KEY, start_index, stop_index):
    """
    Requests imdb movie and tv information using the omdb module and appends
    it to a dataframe.
    INPUTS: file = file containing imdb keys
            API Key
            Start index and Stop index for imdb keys
    OUTPUTS: Excel csv spreadsheet containing information
    """
    # convert unique imdb title to dataframe
    col_name = ['imdbid']
    imdb_id = pd.read_csv(file, sep='\t', names=col_name)
    omdb.set_default('apikey', API_KEY)
    client = OMDBClient(apikey=API_KEY)
    rows_list = []
    for i in range(int(start_index), int(stop_index)):
        print(i)
        movie_ID = imdb_id.iloc[i, 0]
        try:
            movie_dict = omdb.get(imdbid=movie_ID)
            rows_list.append(movie_dict)
            # Generates HTTP Error
            r = omdb.request(imdbid=movie_ID)
        # Value error - imdb ID yields dictionary that is empty
        # HTTPError (522) - Exceeded requests per unit time
        # Timeout - Overrules default timeout value
        except (ValueError, requests.HTTPError, requests.ReadTimeout) as err:
            if isinstance(err, ValueError):
                continue
            else:
                i = i - 1
                time.sleep(2.0)
                print('Error has occurred')
                print('Retry request in 2.0 seconds')
                continue
    media_df = pd.DataFrame(rows_list)
    media_csv = media_df.to_csv(r'media_data.csv', mode='a', header=False,
                                index=None, encoding="utf-8")
    return media_csv
def get_movies(self):
    # API_KEY = getattr(settings, 'API_KEY', None)
    omdb.set_default('apikey', API_KEY)
    for movie in MOVIES:
        wp_call = omdb.request(t=movie)
        if wp_call.status_code != 200:
            print("Erreur HTTP, code ", str(wp_call.status_code))
            exit(1)
        infos_json = wp_call.json()
        if infos_json and 'Error' in infos_json.keys():
            print('Error: ', infos_json['Error'])
        else:
            self.movies.append(Movie(infos_json))
def get_movie_details(movie):
    dict_details = {}
    run = True
    while run:
        try:
            res = omdb.request(t=movie, apikey='', r='xml')
            xml_content = res.content
            to_dict = dict(xmltodict.parse(xml_content)['root']['movie'])
            dict_details = [to_dict['@year'], to_dict['@imdbRating'],
                            to_dict['@genre'].split(',')[0],
                            to_dict['@actors'].split(',')[:2],
                            to_dict['@runtime']]
            run = False
        except Exception as e:
            run = False
            continue
    return dict_details
def get_pelicula(titulo):
    res = omdb.request(t=titulo)
    peli = res.json()
    info_ = ("\n\n\nTítulo\n" + peli['Title'] + "\n\n" +
             "Estreno\n" + peli['Released'] + "\n\n" +
             "Género\n" + peli['Genre'] + "\n\n" +
             "Elenco principal\n" + peli['Actors'] + "\n\n" +
             "Premios \n" + peli['Awards'] + "\n\n" +
             "Dirigida por\n" + peli['Director'] + "\n\n" +
             "Escrita por\n" + peli['Writer'] + "\n\n" +
             "Rating IMDb\n" + peli['imdbRating'] + " de 10 (" +
             peli['imdbVotes'] + " votos) \n\n" +
             "Produccion \n" + peli['Production'] + "\n\n" +
             peli['Poster'])
    return info_
def update_info(movie_data, info_data):
    movies = {}
    with open(movie_data, 'r') as f_r:
        for line in f_r.readlines():
            line = line.split("::")
            title = line[1]
            year = title[-5:-1]
            title = title[:-7]
            genre = line[2].strip().split("|")
            movies[line[0]] = [title, year, genre]
    print(movies)
    with open(info_data.replace('movies_info.csv', 'movies_update.csv'), 'a+') as f_w:
        with open(info_data, 'r') as f_r:
            count = 0
            for line in f_r.readlines():
                count += 1
                if count <= 2934:
                    continue
                if "Movie not found!" in line:
                    info = line.split(",")
                    num = info[0]
                    if "The" in movies[num][0]:
                        print("---------The in title!")
                        movies[num][0] = movies[num][0].replace(", The", "")
                        movies[num][0] = "The " + movies[num][0]
                        print("movies[num]", movies[num])
                    title = movies[num][0]
                    year = movies[num][1]
                    res = omdb.request(t=title, y=year, r='json', apikey='dc870354')
                    content = res.content
                    content = str(content).replace('b\'', '').replace('\'', '') + '\n'
                    if "Movie not found!" in content:
                        content = ("{\"Title\":\"" + str(movies[num][0]) +
                                   "\",\"Year\":\"" + str(movies[num][1]) +
                                   "\",\"Genre\":\"" + " ".join(movies[num][2]) +
                                   "\",\"Posters\":\"" + "N/A\"}" + "\n")
                    line = str(num) + ',' + content
                    print(line)
                    f_w.write(line)
def get_data(self):
    """Download information about the media via OMDB."""
    if not os.path.exists(self.data_path):
        logging.debug('Loading data on {}'.format(self.filename))
        response = omdb.request(t=self.query, y=self.year).json()
        if response['Response'] == 'True':
            with open(self.data_path, 'w') as f:
                json.dump(response, f)
            return response
        else:
            logging.error('Unable to load data for {}'.format(self.filename))
            return {}
    with open(self.data_path) as f:
        return json.loads(f.read())
def get_data(self):
    """Download information about the media via OMDB."""
    if not os.path.exists(self.data_path):
        logging.debug('Loading data on {}'.format(self.filename))
        response = omdb.request(t=self.query, y=self.year,
                                apikey=os.environ['OMDB_API_KEY']).json()
        if response['Response'] == 'True':
            with open(self.data_path, 'w') as f:
                json.dump(response, f)
            return response
        else:
            logging.error('Unable to load data for {}'.format(self.filename))
            return {}
    with open(self.data_path) as f:
        return json.loads(f.read())
def get_info(self):
    movieTitle = self.name.rstrip("\n")  # get rid of newline characters
    status = False
    try:
        res = omdb.request(t=movieTitle, plot='short', tomatoes='true',
                           timeout=5, apikey=self.apikey)
        info = json.loads(res.text)
        status = True
    except:
        self.status = False
    if status:
        self.IMDb_rating = info['Ratings'][0]['Value']
        self.poster = info['Poster']
        self.name = info['Title']
        self.plot = info['Plot']
        self.director = info['Director']
        self.genre = info['Genre'].split(",")
def omdb_request(self):
    API_KEY = getattr(settings, 'API_KEY', None)
    if API_KEY is None:
        print("Error: no API_KEY in settings.py")
        exit(1)
    moviemons = getattr(settings, 'MOVIEMONS', None)
    if moviemons is None:
        print("Error: no moviemons in settings.py")
        exit(1)
    omdb.set_default('apikey', API_KEY)
    for movie in moviemons:
        wp_call = omdb.request(t=movie)
        if wp_call.status_code != 200:
            print("Erreur HTTP, code ", str(wp_call.status_code))
            exit(1)
        infos_json = wp_call.json()
        if infos_json and 'Error' in infos_json.keys():
            print('Error: ', infos_json['Error'])
        else:
            self.moviemons[infos_json['Title']] = Moviemon(infos_json)
def search_movies(movies, parameter):
    movies_to_seach = []
    for title in movies:
        res = omdb.request(t=title, r='json')
        data = json.loads(res.content)
        if 'Error' in data:
            print('Movie not found!')
        else:
            release_date = data['Released']
            objDate = datetime.strptime(release_date, '%d %b %Y')
            release_date = datetime.strftime(objDate, '%d.%m.%Y')
            popularity = int(data['imdbVotes'].replace(',', ''))
            length = int((data['Runtime'].split())[0])
            title = data['Title']
            movies_to_seach.append({
                'Title': title,
                'Release date': release_date,
                'Rating': data['imdbRating'],
                'Popularity': popularity,
                'Length': length
            })
            movie_folder_name = title.replace(':', ' ').replace('?', ' ')
            movie_folder_path = path + '\\' + movie_folder_name
            if not os.path.exists(movie_folder_path):
                os.makedirs(movie_folder_path)
            poster = data['Poster']
            downloader(poster, movie_folder_name, movie_folder_path)
            file = open(movie_folder_path + "\\" + movie_folder_name + ".txt", "w")
            file.write(str(json.dumps(data, indent=4)))
            file.close()
    if parameter != 'null':
        movies_to_seach = sorted(movies_to_seach, key=lambda k: k[parameter],
                                 reverse=False)
    for movie in movies_to_seach:
        print(str(movie).replace("'", " "))
def what_movie(update, context):
    user = update.effective_user
    if context.args:
        title = context.args[0].strip()
        logger.info(f"{user.username} triggers what_movie: {title}")
        m = omdb.request(t=title)
        if m:
            # omdb.request returns a requests.Response; decode its JSON body
            # and report the matched title
            movie = m.json().get('Title', f"can't find movie {title}")
        else:
            movie = f"can't find movie {title}"
    else:
        movie = "/what_movie <title>"
    context.bot.send_message(
        chat_id=update.effective_chat.id,
        text=movie
    )
    logger.info("what_movie: %r" % movie)
def requestOMDB(title, year=None, season=None, imdb=None, ver=None, timeout=None):
    try:
        if Prefs["use_debug"]:
            Log("OMDB Request: Title:%s Year:%s Season:%s imdb:%s" % (title, year, season, imdb))
        if timeout == None:
            timeout = 10
        else:
            try:
                timeout = int(timeout)
            except:
                timeout = 10
        c = 0
        res = None
        while res == None and c < 3:
            try:
                res = omdb.request(t=title, y=year, Season=season, i=imdb,
                                   c=Prefs['ca_api_key'], ver=ver, r='json',
                                   timeout=timeout)
            except Exception as e:
                c += 1
                time.sleep(1.0)
        return res
    except Exception as e:
        Log("interface.py>requestOMDB() >> : >>> %s" % (e))
        return None
def get_info_film(fichier):
    """
    Get more information about the films (country of origin and genre) from
    the Open Movie Database.
    arg: XML file of film shoots
    output: XML file of shoots with genre and country information
    """
    # build the list of films we are interested in
    films = []
    # get title and director from our film file
    with open(fichier, "r") as fichierxml:
        tree = etree.parse(fichierxml)
        ti = tree.xpath("//film/Titre")
        rel = tree.xpath("//film/Realisateur")
        for titre, realisateur in zip(ti, rel):
            films.append((titre.text, realisateur.text))
    print("Collecte des données depuis OMBD...")
    genres = []  # genres of all the shoots
    payss = []  # countries of all the shoots
    # query OMDB for more information about each film;
    # we also search by director to avoid mixing up two different films with the same title
    for film, realisateur in films:
        res = omdb.request(t=film, director=realisateur)
        infofilm = json.loads((res.content).decode("utf-8"))
        try:
            genre = infofilm["Genre"]
        except KeyError:
            genre = "N/A"
        try:
            pays = infofilm["Country"]
        except KeyError:
            pays = "N/A"
        finally:
            print(film, genre, pays)
        genres.append(genre)
        payss.append(pays)
    print("Fin.")
    return genres, payss
def com_omdb_request(self, media_title, media_year, media_fullplot, media_tomatoes):
    omdb.request(media_title, media_year, media_fullplot, media_tomatoes)
def home():
    searchresults = []
    # Ensures is_logged & current_user are in session
    if 'is_logged' not in session:
        session['is_logged'] = False
    if 'current_user' not in session:
        session['current_user'] = ""
    # Uses the Movie Search Form from forms.py
    form = MovieSearchForm()
    # Only display form if user is logged in
    if session['is_logged'] == True:
        # Display search results
        if (form.search.data != None):
            searchresults = []
            # Find results from movies in DB
            results = db.execute(
                "SELECT * FROM movies WHERE title LIKE :src OR lowercase LIKE :src OR year LIKE :src OR imdbid LIKE :src;",
                {
                    "src": "%" + form.search.data + "%"
                }).fetchall()
            for result in results:
                # Use imdbID to retrieve more information from omdb
                imdb_id = result[3]
                attrlist = [
                    "Title", "Year", "Rated", "Released", "Runtime", "Genre",
                    "Director", "Writer", "Actors", "Plot", "Language",
                    "Country", "Awards", "Poster", "Metascore", "imdbRating",
                    "imdbVotes", "imdbID", "Type", "DVD", "BoxOffice",
                    "Production", "Website", "Response"
                ]
                # Retrieval from omdb
                omdbResults = omdb.request(i=imdb_id)
                omdbData = omdbResults.json()
                temp = {}
                # Attaches every attribute of movie from omdb into search result
                for attr in attrlist:
                    temp[attr] = omdbData[attr]
                # searchresults to be displayed by Jinja in home.html
                searchresults.append(temp)
            # Redirect to home page & display search results
            return render_template('home.html',
                                   accolades=accolades,
                                   is_logged=True,
                                   form=form,
                                   searchresults=searchresults)
    else:
        # Redirect user to home if user is NOT logged in but somehow manages to use the search bar
        # Prevent tampering from returning errors
        return render_template('home.html', accolades=accolades)
    return render_template('home.html',
                           accolades=accolades,
                           is_logged=True,
                           form=form)
def __pymoviedb_do():
    global movies
    global err_lines
    movies = {}
    err_lines = set()
    err_lines.add("# errors logged at program exec at %s\n" % datetime.utcnow())
    if os.path.isfile(_cfg_list_file()):
        with open(_cfg_list_file()) as data_file:
            n = json.load(data_file)
        for v in n:
            movies[v['imdbID']] = v
    l = _get_folders()
    for movie in l:
        _dir, _name, _cur_dir = movie[0], movie[1], os.getcwd()
        dat = {}
        print_v("processing: %s\n" % _name)
        dat['base'] = _name
        dat['res'] = 'NA'
        if __helpers.which('exiftool') is not None:
            match = __helpers.getFileMatches(_dir + _name, tuple(__cfg._exts))
            if match is not None:
                dat['res'] = __helpers.getFileRes(match)
        # search
        s_title = _name.replace("_", " ")
        imdbid = False
        res = None
        skip = False
        jsondb = None
        if os.path.isfile(_cfg_imdb_file(movie)):
            with open(_cfg_imdb_file(movie)) as f:
                imdbid = f.read()
            imdbid = imdbid.strip("\r\n")
        print("%s: " % _name, end='')
        sys.stdout.flush()
        if __cfg.force_regen == True or not os.path.isfile(_cfg_info_file(movie)):
            print("network: ", end='')
            sys.stdout.flush()
            if (imdbid):
                try:
                    res = omdb.request(i=imdbid)
                except:
                    skip = True
            else:
                try:
                    res = omdb.request(t=s_title)
                except:
                    skip = True
            if not skip:
                jsondb = json.loads(res.content.decode('utf-8'))
            else:
                jsondb = None
                print("fail", end='')
        else:
            # force_regen == false
            print("file: ", end='')
            sys.stdout.flush()
            with open(_cfg_info_file(movie)) as f:
                jsondb = json.loads(f.read())
            if len(jsondb) == 0:
                jsondb = None
                print("fail", end='')
            else:
                print("ok", end='')
        if jsondb == None or skip or 'Error' in jsondb.keys():
            print("...skip")
            err_lines.add("skipping %s%s\n" % (_dir, _name))
            # continue with next movie
            continue
        # goto next line
        print("\n", end='')
        dat['Title'] = jsondb['Title']
        dat['imdbID'] = jsondb['imdbID']
        dat['Year'] = jsondb['Year']
        dat['Released'] = jsondb['Released']
        dat['imdbRating'] = jsondb['imdbRating']
        dat['Language'] = jsondb['Language']
        movies[dat['imdbID']] = dat
        with open(_cfg_info_file(movie), 'w') as f:
            json.dump(dat, f, indent=2)
    # for movie in l: end
    # sort back movies
    n = sorted(movies.values(), key=itemgetter('base'))
    movies = {}
    for v in n:
        movies[v['imdbID']] = v
    # write movies
    with open(_cfg_list_file(), "w") as f:
        json.dump(n, f, indent=2)
    # write err
    with open(_cfg_err_file(), "w") as f:
        f.writelines(sorted(err_lines))
def getMovieData(title, year=None):
    res = omdb.request(t=title, y=year)
    return res.content
def movie(imdb_id):
    # Uses Review Form object from forms.py
    form = ReviewForm()
    # Update DB when new form is submitted
    if form.validate_on_submit():
        db.execute(
            "INSERT INTO reviews (userid, imdbid, rating, content) VALUES (:un, :ii, :ra, :co);",
            {
                "un": session['current_user'],
                "ii": imdb_id,
                "ra": form.select.data,
                "co": form.content.data
            })
        db.commit()
        return (redirect(url_for('movie', imdb_id=imdb_id)))
    # Ensure that movie exists
    movie = db.execute("SELECT * FROM movies WHERE imdbid = :id", {
        'id': imdb_id
    }).fetchone()
    if movie == None:
        abort(404)
    else:
        # Retrieve additional data from omdb
        imdb_id = movie[3]
        attrlist = [
            "Title", "Year", "Rated", "Released", "Runtime", "Genre",
            "Director", "Writer", "Actors", "Plot", "Language", "Country",
            "Awards", "Poster", "Metascore", "imdbRating", "imdbVotes",
            "imdbID", "Type", "DVD", "BoxOffice", "Production", "Website",
            "Response"
        ]
        omdbResults = omdb.request(i=imdb_id)
        omdbData = omdbResults.json()
        temp = {}
        for attr in attrlist:
            temp[attr] = omdbData[attr]
        # Get all reviews
        revu = db.execute("SELECT * FROM reviews WHERE imdbid = :imdbid", {
            "imdbid": imdb_id
        }).fetchall()
        attrlist2 = ["Author", "MovieID", "Rating", "ReviewContent"]
        reviews = []
        temp2 = {}
        # Check if user has submitted a review
        userRev = False
        for i in range(len(revu)):
            temp2 = {}
            for j in range(len(revu[i])):
                temp2[attrlist2[j]] = revu[i][j]
            reviews.append(temp2)
            if temp2['Author'] == session['current_user']:
                userRev = True
        # Sets reviews to blank list, not None
        if revu == None:
            reviews = []
        # Get average score from Metascore & imdbRating
        try:
            avgR = (float(temp['Metascore']) + float(temp['imdbRating']) * 10) / 2
        except:
            try:
                avgR = temp['imdbRating']
            except:
                avgR = temp['Metascore']
        return render_template("movie.html",
                               movie=temp,
                               reviews=reviews,
                               avgR=avgR,
                               form=form,
                               userRev=userRev)
# all titles in Cartelera
titles_enCartelera = tree.xpath('//*[@id="principalContainer"]/div[6]/div/ul[2]/li/a/text()')
film_data = tree.xpath('//p[@class="peliculaMiniFicha"]/text()')

# create dictionary of flicks for cartelera del dia
# if omdb request is True
# you can also get tomatoMeter and tomatoUserMeter
imdbRating_dict = {}
fliks_runtime = []
fliks_info = []
titles_notFound = []
for i in range(len(titles_enCartelera)):
    try:
        res = omdb.request(t=titles_enCartelera[i], r='json', tomatoes=True)
        json_content = res.content
        content = json.loads(json_content)
        # print json.dumps(content, indent=2)
        if (content['Response'] == "True" and content['imdbRating'] != 'N/A'):
            fliks_runtime.append([content['Title'], content['Runtime']])
            fliks_info.append([content['imdbRating'], content['Title'],
                               content['Director'], content['Country'],
                               content['Year'], content['Runtime']])
            imdbRating_dict[content['Title']] = content['imdbRating']
            # tomatoMeter_dict[content['Title']] = content['tomatoMeter']
            # tomatoUserMeter_dict[content['Title']] = content['tomatoUserMeter']
        else:
            titles_notFound.append(titles_enCartelera[i])
            continue
    except:
        continue
import omdb  # https://github.com/dgilland/omdb.py
import json

for x in range(0, 100):
    res = omdb.request(i='tt0103644')
    xml_content = res.content
    json_content = json.loads(xml_content.decode('utf-8'))
    print(json_content['Type'])
#!/usr/bin/env python
import omdb
import json
import sys

# print sys.argv[1]
x = sys.argv[1]
res = omdb.request(t=x)
xml_content = res.content
parsed = json.loads(xml_content)
# print parsed
print "Name :", parsed["Title"]
print "Type :", parsed["Type"]
print "Year :", parsed["Year"]
print "Language :", parsed["Language"]
print "Cast :", parsed["Actors"]
print "Plot :", parsed["Plot"]
# search
s_title = _name.replace("_", " ")
imdbid = False
res = None
skip = False
jsondb = None
if os.path.isfile(_dir + _name + '/' + pyme_cfg.imdb_file):
    with open(_dir + _name + '/' + pyme_cfg.imdb_file) as f:
        imdbid = f.read()
    imdbid = imdbid.strip("\r\n")
if pyme_cfg.force_regen == True or not os.path.isfile(_dir + _name + '/' + pyme_cfg.info_file):
    if not pyme_cfg.quiet:
        print('\tnetwork : omdb ...', end='')
    if (imdbid):
        try:
            res = omdb.request(i=imdbid)
        except:
            skip = True
    else:
        try:
            res = omdb.request(t=s_title)
        except:
            skip = True
    if not skip:
        jsondb = json.loads(res.content)
    else:
        jsondb = None
else:
    # force_regen == false
    if not pyme_cfg.quiet:
        print('\tnetwork : file ...', end='')
    with open(_dir + _name + '/' + pyme_cfg.info_file) as f:
        jsondb = json.loads(f.read())
def get(self, key, ratingkey):
    if not x.auth(key):
        return {'message': 'Unauthorized'}
    # log('{} accessed plexbotapi'.format(x.array['key']))
    tvdb = tapi.TVDB(conf.ttdb_key, banners=True)
    omdb.set_default('apikey', conf.omdb_key)
    year = datetime.datetime.today().year
    '''function returns viable data from tautulli'''
    taut = Tautulli()
    # if not isinstance(ratingkey, int):
    ratingkey = int(ratingkey)
    metadata = taut.get('get_metadata', rating_key=ratingkey)
    try:
        text = ''
        _type = metadata['response']['data']['library_name']
    except:
        logging.info('Tried to announce season (ratingkey: {})'.format(ratingkey))
        text = 'Added a new show (rating key: {}), but Tautulli is unable to determine type of show'.format(ratingkey)
        webhook = Webhook.partial(conf.discord_webhook, conf.discord_webtoken,
                                  adapter=RequestsWebhookAdapter())
        webhook.send(text, username='******')
        return(metadata)
    if _type == 'Series':
        thetvdb = metadata['response']['data']['parent_guid'].split('//')[1].split('/')[0]
        episode = int(metadata['response']['data']['media_index'])
        season = int(metadata['response']['data']['parent_media_index'])
        _metadata = tvdb.get_series(thetvdb, 'en')
        title = _metadata.SeriesName
        plot = _metadata[season][episode].Overview
        rating = str(_metadata[season][episode].Rating) + '/10'
        episode_name = _metadata[season][episode].EpisodeName
        release = _metadata[season][episode].FirstAired
        imdbid = ttdb(thetvdb)
        omdbdata = omdb.imdbid('{}'.format(imdbid))
        url = 'https://www.imdb.com/title/{}/'.format(imdbid)
        if rating == '0/10':
            rating = 'N/A'
        if release is '':
            release = str(year) + '*'
        if rating is '' or rating == '/10' or rating == 'N/A':
            rating = '1.0/10*'
        if plot == '':
            plot = 'N/A'
        if title == '' or title == 'N/A':
            title = 'N/A'
        embed = discord.Embed(title='{} ({}x{}) is on Plex!'.format(title, season, episode),
                              url=url, colour=discord.Colour(0xf9c38b))
        embed.add_field(name='Episode name', value=episode_name, inline=False)
        embed.add_field(name='Season', value=season, inline=True)
        embed.add_field(name='Episode', value=episode, inline=True)
        embed.add_field(name='Release date', value=release, inline=True)
        embed.add_field(name='Rating', value=rating, inline=True)
        embed.add_field(name='Plot', value=plot, inline=False)
        try:
            if omdbdata['poster'] != 'N/A':
                embed.set_thumbnail(url=omdbdata['poster'])
        except:
            pass
        embed.set_footer(text='Plexbot.py',
                         icon_url='https://zhf1943ap1t4f26r11i05c7l-wpengine.netdna-ssl.com/wp-content/uploads/2018/01/pmp-icon-1.png')
        announcedata = {'key': ratingkey,
                        'title': title + ' - ' + episode_name,
                        'type': _type,
                        'season': season,
                        'episode': episode,
                        'release': str(release),
                        'rating': rating,
                        'plot': plot}
    elif _type == 'Films' or _type == '4K Movies' or _type == 'Norsk':
        imdbid = metadata['response']['data']['guid'].split('//')[1].split('?')[0]
        metadata = json.loads(omdb.request(i=imdbid).text)
        title = metadata['Title']
        release = metadata['Released']
        plot = metadata['Plot']
        rating = metadata['Ratings'][0]['Value']
        omdbdata = omdb.imdbid('{}'.format(imdbid))
        if rating == '0/10':
            rating = 'N/A'
        if release is '':
            release = str(year) + '*'
        if rating is '' or rating == '/10':
            rating = '1.0/10*'
        if plot == '':
            plot = 'N/A'
        if title == '' or title == 'N/A':
            title = 'N/A'
        url = 'https://www.imdb.com/title/{}/'.format(imdbid)
        embed = discord.Embed(title='New movie "{}" available'.format(title),
                              url=url, colour=discord.Colour(0xf9c38b))
        embed.add_field(name='Original title', value=title)
        embed.add_field(name='Release date', value=release, inline=True)
        embed.add_field(name='Rating', value=rating, inline=True)
        embed.add_field(name='Plot', value=plot)
        try:
            if omdbdata['poster'] != 'N/A':
                embed.set_thumbnail(url=metadata['Poster'])
        except:
            pass
        embed.set_footer(text='Plexbot.py',
                         icon_url='https://zhf1943ap1t4f26r11i05c7l-wpengine.netdna-ssl.com/wp-content/uploads/2018/01/pmp-icon-1.png')
        announcedata = {'key': ratingkey,
                        'title': title,
                        'type': _type,
                        'season': None,
                        'episode': None,
                        'release': release,
                        'rating': rating,
                        'plot': plot}
    else:
        logging.info('Added rating key {} in new library: {}'.format(ratingkey, _type))
        embed = discord.Embed(title='A new item was added')
        embed.add_field(name='Rating key', value=ratingkey)
        embed.add_field(name='Section', value=_type)
        embed.set_footer(text='Plexbot.py',
                         icon_url='https://zhf1943ap1t4f26r11i05c7l-wpengine.netdna-ssl.com/wp-content/uploads/2018/01/pmp-icon-1.png')
        announcedata = {'key': ratingkey,
                        'type': _type,
                        'season': None,
                        'episode': None,
                        'release': None,
                        'rating': None,
                        'plot': None}
    webhook = Webhook.partial(conf.discord_webhook, conf.discord_webtoken,
                              adapter=RequestsWebhookAdapter())
    webhook.send(text, embed=embed, username='******')
    # Make sure 'other' is updated.
    x.announces.append(announcedata)
    print(x.announces)
    with open('papi/announces.json', 'w') as g:
        g.write(json.dumps(x.announces))
        g.close()
    return {'result': 'Announced'}