def test_no_year(self):
    # Make sure parser doesn't crash for movies with no year
    parser = ImdbParser()
    parser.parse('tt3303790')
    assert parser.name == 'Master of None'
    # There is no year
    assert not parser.year

def parse_what(self, what):
    """Given an imdb id or movie title, looks up from imdb and returns a dict with imdb_id and title keys"""
    imdb_id = extract_id(what)
    title = what
    if imdb_id:
        # Given an imdb id, find title
        parser = ImdbParser()
        try:
            parser.parse('http://www.imdb.com/title/%s' % imdb_id)
        except Exception:
            raise QueueError('Error parsing info from imdb for %s' % imdb_id)
        if parser.name:
            title = parser.name
    else:
        # Given a title, try to do imdb search for id
        console('Searching imdb for %s' % what)
        search = ImdbSearch()
        result = search.smart_match(what)
        if not result:
            raise QueueError('ERROR: Unable to find any such movie from imdb, use imdb url instead.')
        imdb_id = extract_id(result['url'])
        title = result['name']
    self.options['imdb_id'] = imdb_id
    self.options['title'] = title
    return {'title': title, 'imdb_id': imdb_id}

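# A minimal usage sketch for parse_what above. QueueHost is hypothetical: real
# plugin classes supply `options` themselves, and the flexget imports
# (extract_id, ImdbParser, ImdbSearch, console, QueueError) are assumed to be
# in scope. Expected values come from the test data further down (tt0114814).
class QueueHost(object):
    parse_what = parse_what  # reuse the function above as a method

    def __init__(self):
        self.options = {}

queue = QueueHost()
info = queue.parse_what('tt0114814')            # imdb id in -> title resolved
assert info['title'] == 'The Usual Suspects'
info = queue.parse_what('The Usual Suspects')   # title in -> imdb id searched
assert info['imdb_id'] == 'tt0114814'
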
def test_no_plot(self):
    # Make sure parser doesn't crash for movies with no plot
    parser = ImdbParser()
    parser.parse('tt0245062')
    assert parser.name == 'The Magnet'
    # There is no plot
    assert not parser.plot_outline

def test_no_plot(self):
    # Make sure parser doesn't crash for movies with no plot
    parser = ImdbParser()
    parser.parse('tt1300562')
    assert parser.name == 'Goodbye Mothers'
    # There is no plot
    assert not parser.plot_outline

def test_plot_with_links(self):
    """Make sure plot doesn't terminate at the first link. GitHub #756"""
    parser = ImdbParser()
    parser.parse('tt2503944')
    assert parser.plot_outline == (
        "Chef Adam Jones (Bradley Cooper) had it all - and lost it. A two-star Michelin "
        "rockstar with the bad habits to match, the former enfant terrible of the Paris "
        "restaurant scene did everything different every time out, and only ever cared "
        "about the thrill of creating explosions of taste. To land his own kitchen and "
        "that third elusive Michelin star though, he'll need the best of the best on "
        "his side, including the beautiful Helene (Sienna Miller)."
    )

def upgrade(ver, session):
    if ver == 0:
        # Translate old qualities into new quality requirements
        movie_table = table_schema('movie_queue', session)
        for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
            # Webdl quality no longer has dash
            new_qual = row['quality'].replace('web-dl', 'webdl')
            if new_qual.lower() != 'any':
                # Old behavior was to get specified quality or greater, approximate that with new system
                new_qual = ' '.join(qual + '+' for qual in new_qual.split(' '))
            session.execute(update(movie_table, movie_table.c.id == row['id'], {'quality': new_qual}))
        ver = 1
    if ver == 1:
        # Bad upgrade left some qualities as 'ANY+'
        movie_table = table_schema('movie_queue', session)
        for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
            if row['quality'].lower() == 'any+':
                session.execute(update(movie_table, movie_table.c.id == row['id'], {'quality': 'ANY'}))
        ver = 2
    if ver == 2:
        from flexget.utils.imdb import ImdbParser

        # Corrupted movie titles may be in the queue due to imdb layout changes. GitHub #729
        movie_table = table_schema('movie_queue', session)
        queue_base_table = table_schema('queue', session)
        query = select([movie_table.c.id, movie_table.c.imdb_id, queue_base_table.c.title])
        query = query.where(movie_table.c.id == queue_base_table.c.id)
        for row in session.execute(query):
            if row['imdb_id'] and (not row['title'] or row['title'] == 'None' or '\n' in row['title']):
                log.info('Fixing movie_queue title for %s' % row['imdb_id'])
                parser = ImdbParser()
                parser.parse(row['imdb_id'])
                if parser.name:
                    session.execute(update(queue_base_table, queue_base_table.c.id == row['id'],
                                           {'title': parser.name}))
        ver = 3
    if ver == 3:
        # adding queue_name column to movie_queue table and setting initial value to default
        table_add_column('movie_queue', 'queue_name', Unicode, session, default='default')
        ver = 4
    return ver

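# For context, a sketch of how a versioned upgrade function like the one above
# is registered in FlexGet. The db_schema helpers shown here exist in FlexGet,
# but the exact wiring is assumed rather than taken from this snippet.
from flexget import db_schema

Base = db_schema.versioned_base('movie_queue', 4)  # 4 = current schema version

@db_schema.upgrade('movie_queue')
def upgrade(ver, session):
    # FlexGet passes the schema version stored in the database. Each
    # `if ver == n:` step migrates one version and bumps `ver`; the returned
    # value is persisted as the new schema version for the next run.
    ...
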
def _parse_new_movie(self, imdb_url, session):
    """
    Get Movie object by parsing imdb page and save movie into the database.

    :param imdb_url: IMDB url
    :param session: Session to be used
    :return: Newly added Movie
    """
    parser = ImdbParser()
    parser.parse(imdb_url)
    # store to database
    movie = Movie()
    movie.photo = parser.photo
    movie.title = parser.name
    movie.original_title = parser.original_name
    movie.score = parser.score
    movie.votes = parser.votes
    movie.meta_score = parser.meta_score
    movie.year = parser.year
    movie.mpaa_rating = parser.mpaa_rating
    movie.plot_outline = parser.plot_outline
    movie.url = imdb_url
    for name in parser.genres:
        genre = session.query(Genre).filter(Genre.name == name).first()
        if not genre:
            genre = Genre(name)
        movie.genres.append(genre)  # pylint:disable=E1101
    for index, name in enumerate(parser.languages):
        language = session.query(Language).filter(Language.name == name).first()
        if not language:
            language = Language(name)
        movie.languages.append(MovieLanguage(language, prominence=index))
    for imdb_id, name in parser.actors.items():
        actor = session.query(Actor).filter(Actor.imdb_id == imdb_id).first()
        if not actor:
            actor = Actor(imdb_id, name)
        movie.actors.append(actor)  # pylint:disable=E1101
    for imdb_id, name in parser.directors.items():
        director = session.query(Director).filter(Director.imdb_id == imdb_id).first()
        if not director:
            director = Director(imdb_id, name)
        movie.directors.append(director)  # pylint:disable=E1101
    for imdb_id, name in parser.writers.items():
        writer = session.query(Writer).filter(Writer.imdb_id == imdb_id).first()
        if not writer:
            writer = Writer(imdb_id, name)
        movie.writers.append(writer)  # pylint:disable=E1101
    # so that we can track how long since we've updated the info later
    movie.updated = datetime.now()
    session.add(movie)
    return movie

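# The five query-then-create loops in _parse_new_movie share one shape; a
# hypothetical helper that captures it (name and signature are illustrative,
# not part of the original code):
def get_or_create(session, model, lookup, *ctor_args):
    """Return the row matching `lookup`, or a new unsaved `model` instance."""
    instance = session.query(model).filter_by(**lookup).first()
    return instance if instance is not None else model(*ctor_args)

# Equivalent to the genre loop above:
#   for name in parser.genres:
#       movie.genres.append(get_or_create(session, Genre, {'name': name}, name))
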
def test_parsed_data(self):
    parser = ImdbParser()
    parser.parse('tt0114814')
    assert parser.actors == {
        'nm0000592': 'Pete Postlethwaite',
        'nm0261452': 'Christine Estabrook',
        'nm0000751': 'Suzy Amis',
        'nm0000286': 'Stephen Baldwin',
        'nm0000445': 'Dan Hedaya',
        'nm0800339': 'Phillipe Simon',
        'nm0002064': 'Giancarlo Esposito',
        'nm0001590': 'Chazz Palminteri',
        'nm0000321': 'Gabriel Byrne',
        'nm0790436': 'Jack Shearer',
        'nm0000228': 'Kevin Spacey',
        'nm0001629': 'Kevin Pollak',
        'nm0107808': 'Carl Bressler',
        'nm0001125': 'Benicio Del Toro',
        'nm0000860': 'Paul Bartel'
    }, 'Actors not parsed correctly'
    assert parser.directors == {'nm0001741': 'Bryan Singer'}, 'Directors not parsed correctly'
    assert len(set(parser.genres).intersection([u'crime', u'drama', u'mystery', u'thriller'])) == \
        len([u'crime', u'drama', u'mystery', u'thriller']), 'Genres not parsed correctly'
    assert parser.imdb_id == 'tt0114814', 'ID not parsed correctly'
    assert len(set(parser.languages).intersection(
        ['english', 'hungarian', 'spanish', 'french'])) == 4, 'Languages not parsed correctly'
    assert parser.mpaa_rating == 'R', 'Rating not parsed correctly'
    assert parser.name == 'The Usual Suspects', 'Name not parsed correctly'
    assert parser.photo == (
        'http://ia.media-imdb.com/images/M/'
        'MV5BMzI1MjI5MDQyOV5BMl5BanBnXkFtZTcwNzE4Mjg3NA@@._V1_UX182_CR0,0,182,268_AL_.jpg'
    ), 'Photo not parsed correctly'
    assert parser.plot_outline == (
        'Following a truck hijack in New York, five conmen are arrested and brought together for questioning. '
        'As none of them are guilty, they plan a revenge operation against the police. The operation goes well, '
        'but then the influence of a legendary mastermind criminal called Keyser S\xf6ze is felt. It becomes '
        'clear that each one of them has wronged S\xf6ze at some point and must pay back now. The payback job '
        'leaves 27 men dead in a boat explosion, but the real question arises now: Who actually is Keyser S\xf6ze?'
    ), 'Plot outline not parsed correctly'
    assert 8.0 < parser.score < 9.0, 'Score not parsed correctly'
    assert parser.url == 'http://www.imdb.com/title/tt0114814/', 'URL not parsed correctly'
    assert 400000 < parser.votes < 1000000, 'Votes not parsed correctly'
    assert parser.year == 1995, 'Year not parsed correctly'

def test_parsed_data(self):
    parser = ImdbParser()
    parser.parse('tt0114814')
    assert parser.actors == {
        'nm0000592': 'Pete Postlethwaite',
        'nm0261452': 'Christine Estabrook',
        'nm0000751': 'Suzy Amis',
        'nm0000286': 'Stephen Baldwin',
        'nm0000445': 'Dan Hedaya',
        'nm0800339': 'Phillipe Simon',
        'nm0002064': 'Giancarlo Esposito',
        'nm0001590': 'Chazz Palminteri',
        'nm0000321': 'Gabriel Byrne',
        'nm0790436': 'Jack Shearer',
        'nm0000228': 'Kevin Spacey',
        'nm0001629': 'Kevin Pollak',
        'nm0107808': 'Carl Bressler',
        'nm0001125': 'Benicio Del Toro',
        'nm0000860': 'Paul Bartel'
    }, 'Actors not parsed correctly'
    assert parser.directors == {'nm0001741': 'Bryan Singer'}, 'Directors not parsed correctly'
    assert len(set(parser.genres).intersection([u'crime', u'drama', u'mystery', u'thriller'])) == \
        len([u'crime', u'drama', u'mystery', u'thriller']), 'Genres not parsed correctly'
    assert parser.imdb_id == 'tt0114814', 'ID not parsed correctly'
    assert len(set(parser.languages).intersection(
        ['english', 'hungarian', 'spanish', 'french'])) == 4, 'Languages not parsed correctly'
    assert parser.mpaa_rating == 'R', 'Rating not parsed correctly'
    assert parser.name == 'The Usual Suspects', 'Name not parsed correctly'
    assert parser.photo == (
        'https://images-na.ssl-images-amazon.com/images/M/MV5BMzI1MjI5MDQyOV5BMl5BanBnXkFtZTcwNzE4Mjg3NA@@.'
        '_V1_UX182_CR0,0,182,268_AL_.jpg'
    ), 'Photo not parsed correctly'
    assert parser.plot_outline == (
        'Following a truck hijack in New York, five conmen are arrested and brought together for questioning. '
        'As none of them are guilty, they plan a revenge operation against the police. The operation goes well, '
        'but then the influence of a legendary mastermind criminal called Keyser S\xf6ze is felt. It becomes '
        'clear that each one of them has wronged S\xf6ze at some point and must pay back now. The payback job '
        'leaves 27 men dead in a boat explosion, but the real question arises now: Who actually is Keyser S\xf6ze?'
    ), 'Plot outline not parsed correctly'
    assert 8.0 < parser.score < 9.0, 'Score not parsed correctly'
    assert parser.url == 'http://www.imdb.com/title/tt0114814/', 'URL not parsed correctly'
    assert 400000 < parser.votes < 1000000, 'Votes not parsed correctly'
    assert parser.year == 1995, 'Year not parsed correctly'

def upgrade(ver, session):
    if ver == 0:
        # Translate old qualities into new quality requirements
        movie_table = table_schema("movie_queue", session)
        for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
            # Webdl quality no longer has dash
            new_qual = row["quality"].replace("web-dl", "webdl")
            if new_qual.lower() != "any":
                # Old behavior was to get specified quality or greater, approximate that with new system
                new_qual = " ".join(qual + "+" for qual in new_qual.split(" "))
            session.execute(update(movie_table, movie_table.c.id == row["id"], {"quality": new_qual}))
        ver = 1
    if ver == 1:
        # Bad upgrade left some qualities as 'ANY+'
        movie_table = table_schema("movie_queue", session)
        for row in session.execute(select([movie_table.c.id, movie_table.c.quality])):
            if row["quality"].lower() == "any+":
                session.execute(update(movie_table, movie_table.c.id == row["id"], {"quality": "ANY"}))
        ver = 2
    if ver == 2:
        from flexget.utils.imdb import ImdbParser

        # Corrupted movie titles may be in the queue due to imdb layout changes. GitHub #729
        movie_table = table_schema("movie_queue", session)
        queue_base_table = table_schema("queue", session)
        query = select([movie_table.c.id, movie_table.c.imdb_id, queue_base_table.c.title])
        query = query.where(movie_table.c.id == queue_base_table.c.id)
        for row in session.execute(query):
            if row["imdb_id"] and (not row["title"] or row["title"] == "None" or "\n" in row["title"]):
                log.info("Fixing movie_queue title for %s" % row["imdb_id"])
                parser = ImdbParser()
                parser.parse(row["imdb_id"])
                if parser.name:
                    session.execute(
                        update(queue_base_table, queue_base_table.c.id == row["id"], {"title": parser.name})
                    )
        ver = 3
    return ver

def lookup(self, entry, search_allowed=True):
    """Perform imdb lookup for entry. Raises PluginError with failure reason."""
    from flexget.manager import manager

    if entry.get('imdb_url', lazy=False):
        log.debug('No title passed. Lookup for %s' % entry['imdb_url'])
    elif entry.get('imdb_id', lazy=False):
        log.debug('No title passed. Lookup for %s' % entry['imdb_id'])
    elif entry.get('title', lazy=False):
        log.debug('lookup for %s' % entry['title'])
    else:
        raise PluginError('looking up IMDB for entry failed, no title, imdb_url or imdb_id passed.')

    take_a_break = False
    session = Session()

    try:
        # entry sanity checks
        for field in ['imdb_votes', 'imdb_score']:
            if entry.get(field, lazy=False):
                value = entry[field]
                if not isinstance(value, (int, float)):
                    raise PluginError('Entry field %s should be a number!' % field)

        # if imdb_id is included, build the url.
        if entry.get('imdb_id', lazy=False) and not entry.get('imdb_url', lazy=False):
            entry['imdb_url'] = make_url(entry['imdb_id'])

        # make sure imdb url is valid
        if entry.get('imdb_url', lazy=False):
            imdb_id = extract_id(entry['imdb_url'])
            if imdb_id:
                entry['imdb_url'] = make_url(imdb_id)
            else:
                log.debug('imdb url %s is invalid, removing it' % entry['imdb_url'])
                del entry['imdb_url']

        # no imdb_url, check if there is cached result for it or if the
        # search is known to fail
        if not entry.get('imdb_url', lazy=False):
            result = session.query(SearchResult).\
                filter(SearchResult.title == entry['title']).first()
            if result:
                if result.fails and not manager.options.retry:
                    # this movie cannot be found, not worth trying again ...
                    log.debug('%s will fail lookup' % entry['title'])
                    raise PluginError('Title `%s` lookup fails' % entry['title'])
                else:
                    if result.url:
                        log.trace('Setting imdb url for %s from db' % entry['title'])
                        entry['imdb_url'] = result.url

        # no imdb url, but information required, try searching
        if not entry.get('imdb_url', lazy=False) and search_allowed:
            log.verbose('Searching from imdb `%s`' % entry['title'])
            take_a_break = True
            search = ImdbSearch()
            search_result = search.smart_match(entry['title'])
            if search_result:
                entry['imdb_url'] = search_result['url']
                # store url for this movie, so we don't have to search on
                # every run
                result = SearchResult(entry['title'], entry['imdb_url'])
                session.add(result)
                log.verbose('Found %s' % (entry['imdb_url']))
            else:
                log_once('Imdb lookup failed for %s' % entry['title'], log)
                # store FAIL for this title
                result = SearchResult(entry['title'])
                result.fails = True
                session.add(result)
                raise PluginError('Title `%s` lookup failed' % entry['title'])

        # check if this imdb page has been parsed & cached
        movie = session.query(Movie).\
            options(joinedload_all(Movie.genres, Movie.languages,
                                   Movie.actors, Movie.directors)).\
            filter(Movie.url == entry['imdb_url']).first()

        refresh_interval = 2
        if movie:
            if movie.year:
                age = (datetime.now().year - movie.year)
                refresh_interval += age * 5
                log.debug('cached movie `%s` age %i refresh interval %i days' %
                          (movie.title, age, refresh_interval))

        if not movie or movie.updated is None or \
           movie.updated < datetime.now() - timedelta(days=refresh_interval):
            # Remove the old movie, we'll store another one later.
            session.query(Movie).filter(Movie.url == entry['imdb_url']).delete()

            # search and store to cache
            if 'title' in entry:
                log.verbose('Parsing imdb for `%s`' % entry['title'])
            else:
                log.verbose('Parsing imdb for `%s`' % entry['imdb_id'])
            try:
                take_a_break = True
                imdb = ImdbParser()
                imdb.parse(entry['imdb_url'])
                # store to database
                movie = Movie()
                movie.photo = imdb.photo
                movie.title = imdb.name
                movie.score = imdb.score
                movie.votes = imdb.votes
                movie.year = imdb.year
                movie.mpaa_rating = imdb.mpaa_rating
                movie.plot_outline = imdb.plot_outline
                movie.url = entry['imdb_url']
                for name in imdb.genres:
                    genre = session.query(Genre).\
                        filter(Genre.name == name).first()
                    if not genre:
                        genre = Genre(name)
                    movie.genres.append(genre)  # pylint:disable=E1101
                for name in imdb.languages:
                    language = session.query(Language).\
                        filter(Language.name == name).first()
                    if not language:
                        language = Language(name)
                    movie.languages.append(language)  # pylint:disable=E1101
                for imdb_id, name in imdb.actors.items():
                    actor = session.query(Actor).\
                        filter(Actor.imdb_id == imdb_id).first()
                    if not actor:
                        actor = Actor(imdb_id, name)
                    movie.actors.append(actor)  # pylint:disable=E1101
                for imdb_id, name in imdb.directors.items():
                    director = session.query(Director).\
                        filter(Director.imdb_id == imdb_id).first()
                    if not director:
                        director = Director(imdb_id, name)
                    movie.directors.append(director)  # pylint:disable=E1101
                # so that we can track how long since we've updated the info later
                movie.updated = datetime.now()
                session.add(movie)
            except UnicodeDecodeError:
                log.error('Unable to determine encoding for %s. Installing chardet library may help.' %
                          entry['imdb_url'])
                # store cache so this will not be tried again
                movie = Movie()
                movie.url = entry['imdb_url']
                session.add(movie)
                raise PluginError('UnicodeDecodeError')
            except ValueError as e:
                # TODO: might be a little too broad catch, what was this for anyway? ;P
                if manager.options.debug:
                    log.exception(e)
                raise PluginError('Invalid parameter: %s' % entry['imdb_url'], log)

        for att in ['title', 'score', 'votes', 'year', 'genres', 'languages',
                    'actors', 'directors', 'mpaa_rating']:
            log.trace('movie.%s: %s' % (att, getattr(movie, att)))

        # store to entry
        entry.update_using_map(self.field_map, movie)

        # give imdb a little break between requests (see: http://flexget.com/ticket/129#comment:1)
        if (take_a_break and
                not manager.options.debug and
                not manager.unit_test):
            import time
            time.sleep(3)

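# Worked example of the cache-refresh heuristic in lookup() above: the refresh
# interval starts at 2 days and grows by 5 days per year of movie age, so old
# titles are re-parsed rarely while recent releases stay fresh. The helper is
# a hypothetical mirror of the inline computation, not part of the plugin.
def refresh_interval_days(movie_year, current_year):
    return 2 + (current_year - movie_year) * 5

assert refresh_interval_days(1995, 2016) == 107  # a 1995 movie, looked up in 2016
assert refresh_interval_days(2016, 2016) == 2    # a same-year release
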
def add_new_downloaditem_pre(sender, instance, **kwargs):
    if instance.id is None:
        from lazy_common import metaparser

        logger.debug("Adding a new download %s" % instance.ftppath)
        instance.ftppath = instance.ftppath.strip()

        # Check if it exists already..
        try:
            existing_obj = DownloadItem.objects.get(ftppath=instance.ftppath)

            if existing_obj:
                logger.info("Found existing record %s" % instance.ftppath)

                if existing_obj.status == DownloadItem.COMPLETE:
                    # it's complete... maybe delete it so we can re-add if it's older than 2 weeks?
                    curTime = timezone.now()
                    hours = 0

                    if existing_obj.dateadded is None:
                        hours = 300
                    else:
                        diff = curTime - existing_obj.dateadded
                        hours = diff.total_seconds() / 60 / 60

                    if hours > 288:
                        existing_obj.delete()
                    else:
                        raise AlradyExists()
                else:
                    # lets update it with the new downloaded eps
                    if instance.onlyget is not None:
                        for get_season, get_eps in instance.onlyget.items():
                            for get_ep in get_eps:
                                existing_obj.add_download(get_season, get_ep)

                        existing_obj.reset()
                        existing_obj.save()
                        raise AlradyExists_Updated(existing_obj)

                    raise AlradyExists_Updated(existing_obj)
        except ObjectDoesNotExist:
            pass

        # Set default status as download queue
        if instance.status is None:
            instance.status = 1

        # Get section and title
        if instance.section is None:
            split = instance.ftppath.split("/")

            try:
                section = split[1]
                title = split[-1]
            except:
                raise Exception("Unable to determine section from path %s" % instance.ftppath)

            if section:
                instance.section = section
            else:
                raise Exception("Unable to determine section from path %s" % instance.ftppath)

            if title:
                instance.title = title
            else:
                raise Exception("Unable to determine title from path %s" % instance.ftppath)

        # Figure out the local path
        if instance.localpath is None:
            if section == "XVID" or section == "HD":
                path = settings.MOVIE_PATH_TEMP
            elif section == "TVHD" or section == "TV":
                path = settings.TV_PATH_TEMP
            elif section == "REQUESTS":
                path = settings.REQUESTS_PATH_TEMP
            else:
                raise Exception("Unable to find section path in config: %s" % section)

            instance.localpath = os.path.join(path, instance.title)

        instance.parse_title()

        parser = instance.metaparser()
        title = None

        if 'title' in parser.details:
            title = parser.details['title']

        if 'series' in parser.details:
            title = TVShow.clean_title(parser.details['series'])

        # Ok now we know it's a valid downloaditem, lets add it to the db
        tvdbapi = Tvdb()
        type = instance.get_type()

        from lazy_common import metaparser

        # must be a tvshow
        if type == metaparser.TYPE_TVSHOW:
            if instance.tvdbid_id is None:
                logger.debug("Looks like we are working with a TVShow, lets try find the tvdb object")

                # We need to try find the series info
                parser = instance.metaparser()

                if parser.details and 'series' in parser.details:
                    series_name = TVShow.clean_title(parser.details['series'])

                    # search via database first
                    found = TVShow.find_by_title(series_name)

                    if found:
                        instance.tvdbid_id = found.id
                    else:
                        try:
                            match = tvdbapi[series_name]
                            logger.debug("Show found")
                            instance.tvdbid_id = int(match['id'])

                            if match['imdb_id'] is not None:
                                logger.debug("also found imdbid %s from thetvdb" % match['imdb_id'])
                                instance.imdbid_id = int(match['imdb_id'].lstrip("tt"))
                        except tvdb_shownotfound:
                            logger.exception("Error finding show on thetvdb %s" % series_name)
                        except Exception as e:
                            logger.exception("Error finding : %s via thetvdb.com due to %s" % (series_name, e))
                else:
                    logger.exception("Unable to parse series info")
        else:
            # must be a movie!
            if instance.imdbid_id is None:
                logger.debug("Looks like we are working with a Movie")

                # Lets try find the movie details
                parser = instance.metaparser()
                movie_title = parser.details['title']

                if 'year' in parser.details:
                    movie_year = parser.details['year']
                else:
                    movie_year = None

                imdbs = ImdbSearch()
                results = imdbs.best_match(movie_title, movie_year)

                if results and results['match'] > 0.70:
                    movieObj = ImdbParser()
                    movieObj.parse(results['url'])
                    logger.debug("Found imdb movie id %s" % movieObj.imdb_id)
                    instance.imdbid_id = int(movieObj.imdb_id.lstrip("tt"))
                else:
                    logger.debug("Didn't find a good enough match on imdb")

        # Now we have sorted both imdbid and thetvdbid, lets sort it all out
        # If we have a tvdbid, do we need to add it to the db, or does it exist or is it ignored?
        if instance.tvdbid_id is not None and instance.tvdbid_id != "":
            # Does it already exist?
            try:
                if instance.tvdbid:
                    # Do we need to update it
                    curTime = timezone.now()
                    hours = 0

                    if instance.tvdbid.updated is None:
                        hours = 50
                    else:
                        diff = curTime - instance.tvdbid.updated
                        hours = diff.total_seconds() / 60 / 60

                    if hours > 24:
                        try:
                            instance.tvdbid.update_from_tvdb()
                            instance.tvdbid.save()
                        except Exception as e:
                            logger.exception("Error updating TVDB info %s" % e)
            except ObjectDoesNotExist as e:
                logger.debug("Getting tvdb data for release")
                new_tvdb_item = TVShow()
                new_tvdb_item.id = instance.tvdbid_id

                try:
                    new_tvdb_item.save()
                except:
                    instance.tvdbid = None
                    pass

            if instance.tvdbid and instance.tvdbid.ignored:
                logger.info("Show won't be added as it is marked as ignored")
                raise Ignored("Show won't be added as it is marked as ignored")

        # If we have an imdbid, do we need to add it to the db or does it exist
        if instance.imdbid_id is not None and instance.imdbid_id != "":
            try:
                if instance.imdbid:
                    # Do we need to update it
                    curTime = timezone.now()
                    imdb_date = instance.imdbid.updated

                    try:
                        if imdb_date:
                            diff = curTime - instance.imdbid.updated
                            hours = diff.total_seconds() / 60 / 60

                            if hours > 24:
                                instance.imdbid.update_from_imdb()
                        else:
                            instance.imdbid.update_from_imdb()
                    except ObjectDoesNotExist as e:
                        logger.info("Error updating IMDB info as it was not found")
            except ObjectDoesNotExist as e:
                logger.debug("Getting IMDB data for release")
                new_imdb = Movie()
                new_imdb.id = instance.imdbid_id

                try:
                    new_imdb.save()
                except ObjectDoesNotExist:
                    instance.imdbid_id = None

            if instance.imdbid.ignored:
                logger.info("Movie won't be added as it is marked as ignored")
                raise Ignored("Movie cannot be added as it is marked as ignored")

        if title:
            logger.info("Looking for existing %s in the queue" % title)
            type = instance.get_type()

            if instance.tvdbid_id:
                # Check if already in queue (maybe this is higher quality or proper).
                existing_items = [dlitem for dlitem in DownloadItem.objects.all().filter(
                    Q(status=DownloadItem.QUEUE) | Q(status=DownloadItem.DOWNLOADING) |
                    Q(status=DownloadItem.PENDING) | Q(tvdbid_id=instance.tvdbid_id))]
            elif instance.imdbid_id:
                # Check if already in queue (maybe this is higher quality or proper).
                existing_items = [dlitem for dlitem in DownloadItem.objects.all().filter(
                    Q(status=DownloadItem.QUEUE) | Q(status=DownloadItem.DOWNLOADING) |
                    Q(status=DownloadItem.PENDING) | Q(imdbid_id=instance.imdbid_id))]
            else:
                existing_items = [dlitem for dlitem in DownloadItem.objects.all().filter(
                    Q(status=DownloadItem.QUEUE) | Q(status=DownloadItem.DOWNLOADING) |
                    Q(status=DownloadItem.PENDING))]

            for dlitem in existing_items:
                # If it's a tvshow and the tvdbid does not match then skip
                if type == metaparser.TYPE_TVSHOW and dlitem.tvdbid_id and instance.tvdbid_id:
                    if instance.tvdbid_id != dlitem.tvdbid_id:
                        continue

                if type == metaparser.TYPE_MOVIE and dlitem.imdbid_id and instance.imdbid_id:
                    if instance.imdbid_id != dlitem.imdbid_id:
                        continue

                dlitem_title = None
                dlitem_parser = dlitem.metaparser()

                if 'title' in dlitem_parser.details:
                    dlitem_title = dlitem_parser.details['title']

                if 'series' in dlitem_parser.details:
                    dlitem_title = TVShow.clean_title(dlitem_parser.details['series'])

                if dlitem_title and dlitem_title.lower() == title.lower():
                    check = False

                    if parser.type == metaparser.TYPE_TVSHOW:
                        if ('season' in parser.details and 'episode_number' in parser.details and
                                'season' in dlitem_parser.details and 'episode_number' in dlitem_parser.details):
                            if (parser.details['season'] == dlitem_parser.details['season'] and
                                    parser.details['episode_number'] == dlitem_parser.details['episode_number']):
                                check = True
                    else:
                        check = True

                    if check:
                        logger.info("Found %s already in queue, lets see what is better quality" % dlitem.title)

                        if dlitem_parser.quality > parser.quality:
                            logger.info("Download already exists in queue with better quality, will ignore this one")
                            raise AlradyExists_Updated(dlitem)
                        else:
                            logger.info("Deleting %s from queue as it has a lower quality" % dlitem.title)
                            dlitem.delete()

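# The queue-deduplication tail of add_new_downloaditem_pre reduces to a single
# rule; a condensed sketch (hypothetical refactor, assuming metaparser
# qualities are ordered, as the `>` comparison above implies):
def resolve_duplicate(existing_item, incoming_parser):
    if existing_item.metaparser().quality > incoming_parser.quality:
        # The queued download is better quality: reject the incoming item.
        raise AlradyExists_Updated(existing_item)
    # The incoming item is at least as good: drop the stale queue entry.
    existing_item.delete()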