def calculate(self, nzb, movie):
    """ Calculate the score of a NZB, used for sorting later """

    # Merge global and category preferred words
    preferred_words = splitString(Env.setting('preferred_words', section = 'searcher').lower())
    try:
        preferred_words = removeDuplicate(preferred_words + splitString(movie['category']['preferred'].lower()))
    except:
        pass

    score = nameScore(toUnicode(nzb['name']), movie['info']['year'], preferred_words)

    for movie_title in movie['info']['titles']:
        score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title))
        score += namePositionScore(toUnicode(nzb['name']), toUnicode(movie_title))

    score += sizeScore(nzb['size'])

    # Torrents only
    if nzb.get('seeders'):
        try:
            score += nzb.get('seeders') * 100 / 15
            score += nzb.get('leechers') * 100 / 30
        except:
            pass

    # Provider score
    score += providerScore(nzb['provider'])

    # Duplicates in name
    score += duplicateScore(nzb['name'], getTitle(movie))

    # Merge global and category ignored words
    ignored_words = splitString(Env.setting('ignored_words', section = 'searcher').lower())
    try:
        ignored_words = removeDuplicate(ignored_words + splitString(movie['category']['ignored'].lower()))
    except:
        pass

    # Partial ignored words
    score += partialIgnoredScore(nzb['name'], getTitle(movie), ignored_words)

    # Ignore single downloads from multipart
    score += halfMultipartScore(nzb['name'])

    # Extra provider specific check
    extra_score = nzb.get('extra_score')
    if extra_score:
        score += extra_score(nzb)

    # Scene / Nuke scoring
    score += sceneScore(nzb['name'])

    return score
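# --- Illustrative sketch (not part of the original plugin) ---------------------
# The torrent block above rewards swarm health: roughly 100/15 points per seeder
# and 100/30 per leecher. A standalone approximation of that arithmetic, written
# with explicit floor division to match Python 2 style integer division:
def torrent_health_bonus(seeders, leechers):
    # Mirrors `seeders * 100 / 15 + leechers * 100 / 30` from calculate() above
    return seeders * 100 // 15 + leechers * 100 // 30

assert torrent_health_bonus(30, 15) == 250   # 200 for seeders + 50 for leechers
# -------------------------------------------------------------------------------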
def findViaAlternative(self, group):
    results = {"480p": [], "720p": [], "1080p": []}

    movie_name = getTitle(group)

    url = "%s?%s" % (self.urls["backup"], tryUrlencode({"s": movie_name}))
    try:
        data = self.getCache("hdtrailers.alt.%s" % getIdentifier(group), url, show_error=False)
    except HTTPError:
        log.debug("No alternative page found for: %s", movie_name)
        data = None

    if not data:
        return results

    try:
        html = BeautifulSoup(data, parse_only=self.only_tables_tags)
        result_table = html.find_all("h2", text=re.compile(movie_name))

        for h2 in result_table:
            if "trailer" in h2.lower():
                parent = h2.parent.parent.parent
                trailerLinks = parent.find_all("a", text=re.compile("480p|720p|1080p"))
                try:
                    for trailer in trailerLinks:
                        results[trailer].insert(0, trailer.parent["href"])
                except:
                    pass
    except AttributeError:
        log.debug("No trailers found via alternative page.")

    return results
def searchSingle(self, message = None, group = None):
    if not group: group = {}

    if self.isDisabled() or len(group['files']['trailer']) > 0:
        return

    trailers = fireEvent('trailer.search', group = group, merge = True)
    if not trailers or trailers == []:
        log.info('No trailers found for: %s', getTitle(group))
        return False

    for trailer in trailers.get(self.conf('quality'), []):
        ext = getExt(trailer)
        filename = self.conf('name').replace('<filename>', group['filename']) + ('.%s' % ('mp4' if len(ext) > 5 else ext))
        destination = os.path.join(group['destination_dir'], filename)

        if not os.path.isfile(destination):
            trailer_file = fireEvent('file.download', url = trailer, dest = destination, urlopen_kwargs = {'headers': {'User-Agent': 'Quicktime'}}, single = True)

            if trailer_file and os.path.getsize(trailer_file) < (1024 * 1024):
                # Don't trust small trailers (1MB), try next one
                os.unlink(trailer_file)
                continue
        else:
            log.debug('Trailer already exists: %s', destination)

        group['renamed_files'].append(destination)

        # Download first and break
        break

    return True
def notify(self, message = '', data = None, listener = None):
    if not data: data = {}

    api_data = {
        'user': self.conf('user_key'),
        'token': self.conf('api_token'),
        'message': toUnicode(message),
        'priority': self.conf('priority'),
        'sound': self.conf('sound'),
    }

    if data and getIdentifier(data):
        api_data.update({
            'url': toUnicode('http://www.imdb.com/title/%s/' % getIdentifier(data)),
            'url_title': toUnicode('%s on IMDb' % getTitle(data)),
        })

    try:
        data = self.urlopen('%s/%s' % (self.api_url, '1/messages.json'),
                            headers = {'Content-type': 'application/x-www-form-urlencoded'},
                            data = api_data)
        log.info2('Pushover responded with: %s', data)
        return True
    except:
        return False
def search(self, group):
    movie_name = getTitle(group)

    url = self.urls["api"] % self.movieUrlName(movie_name)
    try:
        data = self.getCache("hdtrailers.%s" % getIdentifier(group), url, show_error=False)
    except HTTPError:
        log.debug("No page found for: %s", movie_name)
        data = None

    result_data = {"480p": [], "720p": [], "1080p": []}

    if not data:
        return result_data

    did_alternative = False
    for provider in self.providers:
        results = self.findByProvider(data, provider)

        # Find alternative
        if results.get("404") and not did_alternative:
            results = self.findViaAlternative(group)
            did_alternative = True

        result_data = mergeDicts(result_data, results)

    return result_data
def suggestView(self, limit = 6, **kwargs):
    if self.isDisabled():
        return {
            'success': True,
            'movies': []
        }

    movies = splitString(kwargs.get('movies', ''))
    ignored = splitString(kwargs.get('ignored', ''))
    seen = splitString(kwargs.get('seen', ''))

    cached_suggestion = self.getCache('suggestion_cached')
    if cached_suggestion:
        suggestions = cached_suggestion
    else:
        if not movies or len(movies) == 0:
            active_movies = fireEvent('media.with_status', ['active', 'done'], types = 'movie', single = True)
            movies = [getIdentifier(x) for x in active_movies]

        if not ignored or len(ignored) == 0:
            ignored = splitString(Env.prop('suggest_ignore', default = ''))
        if not seen or len(seen) == 0:
            movies.extend(splitString(Env.prop('suggest_seen', default = '')))

        suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
        self.setCache('suggestion_cached', suggestions, timeout = 6048000)  # Cache for 10 weeks

    medias = []
    for suggestion in suggestions[:int(limit)]:

        # Cache poster
        posters = suggestion.get('images', {}).get('poster', [])
        poster = [x for x in posters if 'tmdb' in x]
        posters = poster if len(poster) > 0 else posters

        cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False
        files = {'image_poster': [cached_poster]} if cached_poster else {}

        medias.append({
            'status': 'suggested',
            'title': getTitle(suggestion),
            'type': 'movie',
            'info': suggestion,
            'files': files,
            'identifiers': {
                'imdb': suggestion.get('imdb')
            }
        })

    return {
        'success': True,
        'movies': medias
    }
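# --- Illustrative sketch (not part of the original handler) --------------------
# The poster selection above prefers TMDB-hosted art when any is available and
# otherwise falls back to the full list. Standalone example with made-up URLs:
posters = ['http://example.org/art/1.jpg', 'http://image.tmdb.org/t/p/original/abc.jpg']
tmdb_only = [x for x in posters if 'tmdb' in x]
posters = tmdb_only if len(tmdb_only) > 0 else posters
print(posters[0])   # http://image.tmdb.org/t/p/original/abc.jpg
# -------------------------------------------------------------------------------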
def automationView(self, force_update = False, **kwargs):
    db = get_db()

    charts = fireEvent('automation.get_chart_list', merge = True)
    ignored = splitString(Env.prop('charts_ignore', default = ''))

    # Create a list the movie/list.js can use
    for chart in charts:
        medias = []
        for media in chart.get('list', []):

            identifier = media.get('imdb')
            if identifier in ignored:
                continue

            # Skip movies that are already in the library
            try:
                try:
                    in_library = db.get('media', 'imdb-%s' % identifier)
                    if in_library:
                        continue
                except RecordNotFound:
                    pass
            except:
                pass

            # Cache poster
            posters = media.get('images', {}).get('poster', [])
            poster = [x for x in posters if 'tmdb' in x]
            posters = poster if len(poster) > 0 else posters

            cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False
            files = {'image_poster': [cached_poster]} if cached_poster else {}

            medias.append({
                'status': 'chart',
                'title': getTitle(media),
                'type': 'movie',
                'info': media,
                'files': files,
                'identifiers': {
                    'imdb': identifier
                }
            })

        chart['list'] = medias

    return {
        'success': True,
        'count': len(charts),
        'charts': charts,
        'ignored': ignored,
    }
def notifyXBMCnoJSON(self, host, data):
    server = "http://%s/xbmcCmds/" % host

    # Notification(title, message [, timeout , image])
    cmd = "xbmcHttp?command=ExecBuiltIn(Notification(%s,%s,'',%s))" % (
        urllib.quote(getTitle(data)),
        urllib.quote(data["message"]),
        urllib.quote(self.getNotificationImage("medium")),
    )
    server += cmd

    # Content type is a guess; text/plain seems to work
    headers = {"Content-Type": "text/plain"}

    # Authentication support
    if self.conf("password"):
        base64string = base64.encodestring("%s:%s" % (self.conf("username"), self.conf("password"))).replace(
            "\n", ""
        )
        headers["Authorization"] = "Basic %s" % base64string

    try:
        log.debug("Sending non-JSON-type request to %s: %s", (host, data))

        # The response will either be 'OK':
        #   <html>
        #   <li>OK
        #   </html>
        #
        # or 'Error':
        #   <html>
        #   <li>Error:<message>
        #   </html>
        response = self.urlopen(server, headers=headers, timeout=3, show_error=False)

        if "OK" in response:
            log.debug("Returned from non-JSON-type request %s: %s", (host, response))
            # Manually fake the expected response array
            return [{"result": "OK"}]
        else:
            log.error("Returned from non-JSON-type request %s: %s", (host, response))
            # Manually fake the expected response array
            return [{"result": "Error"}]
    except (MaxRetryError, Timeout, ConnectionError):
        log.info2("Couldn't send request to Kodi, assuming it's turned off")
        return [{"result": "Error"}]
    except:
        log.error("Failed sending non-JSON-type request to Kodi: %s", traceback.format_exc())
        return [{"result": "Error"}]
def query(self, media, first = True, include_year = True, **kwargs):
    if media.get('type') != 'movie':
        return

    default_title = getTitle(media)
    titles = media['info'].get('titles', [])
    titles.insert(0, default_title)

    # Add year identifier to titles
    if include_year:
        titles = [title + (' %s' % str(media['info']['year'])) for title in titles]

    if first:
        return titles[0] if titles else None

    return titles
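# --- Illustrative sketch (not part of the original plugin) ---------------------
# What the year suffixing above produces, with made-up titles:
titles = ['The Matrix', 'Matrix']
year = 1999
print([title + ' %s' % str(year) for title in titles])   # ['The Matrix 1999', 'Matrix 1999']
# -------------------------------------------------------------------------------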
def restatus(self, media_id, tag_recent = True, allowed_restatus = None):
    try:
        db = get_db()

        m = db.get('id', media_id)
        previous_status = m['status']

        log.debug('Changing status for %s', getTitle(m))
        if not m['profile_id']:
            m['status'] = 'done'
        else:
            m['status'] = 'active'

            try:
                profile = db.get('id', m['profile_id'])
                media_releases = fireEvent('release.for_media', m['_id'], single = True)
                done_releases = [release for release in media_releases if release.get('status') == 'done']

                if done_releases:

                    # Check if we are finished with the media
                    for release in done_releases:
                        if fireEvent('quality.isfinish', {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile,
                                     timedelta(seconds = time.time() - release['last_edit']).days, single = True):
                            m['status'] = 'done'
                            break

                elif previous_status == 'done':
                    m['status'] = 'done'

            except RecordNotFound:
                log.debug('Failed restatus, keeping previous: %s', traceback.format_exc())
                m['status'] = previous_status

        # Only update when status has changed
        if previous_status != m['status'] and (not allowed_restatus or m['status'] in allowed_restatus):
            db.update(m)

            # Tag media as recent
            if tag_recent:
                self.tag(media_id, 'recent', update_edited = True)

        return m['status']
    except:
        log.error('Failed restatus: %s', traceback.format_exc())
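# --- Illustrative sketch (not part of the original plugin) ---------------------
# The quality.isfinish call above passes the release age as whole days, computed
# from a timedelta over "seconds since last_edit". Standalone equivalent:
import time
from datetime import timedelta

last_edit = time.time() - 3 * 86400                    # pretend: edited 3 days ago
age_in_days = timedelta(seconds = time.time() - last_edit).days
print(age_in_days)                                     # 3
# -------------------------------------------------------------------------------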
def tryNextRelease(self, media_id, manual = False, force_download = False):
    try:
        rels = fireEvent('release.for_media', media_id, single = True)

        for rel in rels:
            if rel.get('status') in ['snatched', 'done']:
                fireEvent('release.update_status', rel.get('_id'), status = 'ignored')

        media = fireEvent('media.get', media_id, single = True)
        if media:
            log.info('Trying next release for: %s', getTitle(media))
            self.single(media, manual = manual, force_download = force_download)
            return True

        return False
    except:
        log.error('Failed searching for next release: %s', traceback.format_exc())
        return False
def ignoreView(self, imdb = None, limit = 6, remove_only = False, mark_seen = False, **kwargs):
    ignored = splitString(Env.prop('suggest_ignore', default = ''))
    seen = splitString(Env.prop('suggest_seen', default = ''))

    new_suggestions = []
    if imdb:
        if mark_seen:
            seen.append(imdb)
            Env.prop('suggest_seen', ','.join(set(seen)))
        elif not remove_only:
            ignored.append(imdb)
            Env.prop('suggest_ignore', ','.join(set(ignored)))

        new_suggestions = self.updateSuggestionCache(ignore_imdb = imdb, limit = limit, ignored = ignored, seen = seen)

    if len(new_suggestions) <= limit:
        return {
            'result': False
        }

    # Only return new (last) item
    media = {
        'status': 'suggested',
        'title': getTitle(new_suggestions[limit]),
        'type': 'movie',
        'info': new_suggestions[limit],
        'identifiers': {
            'imdb': new_suggestions[limit].get('imdb')
        }
    }

    return {
        'result': True,
        'movie': media
    }
def single(self, movie, search_protocols = None, manual = False, force_download = False):

    # Find out search type
    try:
        if not search_protocols:
            search_protocols = fireEvent('searcher.protocols', single = True)
    except SearchSetupError:
        return

    if not movie['profile_id'] or (movie['status'] == 'done' and not manual):
        log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.')
        fireEvent('media.restatus', movie['_id'], single = True)
        return

    default_title = getTitle(movie)
    if not default_title:
        log.error('No proper info found for movie, removing it from library to stop it from causing more issues.')
        fireEvent('media.delete', movie['_id'], single = True)
        return

    # Update media status and check if it is still not done (due to the "stop searching after" feature)
    if fireEvent('media.restatus', movie['_id'], single = True) == 'done':
        log.debug('No better quality found, marking movie %s as done.', default_title)

    pre_releases = fireEvent('quality.pre_releases', single = True)
    release_dates = fireEvent('movie.update_release_dates', movie['_id'], merge = True)

    found_releases = []
    previous_releases = movie.get('releases', [])
    too_early_to_search = []
    outside_eta_results = 0
    always_search = self.conf('always_search')
    ignore_eta = manual
    total_result_count = 0

    fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'_id': movie['_id']}, message = 'Searching for "%s"' % default_title)

    # Ignore ETA once every 7 days
    if not always_search:
        prop_name = 'last_ignored_eta.%s' % movie['_id']
        last_ignored_eta = float(Env.prop(prop_name, default = 0))
        if last_ignored_eta < time.time() - 604800:
            ignore_eta = True
            Env.prop(prop_name, value = time.time())

    db = get_db()

    profile = db.get('id', movie['profile_id'])
    ret = False

    for index, q_identifier in enumerate(profile.get('qualities', [])):
        quality_custom = {
            'index': index,
            'quality': q_identifier,
            'finish': profile['finish'][index],
            'wait_for': tryInt(profile['wait_for'][index]),
            '3d': profile['3d'][index] if profile.get('3d') else False,
            'minimum_score': profile.get('minimum_score', 1),
        }

        could_not_be_released = not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year'])
        if not always_search and could_not_be_released:
            too_early_to_search.append(q_identifier)

            # Skip release, if ETA isn't ignored
            if not ignore_eta:
                continue

        has_better_quality = 0

        # See if better quality is available
        for release in movie.get('releases', []):
            if release['status'] not in ['available', 'ignored', 'failed']:
                is_higher = fireEvent('quality.ishigher',
                                      {'identifier': q_identifier, 'is_3d': quality_custom.get('3d', 0)},
                                      {'identifier': release['quality'], 'is_3d': release.get('is_3d', 0)},
                                      profile, single = True)
                if is_higher != 'higher':
                    has_better_quality += 1

        # Don't search for a quality lower than what is already available
        if has_better_quality > 0:
            log.info('Better quality (%s) already available or snatched for %s', (q_identifier, default_title))
            fireEvent('media.restatus', movie['_id'], single = True)
            break

        quality = fireEvent('quality.single', identifier = q_identifier, single = True)
        log.info('Search for %s in %s%s', (default_title, quality['label'], ' ignoring ETA' if always_search or ignore_eta else ''))

        # Extend quality with profile customs
        quality['custom'] = quality_custom

        results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or []

        # Check if movie wasn't deleted while searching
        if not fireEvent('media.get', movie.get('_id'), single = True):
            break

        # Add them to this movie's releases list
        found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True)
        results_count = len(found_releases)
        total_result_count += results_count
        if results_count == 0:
            log.debug('Nothing found for %s in %s', (default_title, quality['label']))

        # Keep track of releases found outside the ETA window
        outside_eta_results += results_count if could_not_be_released else 0

        # Don't trigger a download, but notify the user of available releases
        if could_not_be_released and results_count > 0:
            log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title))

        # Try to find a valid result and download it
        if (force_download or not could_not_be_released or always_search) and fireEvent('release.try_download_result', results, movie, quality_custom, single = True):
            ret = True

        # Remove releases that aren't found anymore
        temp_previous_releases = []
        for release in previous_releases:
            if release.get('status') == 'available' and release.get('identifier') not in found_releases:
                fireEvent('release.delete', release.get('_id'), single = True)
            else:
                temp_previous_releases.append(release)
        previous_releases = temp_previous_releases
        del temp_previous_releases

        # Break if CP wants to shut down
        if self.shuttingDown() or ret:
            break

    if total_result_count > 0:
        fireEvent('media.tag', movie['_id'], 'recent', update_edited = True, single = True)

    if len(too_early_to_search) > 0:
        log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))

        if outside_eta_results > 0:
            message = 'Found %s releases for "%s" before ETA. Select and download via the dashboard.' % (outside_eta_results, default_title)
            log.info(message)

            if not manual:
                fireEvent('media.available', message = message, data = {})

    fireEvent('notify.frontend', type = 'movie.searcher.ended', data = {'_id': movie['_id']})
    return ret
def _search(self, media, quality, results):

    movie_title = getTitle(media)
    quality_id = quality['identifier']

    params = mergeDicts(self.quality_search_params[quality_id].copy(), {
        'order_by': 'relevance',
        'order_way': 'descending',
        'searchstr': getIdentifier(media)
    })

    url = '%s?json=noredirect&%s' % (self.urls['torrent'], tryUrlencode(params))
    res = self.getJsonData(url)

    try:
        if not 'Movies' in res:
            return

        authkey = res['AuthKey']
        passkey = res['PassKey']

        for ptpmovie in res['Movies']:
            if not 'Torrents' in ptpmovie:
                log.debug('Movie %s (%s) has NO torrents', (ptpmovie['Title'], ptpmovie['Year']))
                continue

            log.debug('Movie %s (%s) has %d torrents', (ptpmovie['Title'], ptpmovie['Year'], len(ptpmovie['Torrents'])))
            for torrent in ptpmovie['Torrents']:
                torrent_id = tryInt(torrent['Id'])
                torrentdesc = '%s %s %s' % (torrent['Resolution'], torrent['Source'], torrent['Codec'])
                torrentscore = 0

                if 'GoldenPopcorn' in torrent and torrent['GoldenPopcorn']:
                    torrentdesc += ' HQ'
                    if self.conf('prefer_golden'):
                        torrentscore += 5000
                if 'FreeleechType' in torrent:
                    torrentdesc += ' Freeleech'
                    if self.conf('prefer_freeleech'):
                        torrentscore += 7000
                if 'Scene' in torrent and torrent['Scene']:
                    torrentdesc += ' Scene'
                    if self.conf('prefer_scene'):
                        torrentscore += 2000
                if 'RemasterTitle' in torrent and torrent['RemasterTitle']:
                    torrentdesc += self.htmlToASCII(' %s' % torrent['RemasterTitle'])

                torrentdesc += ' (%s)' % quality_id
                torrent_name = re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) - %s' % (movie_title, ptpmovie['Year'], torrentdesc))

                def extra_check(item):
                    return self.torrentMeetsQualitySpec(item, quality_id)

                results.append({
                    'id': torrent_id,
                    'name': torrent_name,
                    'Source': torrent['Source'],
                    'Checked': 'true' if torrent['Checked'] else 'false',
                    'Resolution': torrent['Resolution'],
                    'url': '%s?action=download&id=%d&authkey=%s&torrent_pass=%s' % (self.urls['torrent'], torrent_id, authkey, passkey),
                    'detail_url': self.urls['detail'] % torrent_id,
                    'date': tryInt(time.mktime(parse(torrent['UploadTime']).timetuple())),
                    'size': tryInt(torrent['Size']) / (1024 * 1024),
                    'seeders': tryInt(torrent['Seeders']),
                    'leechers': tryInt(torrent['Leechers']),
                    'score': torrentscore,
                    'extra_check': extra_check,
                })

    except:
        log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def add(self, params = None, force_readd = True, search_after = True, update_after = True, notify_after = True, status = None):
    if not params: params = {}

    # Make sure it's a correct zero filled imdb id
    params['identifier'] = getImdb(params.get('identifier', ''))

    if not params.get('identifier'):
        msg = 'Can\'t add movie without imdb identifier.'
        log.error(msg)
        fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg)
        return False
    elif not params.get('info'):
        try:
            is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), adding = True, single = True)
            if not is_movie:
                msg = 'Can\'t add movie, seems to be a TV show.'
                log.error(msg)
                fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg)
                return False
        except:
            pass

    info = params.get('info')
    if not info or (info and len(info.get('titles', [])) == 0):
        info = fireEvent('movie.info', merge = True, extended = False, identifier = params.get('identifier'))

    # Allow force re-add overwrite from param
    if 'force_readd' in params:
        fra = params.get('force_readd')
        force_readd = fra.lower() not in ['0', '-1'] if not isinstance(fra, bool) else fra

    # Set default title
    def_title = self.getDefaultTitle(info)

    # Default profile and category
    default_profile = {}
    if (not params.get('profile_id') and status != 'done') or params.get('ignore_previous', False):
        default_profile = fireEvent('profile.default', single = True)
    cat_id = params.get('category_id')

    try:
        db = get_db()

        media = {
            '_t': 'media',
            'type': 'movie',
            'title': def_title,
            'identifiers': {
                'imdb': params.get('identifier')
            },
            'status': status if status else 'active',
            'profile_id': params.get('profile_id') or default_profile.get('_id'),
            'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None,
        }

        # Update movie info
        try: del info['in_wanted']
        except: pass
        try: del info['in_library']
        except: pass
        media['info'] = info

        new = False
        previous_profile = None
        try:
            m = db.get('media', 'imdb-%s' % params.get('identifier'), with_doc = True)['doc']

            try:
                db.get('id', m.get('profile_id'))
                previous_profile = m.get('profile_id')
            except RecordNotFound:
                pass
            except:
                log.error('Failed getting previous profile: %s', traceback.format_exc())
        except:
            new = True
            m = db.insert(media)

        # Update dict to be usable
        m.update(media)

        added = True
        do_search = False
        search_after = search_after and self.conf('search_on_add', section = 'moviesearcher')
        onComplete = None

        if new:
            if search_after:
                onComplete = self.createOnComplete(m['_id'])
            search_after = False
        elif force_readd:

            # Clean snatched history
            for release in fireEvent('release.for_media', m['_id'], single = True):
                if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']:
                    if params.get('ignore_previous', False):
                        fireEvent('release.update_status', release['_id'], status = 'ignored')
                    else:
                        fireEvent('release.delete', release['_id'], single = True)

            m['profile_id'] = (params.get('profile_id') or default_profile.get('_id')) if not previous_profile else previous_profile
            m['category_id'] = cat_id if cat_id is not None and len(cat_id) > 0 else (m.get('category_id') or None)
            m['last_edit'] = int(time.time())
            m['tags'] = []

            do_search = True
            db.update(m)
        else:
            try: del params['info']
            except: pass
            log.debug('Movie already exists, not updating: %s', params)
            added = False

        # Trigger update info
        if added and update_after:
            # Do full update to get images etc
            fireEventAsync('movie.update', m['_id'], default_title = params.get('title'), on_complete = onComplete)

        # Remove available releases
        for rel in fireEvent('release.for_media', m['_id'], single = True):
            if rel['status'] == 'available':
                db.delete(rel)

        movie_dict = fireEvent('media.get', m['_id'], single = True)
        if not movie_dict:
            log.debug('Failed adding media, can\'t find it anymore')
            return False

        if do_search and search_after:
            onComplete = self.createOnComplete(m['_id'])
            onComplete()

        if added and notify_after:

            if params.get('title'):
                message = 'Successfully added "%s" to your wanted list.' % params.get('title', '')
            else:
                title = getTitle(m)
                if title:
                    message = 'Successfully added "%s" to your wanted list.' % title
                else:
                    message = 'Successfully added to your wanted list.'
            fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = message)

        return movie_dict
    except:
        log.error('Failed adding media: %s', traceback.format_exc())
def getNfo(self, movie_info=None, data=None, i=0):
    if not data: data = {}
    if not movie_info: movie_info = {}

    nfoxml = Element('details')

    # Title
    try:
        el = SubElement(nfoxml, 'title')
        el.text = toUnicode(getTitle(data))
    except:
        pass

    # IMDB id
    try:
        el = SubElement(nfoxml, 'id')
        el.text = toUnicode(data['identifier'])
    except:
        pass

    # Runtime
    try:
        runtime = SubElement(nfoxml, 'runtime')
        runtime.text = '%s min' % movie_info.get('runtime')
    except:
        pass

    # Other values
    types = ['year', 'mpaa', 'originaltitle:original_title', 'outline', 'plot', 'tagline', 'premiered:released']
    for type in types:

        if ':' in type:
            name, type = type.split(':')
        else:
            name = type

        try:
            if movie_info.get(type):
                el = SubElement(nfoxml, name)
                el.text = toUnicode(movie_info.get(type, ''))
        except:
            pass

    # Rating
    for rating_type in ['imdb', 'rotten', 'tmdb']:
        try:
            r, v = movie_info['rating'][rating_type]
            rating = SubElement(nfoxml, 'rating')
            rating.text = str(r)
            votes = SubElement(nfoxml, 'votes')
            votes.text = str(v)
            break
        except:
            log.debug('Failed adding rating info from %s: %s', (rating_type, traceback.format_exc()))

    # Genre
    for genre in movie_info.get('genres', []):
        genres = SubElement(nfoxml, 'genre')
        genres.text = toUnicode(genre)

    # Actors
    for actor_name in movie_info.get('actor_roles', {}):
        role_name = movie_info['actor_roles'][actor_name]

        actor = SubElement(nfoxml, 'actor')
        name = SubElement(actor, 'name')
        name.text = toUnicode(actor_name)
        if role_name:
            role = SubElement(actor, 'role')
            role.text = toUnicode(role_name)
        if movie_info['images']['actors'].get(actor_name):
            thumb = SubElement(actor, 'thumb')
            thumb.text = toUnicode(movie_info['images']['actors'].get(actor_name))

    # Directors
    for director_name in movie_info.get('directors', []):
        director = SubElement(nfoxml, 'director')
        director.text = toUnicode(director_name)

    # Writers
    for writer in movie_info.get('writers', []):
        writers = SubElement(nfoxml, 'credits')
        writers.text = toUnicode(writer)

    # Sets or collections
    collection_name = movie_info.get('collection')
    if collection_name:
        collection = SubElement(nfoxml, 'set')
        collection.text = toUnicode(collection_name)
        sorttitle = SubElement(nfoxml, 'sorttitle')
        sorttitle.text = '%s %s' % (toUnicode(collection_name), movie_info.get('year'))

    # Images
    for image_url in movie_info['images']['poster_original']:
        image = SubElement(nfoxml, 'thumb')
        image.text = toUnicode(image_url)

    image_types = [
        ('fanart', 'backdrop_original'),
        ('banner', 'banner'),
        ('discart', 'disc_art'),
        ('logo', 'logo'),
        ('clearart', 'clear_art'),
        ('landscape', 'landscape'),
        ('extrathumb', 'extra_thumbs'),
        ('extrafanart', 'extra_fanart'),
    ]

    for image_type in image_types:
        sub, type = image_type

        sub_element = SubElement(nfoxml, sub)
        for image_url in movie_info['images'][type]:
            image = SubElement(sub_element, 'thumb')
            image.text = toUnicode(image_url)

    # Add trailer if found
    trailer_found = False
    if data.get('renamed_files'):
        for filename in data.get('renamed_files'):
            if 'trailer' in filename:
                trailer = SubElement(nfoxml, 'trailer')
                trailer.text = toUnicode(filename)
                trailer_found = True
    if not trailer_found and data['files'].get('trailer'):
        trailer = SubElement(nfoxml, 'trailer')
        trailer.text = toUnicode(data['files']['trailer'][0])

    # Add file metadata
    fileinfo = SubElement(nfoxml, 'fileinfo')
    streamdetails = SubElement(fileinfo, 'streamdetails')

    # Video data
    if data['meta_data'].get('video'):
        video = SubElement(streamdetails, 'video')
        codec = SubElement(video, 'codec')
        codec.text = toUnicode(data['meta_data']['video'])
        aspect = SubElement(video, 'aspect')
        aspect.text = str(data['meta_data']['aspect'])
        width = SubElement(video, 'width')
        width.text = str(data['meta_data']['resolution_width'])
        height = SubElement(video, 'height')
        height.text = str(data['meta_data']['resolution_height'])

    # Audio data
    if data['meta_data'].get('audio'):
        audio = SubElement(streamdetails, 'audio')
        codec = SubElement(audio, 'codec')
        codec.text = toUnicode(data['meta_data'].get('audio'))
        channels = SubElement(audio, 'channels')
        channels.text = toUnicode(data['meta_data'].get('audio_channels'))

    # Clean up the xml and return it
    nfoxml = xml.dom.minidom.parseString(tostring(nfoxml))
    xml_string = nfoxml.toprettyxml(indent = '  ')
    text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
    xml_string = text_re.sub('>\g<1></', xml_string)

    return xml_string.encode('utf-8')
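# --- Illustrative sketch (not part of the original plugin) ---------------------
# Why getNfo() runs that regex after toprettyxml(): on Python 2 (CouchPotato's
# target), minidom puts element text on its own indented line, and the substitution
# folds it back onto one line. Recent Python 3 releases already keep single text
# children inline, so the regex is then effectively a no-op. Standalone demo:
import re
import xml.dom.minidom
from xml.etree.ElementTree import Element, SubElement, tostring

root = Element('details')
title = SubElement(root, 'title')
title.text = 'Example Movie'

pretty = xml.dom.minidom.parseString(tostring(root)).toprettyxml(indent = '  ')
text_re = re.compile(r'>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
print(text_re.sub(r'>\g<1></', pretty))
# -------------------------------------------------------------------------------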
def getSearchTitle(self, media):
    if media['type'] == 'movie':
        return getTitle(media)
def download(self, data, media, manual = False):

    # Test to see if any downloaders are enabled for this type
    downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
    if not downloader_enabled:
        log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', data.get('protocol'))
        return False

    # Download NZB or torrent file
    filedata = None
    if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
        try:
            filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
        except:
            log.error('Tried to download, but the "%s" provider gave an error: %s', (data.get('protocol'), traceback.format_exc()))
            return False

        if filedata == 'try_next':
            return filedata
        elif not filedata:
            return False

    # Send NZB or torrent file to downloader
    download_result = fireEvent('download', data = data, media = media, manual = manual, filedata = filedata, single = True)
    if not download_result:
        log.info('Tried to download, but the "%s" downloader gave an error', data.get('protocol'))
        return False
    log.debug('Downloader result: %s', download_result)

    try:
        db = get_db()

        try:
            rls = db.get('release_identifier', md5(data['url']), with_doc = True)['doc']
        except:
            log.error('No release found to store download information in')
            return False

        renamer_enabled = Env.setting('enabled', 'renamer')

        # Save download-id info if returned
        if isinstance(download_result, dict):
            rls['download_info'] = download_result
            db.update(rls)

        log_movie = '%s (%s) in %s' % (getTitle(media), media['info'].get('year'), rls['quality'])
        snatch_message = 'Snatched "%s": %s from %s' % (data.get('name'), log_movie, (data.get('provider', '') + data.get('provider_extra', '')))
        log.info(snatch_message)
        fireEvent('%s.snatched' % data['type'], message = snatch_message, data = media)

        # Mark release as snatched
        if renamer_enabled:
            self.updateStatus(rls['_id'], status = 'snatched')

        # If renamer isn't used, mark media done if finished or release downloaded
        else:
            if media['status'] == 'active':
                profile = db.get('id', media['profile_id'])
                if fireEvent('quality.isfinish', {'identifier': rls['quality'], 'is_3d': rls.get('is_3d', False)}, profile, single = True):

                    log.info('Renamer disabled, marking media as finished: %s', log_movie)

                    # Mark release done
                    self.updateStatus(rls['_id'], status = 'done')

                    # Mark media done
                    fireEvent('media.restatus', media['_id'], single = True)

                    return True

            # Assume release downloaded
            self.updateStatus(rls['_id'], status = 'downloaded')
    except:
        log.error('Failed storing download status: %s', traceback.format_exc())
        return False

    return True
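# --- Illustrative sketch (not part of the original plugin) ---------------------
# Assumption: CouchPotato's md5() helper used in the release lookup above hashes
# the download URL, so the same URL always maps back to the same stored release.
# A plain standard-library equivalent of that idea, with a made-up URL:
import hashlib

url = 'http://example.org/get/some-release.torrent'
release_identifier = hashlib.md5(url.encode('utf-8')).hexdigest()
print(release_identifier)
# -------------------------------------------------------------------------------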
def afterUpdate():
    if not self.in_progress or self.shuttingDown():
        return

    total = self.in_progress[folder]['total']
    movie_dict = fireEvent('media.get', identifier, single = True)

    if movie_dict:
        fireEvent('notify.frontend', type = 'movie.added', data = movie_dict,
                  message = None if total > 5 else 'Added "%s" to manage.' % getTitle(movie_dict))