def getHosts(self):
    """Build a list of host-configuration dicts from the parallel
    comma-separated settings (use, host, api_key, extra_score, custom_tag).

    Each setting's nth entry belongs to the nth host; entries missing from
    a shorter list fall back to a sensible default.
    """
    uses = splitString(str(self.conf('use')), clean = False)
    hosts = splitString(self.conf('host'), clean = False)
    api_keys = splitString(self.conf('api_key'), clean = False)
    extra_score = splitString(self.conf('extra_score'), clean = False)
    custom_tags = splitString(self.conf('custom_tag'), clean = False)

    host_list = []

    for nr in range(len(hosts)):

        # The parallel lists can be shorter than `hosts` when the user
        # didn't fill in every field; only IndexError is expected here
        # (was a bare except before).
        try: key = api_keys[nr]
        except IndexError: key = ''

        try: host = hosts[nr]
        except IndexError: host = ''

        try: score = tryInt(extra_score[nr])
        except IndexError: score = 0

        try: custom_tag = custom_tags[nr]
        except IndexError: custom_tag = ''

        host_list.append({
            'use': uses[nr],
            'host': host,
            'api_key': key,
            'extra_score': score,
            'custom_tag': custom_tag
        })

    return host_list
def getFromURL(self, url):
    """Scrape a (possibly paginated) IMDB list page and return the IMDB ids
    found on it.

    Tries the legacy "compact list" markup first; on failure falls back to
    locating the newer layout containers with BeautifulSoup. Returns a list
    of imdb ids, possibly empty when parsing fails.
    """
    log.debug('Getting IMDBs from: %s', url)
    html = self.getHTMLData(url)

    try:
        # Old markup: the list body sits between these two known markers.
        split = splitString(html, split_on = "<div class=\"list compact\">")[1]
        html = splitString(split, split_on = "<div class=\"pages\">")[0]
    except:
        try:
            # Newer markup: find the main container, then try the known
            # list wrapper classes in order.
            split = splitString(html, split_on = "<div id=\"main\">")
            if len(split) < 2:
                log.error('Failed parsing IMDB page "%s", unexpected html.', url)
                return []
            html = BeautifulSoup(split[1])
            for x in ['list compact', 'lister', 'list detail sub-list']:
                html2 = html.find('div', attrs = {
                    'class': x
                })

                if html2:
                    html = html2.contents
                    html = ''.join([str(x) for x in html])
                    break
        except:
            log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc()))

    html = ss(html)
    imdbs = getImdb(html, multiple = True) if html else []

    return imdbs
def getIMDBids(self):
    """Collect IMDB ids from every enabled automation RSS feed."""

    movies = []
    enablers = [tryInt(x) for x in splitString(self.conf("automation_urls_use"))]

    for index, rss_url in enumerate(splitString(self.conf("automation_urls"))):

        # Skip feeds the user has toggled off.
        if not enablers[index]:
            continue

        rss_movies = self.getRSSData(rss_url, headers = {"Referer": ""})

        for movie in rss_movies:
            nameyear = fireEvent("scanner.name_year", self.getTextElement(movie, "title"), single = True)
            imdb = self.search(nameyear.get("name"), nameyear.get("year"), imdb_only = True)

            if imdb:
                movies.append(imdb)

    return movies
def getMovie(self, url):
    """Extract the movie name from an ijw-style url and search for it."""

    name = splitString(splitString(url, '/ijw_')[-1], '/')[0]

    # Drop a leftover prefix in case the url didn't contain the separator.
    if name.startswith('ijw_'):
        name = name[len('ijw_'):]

    year_name = fireEvent('scanner.name_year', name, single = True)

    return self.search(year_name.get('name'), year_name.get('year'))
def suggestView(self, limit = 6, **kwargs):
    """API endpoint: return up to `limit` suggested movies.

    Suggestions are served from cache when available; otherwise they are
    rebuilt from the user's active/done library minus the persisted
    ignore/seen lists, then cached.
    """
    if self.isDisabled():
        return {
            'success': True,
            'movies': []
        }

    movies = splitString(kwargs.get('movies', ''))
    ignored = splitString(kwargs.get('ignored', ''))
    seen = splitString(kwargs.get('seen', ''))

    cached_suggestion = self.getCache('suggestion_cached')
    if cached_suggestion:
        suggestions = cached_suggestion
    else:

        # No explicit seed movies given: use the library.
        if not movies or len(movies) == 0:
            active_movies = fireEvent('media.with_status', ['active', 'done'], types = 'movie', single = True)
            movies = [getIdentifier(x) for x in active_movies]

        if not ignored or len(ignored) == 0:
            ignored = splitString(Env.prop('suggest_ignore', default = ''))
        if not seen or len(seen) == 0:
            movies.extend(splitString(Env.prop('suggest_seen', default = '')))

        suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
        self.setCache('suggestion_cached', suggestions, timeout = 6048000)  # Cache for 10 weeks

    medias = []
    for suggestion in suggestions[:int(limit)]:

        # Cache poster (prefer tmdb-hosted images when available)
        posters = suggestion.get('images', {}).get('poster', [])
        poster = [x for x in posters if 'tmdb' in x]
        posters = poster if len(poster) > 0 else posters
        cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False
        files = {'image_poster': [cached_poster] } if cached_poster else {}

        medias.append({
            'status': 'suggested',
            'title': getTitle(suggestion),
            'type': 'movie',
            'info': suggestion,
            'files': files,
            'identifiers': {
                'imdb': suggestion.get('imdb')
            }
        })

    return {
        'success': True,
        'movies': medias
    }
def getIMDBids(self):
    """Collect IMDB ids from every enabled IMDB watchlist/list url.

    Normalises user-pasted urls into the compact list view, then pages
    through each list until a short page signals the end (or the app is
    shutting down).
    """
    movies = []

    watchlist_enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
    watchlist_urls = splitString(self.conf('automation_urls'))

    index = -1
    for watchlist_url in watchlist_urls:

        try:
            # Get list ID
            ids = re.findall('(?:list/|list_id=)([a-zA-Z0-9\-_]{11})', watchlist_url)
            if len(ids) == 1:
                watchlist_url = 'http://www.imdb.com/list/%s/?view=compact&sort=created:asc' % ids[0]
            # Try find user id with watchlist
            else:
                userids = re.findall('(ur\d{7,9})', watchlist_url)
                if len(userids) == 1:
                    watchlist_url = 'http://www.imdb.com/user/%s/watchlist?view=compact&sort=created:asc' % userids[0]
        except:
            log.error('Failed getting id from watchlist: %s', traceback.format_exc())

        index += 1
        if not watchlist_enablers[index]:
            continue

        start = 0
        while True:
            try:

                w_url = '%s&start=%s' % (watchlist_url, start)
                imdbs = self.getFromURL(w_url)

                for imdb in imdbs:
                    if imdb not in movies:
                        movies.append(imdb)

                    if self.shuttingDown():
                        break

                log.debug('Found %s movies on %s', (len(imdbs), w_url))

                # A short page means the end of the list was reached.
                if len(imdbs) < 225:
                    break

                start = len(movies)

            except:
                log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc()))
                break

    return movies
def charView(self, **kwargs):
    """API endpoint: list the starting characters available for the given
    media type / status / release-status filters."""

    media_types = splitString(kwargs.get('type', 'movie'))
    media_status = splitString(kwargs.get('status', None))
    media_release_status = splitString(kwargs.get('release_status', None))

    chars = self.availableChars(media_types, media_status, media_release_status)

    return {
        'success': True,
        'empty': len(chars) == 0,
        'chars': chars,
    }
def parseMovie(self, movie):
    """Convert an OMDB/IMDB API result (dict or json string) into the
    internal movie-info dict.

    Returns an empty dict for non-movies, parse errors or bad json.
    """

    movie_data = {}
    try:

        try:
            if isinstance(movie, (str, unicode)):
                movie = json.loads(movie)
        except ValueError:
            log.info('No proper json to decode')
            return movie_data

        if movie.get('Response') == 'Parse Error' or movie.get('Response') == 'False':
            return movie_data

        # Guard against a missing 'Type' key: movie.get('Type').lower()
        # used to raise AttributeError and be swallowed by the outer except.
        if movie.get('Type', '').lower() != 'movie':
            return movie_data

        # The API reports unknown fields as 'N/A'; drop those keys entirely.
        tmp_movie = movie.copy()
        for key in tmp_movie:
            if tmp_movie.get(key).lower() == 'n/a':
                del movie[key]

        year = tryInt(movie.get('Year', ''))

        movie_data = {
            'type': 'movie',
            'via_imdb': True,
            'titles': [movie.get('Title')] if movie.get('Title') else [],
            'original_title': movie.get('Title'),
            'images': {
                'poster': [movie.get('Poster', '')] if movie.get('Poster') and len(movie.get('Poster', '')) > 4 else [],
            },
            'rating': {
                'imdb': (tryFloat(movie.get('imdbRating', 0)), tryInt(movie.get('imdbVotes', '').replace(',', ''))),
                #'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', '').replace(',', ''))),
            },
            'imdb': str(movie.get('imdbID', '')),
            'mpaa': str(movie.get('Rated', '')),
            'runtime': self.runtimeToMinutes(movie.get('Runtime', '')),
            'released': movie.get('Released'),
            'year': year if isinstance(year, int) else None,
            'plot': movie.get('Plot'),
            'genres': splitString(movie.get('Genre', '')),
            'directors': splitString(movie.get('Director', '')),
            'writers': splitString(movie.get('Writer', '')),
            'actors': splitString(movie.get('Actors', '')),
        }
        # Strip falsy values so downstream merging doesn't overwrite
        # existing data with blanks.
        movie_data = dict((k, v) for k, v in movie_data.items() if v)
    except:
        log.error('Failed parsing IMDB API json: %s', traceback.format_exc())

    return movie_data
def calculate(self, nzb, movie): """ Calculate the score of a NZB, used for sorting later """ # Merge global and category preferred_words = splitString(Env.setting('preferred_words', section = 'searcher').lower()) try: preferred_words = removeDuplicate(preferred_words + splitString(movie['category']['preferred'].lower())) except: pass score = nameScore(toUnicode(nzb['name']), movie['info']['year'], preferred_words) for movie_title in movie['info']['titles']: score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title)) score += namePositionScore(toUnicode(nzb['name']), toUnicode(movie_title)) score += sizeScore(nzb['size']) # Torrents only if nzb.get('seeders'): try: score += nzb.get('seeders') * 100 / 15 score += nzb.get('leechers') * 100 / 30 except: pass # Provider score score += providerScore(nzb['provider']) # Duplicates in name score += duplicateScore(nzb['name'], getTitle(movie)) # Merge global and category ignored_words = splitString(Env.setting('ignored_words', section = 'searcher').lower()) try: ignored_words = removeDuplicate(ignored_words + splitString(movie['category']['ignored'].lower())) except: pass # Partial ignored words score += partialIgnoredScore(nzb['name'], getTitle(movie), ignored_words) # Ignore single downloads from multipart score += halfMultipartScore(nzb['name']) # Extra provider specific check extra_score = nzb.get('extra_score') if extra_score: score += extra_score(nzb) # Scene / Nuke scoring score += sceneScore(nzb['name']) return score
def toList(self, log_content = ''):
    """Parse raw (ANSI-coloured) log text into a list of dicts with
    'time', 'type' and 'message' keys."""

    # Records are terminated by the ANSI reset sequence + newline.
    logs_raw = re.split(r'\[0m\n', toUnicode(log_content))

    logs = []
    re_split = r'\x1b'
    for log_line in logs_raw:
        split = re.split(re_split, log_line)
        if split and len(split) == 3:
            try:
                # First chunk is "<date> <time> <level>".
                date, time, log_type = splitString(split[0], ' ')
                timestamp = '%s %s' % (date, time)
            except:
                timestamp = 'UNKNOWN'
                log_type = 'UNKNOWN'

            message = ''.join(split[1]) if len(split) > 1 else split[0]
            # Strip leftover colour-code remnants.
            message = re.sub('\[\d+m\[', '[', message)

            logs.append({
                'time': timestamp,
                'type': log_type,
                'message': message
            })

    return logs
def getIMDBids(self):
    """Collect IMDB ids for feed movies whose grab count exceeds the
    configured 'number_grabs' threshold."""

    movies = []
    urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]))

    for url in urls:

        if not urls[url]:
            continue

        rss_movies = self.getRSSData(url)

        for movie in rss_movies:
            description = self.getTextElement(movie, 'description')
            grabs = 0

            # The grab count lives in a child attribute element.
            for item in movie:
                if item.attrib.get('name') == 'grabs':
                    grabs = item.attrib.get('value')
                    break

            if int(grabs) > tryInt(self.conf('number_grabs')):

                # Guard against descriptions that don't match the expected
                # markup: re.match returning None used to raise
                # AttributeError on .group().
                title_match = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description)
                year_match = re.match(r'.*Year: (\d{4}).*', description)
                if not title_match or not year_match:
                    continue

                title = title_match.group(1)
                log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))
                year = year_match.group(1)
                imdb = self.search(title, year)

                if imdb and self.isMinimalMovie(imdb):
                    movies.append(imdb['imdb'])

    return movies
def directories(self):
    """Return the configured library directories (split on '::'), or an
    empty list when the setting is missing or blank."""
    try:
        # Read the setting once instead of twice.
        library = self.conf('library', default = '').strip()
        if library:
            return splitString(library, '::')
    except:
        pass

    return []
def notify(self, message="", data=None, listener=None):
    """Send a notification (and optionally a library-scan request) to every
    configured XBMC host.

    Returns True only when every issued call reported success.
    """
    if not data:
        data = {}

    hosts = splitString(self.conf("host"))

    successful = 0
    max_successful = 0
    for host in hosts:

        # Probe JSON-RPC support once per host; cached in
        # self.use_json_notifications.
        if self.use_json_notifications.get(host) is None:
            self.getXBMCJSONversion(host, message=message)

        if self.use_json_notifications.get(host):
            calls = [
                (
                    "GUI.ShowNotification",
                    None,
                    {"title": self.default_title, "message": message, "image": self.getNotificationImage("small")},
                )
            ]

            # Only trigger a scan on the first host when 'only_first' is set.
            if data and data.get("destination_dir") and (not self.conf("only_first") or hosts.index(host) == 0):
                param = {}
                # Pass the directory only for a targeted (non-full) scan of
                # a path the host can actually reach.
                if not self.conf("force_full_scan") and (
                    self.conf("remote_dir_scan") or socket.getfqdn("localhost") == socket.getfqdn(host.split(":")[0])
                ):
                    param = {"directory": data["destination_dir"]}
                calls.append(("VideoLibrary.Scan", None, param))

            max_successful += len(calls)
            response = self.request(host, calls)
        else:
            # Fallback for hosts without JSON-RPC support.
            response = self.notifyXBMCnoJSON(host, {"title": self.default_title, "message": message})

            if data and data.get("destination_dir") and (not self.conf("only_first") or hosts.index(host) == 0):
                response += self.request(host, [("VideoLibrary.Scan", None, {})])
                max_successful += 1

            max_successful += 1

        try:
            # Count successes so the overall result reflects all hosts/calls.
            for result in response:
                if result.get("result") and result["result"] == "OK":
                    successful += 1
                elif result.get("error"):
                    log.error(
                        "XBMC error; %s: %s (%s)",
                        (result["id"], result["error"]["message"], result["error"]["code"]),
                    )

        except:
            log.error("Failed parsing results: %s", traceback.format_exc())

    return successful == max_successful
def deleteView(self, id = '', **kwargs):
    """API endpoint: delete one or more media items, given a
    comma-separated id string."""

    delete_from = kwargs.get('delete_from', 'all')
    for media_id in splitString(id):
        self.delete(media_id, delete_from = delete_from)

    return {
        'success': True,
    }
def containsWords(self, rel_name, rel_words, conf, media):
    """Check a release against the configured word lists for `conf`
    (e.g. 'required'/'ignored').

    Word lists come from the global searcher setting merged with the
    media's category. A "/pattern/" entry is matched as a regex against the
    release name; "a&b" requires every part to be present in rel_words.
    Returns (words, matched_bool).
    """

    # Make sure it has required words
    words = splitString(self.conf('%s_words' % conf, section = 'searcher').lower())
    try:
        words = removeDuplicate(words + splitString(media['category'][conf].lower()))
    except:
        pass

    req_match = 0
    for req_set in words:
        # Slash-delimited entries are regular expressions.
        if len(req_set) >= 2 and (req_set[:1] + req_set[-1:]) == '//':
            if re.search(req_set[1:-1], rel_name):
                log.debug('Regex match: %s', req_set[1:-1])
                req_match += 1
        else:
            # '&' joins words that must all be present.
            req = splitString(req_set, '&')
            req_match += len(list(set(rel_words) & set(req))) == len(req)

    return words, req_match > 0
def getIMDBids(self):
    """Collect IMDB ids from the enabled iTunes RSS feeds."""

    movies = []

    enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
    urls = splitString(self.conf('automation_urls'))

    namespace = 'http://www.w3.org/2005/Atom'
    namespace_im = 'http://itunes.apple.com/rss'

    index = -1
    for url in urls:

        index += 1

        # Skip urls without a matching enabled toggle. Note "<=": with the
        # original "<", index == len(enablers) slipped through and
        # enablers[index] raised IndexError.
        if len(enablers) == 0 or len(enablers) <= index or not enablers[index]:
            continue

        try:
            cache_key = 'itunes.rss.%s' % md5(url)
            rss_data = self.getCache(cache_key, url)

            data = XMLTree.fromstring(rss_data)

            if data is not None:
                entry_tag = str(QName(namespace, 'entry'))
                rss_movies = self.getElements(data, entry_tag)

                for movie in rss_movies:
                    name_tag = str(QName(namespace_im, 'name'))
                    name = self.getTextElement(movie, name_tag)

                    releaseDate_tag = str(QName(namespace_im, 'releaseDate'))
                    releaseDateText = self.getTextElement(movie, releaseDate_tag)
                    # NOTE(review): assumes the feed always uses a fixed
                    # -07:00 offset and midnight timestamp — confirm against
                    # the live feed format.
                    year = datetime.datetime.strptime(releaseDateText, '%Y-%m-%dT00:00:00-07:00').strftime("%Y")

                    imdb = self.search(name, year)

                    if imdb and self.isMinimalMovie(imdb):
                        movies.append(imdb['imdb'])

        except:
            log.error('Failed loading iTunes rss feed: %s %s', (url, traceback.format_exc()))

    return movies
def listView(self, **kwargs):
    """API endpoint: list media matching the given type/status filters."""

    filters = {
        'types': splitString(kwargs.get('type')),
        'status': splitString(kwargs.get('status')),
        'release_status': splitString(kwargs.get('release_status')),
        'status_or': kwargs.get('status_or') is not None,
        'limit_offset': kwargs.get('limit_offset'),
        'with_tags': splitString(kwargs.get('with_tags')),
        'starts_with': kwargs.get('starts_with'),
        'search': kwargs.get('search'),
    }
    total_movies, movies = self.list(**filters)

    return {
        'success': True,
        'empty': len(movies) == 0,
        'total': total_movies,
        'movies': movies,
    }
def migrate(self):
    """One-time migration: split the legacy single 'url' setting into the
    separate ssl / host / rpc_url settings, then remove it."""

    url = self.conf('url')
    if not url:
        return

    host_split = splitString(url.split('://')[-1], split_on = '/')

    self.conf('ssl', value = url.startswith('https'))
    self.conf('host', value = host_split[0].strip())
    self.conf('rpc_url', value = '/'.join(host_split[1:]))

    self.deleteConf('url')
def ignoreView(self, imdb = None, **kwargs):
    """API endpoint: add an imdb id to the persisted charts ignore list."""

    ignored = splitString(Env.prop('charts_ignore', default = ''))

    if imdb:
        # Persist the updated, de-duplicated list.
        ignored.append(imdb)
        Env.prop('charts_ignore', ','.join(set(ignored)))

    return {
        'result': True
    }
def automationView(self, force_update = False, **kwargs):
    """API endpoint: return the chart lists, filtering out movies already
    in the library or on the ignore list, with posters cached locally."""

    db = get_db()

    charts = fireEvent('automation.get_chart_list', merge = True)
    ignored = splitString(Env.prop('charts_ignore', default = ''))

    # Create a list the movie/list.js can use
    for chart in charts:
        medias = []
        for media in chart.get('list', []):

            identifier = media.get('imdb')
            if identifier in ignored:
                continue

            try:
                # Skip movies that are already in the library.
                try:
                    in_library = db.get('media', 'imdb-%s' % identifier)
                    if in_library:
                        continue
                except RecordNotFound:
                    pass
            except:
                pass

            # Cache poster (prefer tmdb-hosted images when available)
            posters = media.get('images', {}).get('poster', [])
            poster = [x for x in posters if 'tmdb' in x]
            posters = poster if len(poster) > 0 else posters
            cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False
            files = {'image_poster': [cached_poster] } if cached_poster else {}

            medias.append({
                'status': 'chart',
                'title': getTitle(media),
                'type': 'movie',
                'info': media,
                'files': files,
                'identifiers': {
                    'imdb': identifier
                }
            })

        chart['list'] = medias

    return {
        'success': True,
        'count': len(charts),
        'charts': charts,
        'ignored': ignored,
    }
def notify(self, message = '', data = None, listener = None):
    """Send `message` as an e-mail using the configured SMTP settings.

    Returns True on success, False on any failure (logged).
    """
    if not data:
        data = {}

    # Extract all the settings from settings
    from_address = self.conf('from')
    to_address = self.conf('to')
    ssl = self.conf('ssl')
    smtp_server = self.conf('smtp_server')
    smtp_user = self.conf('smtp_user')
    smtp_pass = self.conf('smtp_pass')
    smtp_port = self.conf('smtp_port')
    starttls = self.conf('starttls')

    # Make the basic message
    email = MIMEText(toUnicode(message), _charset = Env.get('encoding'))
    email['Subject'] = '%s: %s' % (self.default_title, toUnicode(message))
    email['From'] = from_address
    email['To'] = to_address
    email['Date'] = formatdate(localtime = 1)
    email['Message-ID'] = make_msgid()

    try:
        # Open the SMTP connection, via SSL if requested
        log.debug("Connecting to host %s on port %s" % (smtp_server, smtp_port))
        log.debug("SMTP over SSL %s", ("enabled" if ssl == 1 else "disabled"))
        mailserver = smtplib.SMTP_SSL(smtp_server, smtp_port) if ssl == 1 else smtplib.SMTP(smtp_server, smtp_port)

        if starttls:
            log.debug("Using StartTLS to initiate the connection with the SMTP server")
            mailserver.starttls()

        # Say hello to the server
        mailserver.ehlo()

        # Check too see if an login attempt should be attempted
        if len(smtp_user) > 0:
            log.debug("Logging on to SMTP server using username \'%s\'%s", (smtp_user, " and a password" if len(smtp_pass) > 0 else ""))
            mailserver.login(smtp_user, smtp_pass)

        # Send the e-mail
        log.debug("Sending the email")
        mailserver.sendmail(from_address, splitString(to_address), email.as_string())

        # Close the SMTP connection
        mailserver.quit()

        log.info('Email notification sent')

        return True
    except:
        log.error('E-mail failed: %s', traceback.format_exc())

    return False
def getWatchlist(self):
    """Fetch the watchlists of all enabled user ids and return the movies
    as a list of {'title': ..., 'year': ...} dicts."""

    enablers = [tryInt(x) for x in splitString(self.conf('automation_ids_use'))]
    ids = splitString(self.conf('automation_ids'))

    movies = []
    for index, user_id in enumerate(ids):

        # Respect the per-id enabled toggle.
        if not enablers[index]:
            continue

        data = self.getJsonData(self.url % user_id, decode_from = 'iso-8859-1')

        for entry in data:
            movies.append({
                'title': entry['movie']['title'],
                'year': entry['movie']['year']
            })

    return movies
def getIMDBids(self):
    """Return the IMDB ids of all resolvable watchlist movies."""

    ids = splitString(self.conf('automation_ids'))

    if len(ids) == 0:
        return []

    movies = []

    for movie in self.getWatchlist():
        imdb_id = self.search(movie.get('title'), movie.get('year'), imdb_only = True)
        # Only keep resolved ids: search can return a falsy result for a
        # miss, which previously leaked into the returned list (the sibling
        # RSS getIMDBids already guards this).
        if imdb_id:
            movies.append(imdb_id)

    return movies
def isMinimalMovie(self, movie):
    """Check a movie-info dict against the configured minimal requirements
    (year, rating, votes, required/ignored genres).

    Returns True when the movie passes every threshold. Note: mutates
    `movie` in place by flattening the imdb rating tuple into 'rating' and
    'votes'.
    """
    if not movie.get('rating'):
        log.info('ignoring %s as no rating is available for.', (movie['original_title']))
        return False

    # Flatten the imdb (rating, votes) tuple so the threshold loop below
    # can treat all three keys uniformly.
    if movie['rating'] and movie['rating'].get('imdb'):
        movie['votes'] = movie['rating']['imdb'][1]
        movie['rating'] = movie['rating']['imdb'][0]

    for minimal_type in ['year', 'rating', 'votes']:
        type_value = movie.get(minimal_type, 0)
        type_min = self.getMinimal(minimal_type)
        if type_value < type_min:
            log.info('%s too low for %s, need %s has %s', (minimal_type, movie['original_title'], type_min, type_value))
            return False

    movie_genres = [genre.lower() for genre in movie['genres']]
    required_genres = splitString(self.getMinimal('required_genres').lower())
    ignored_genres = splitString(self.getMinimal('ignored_genres').lower())

    req_match = 0
    for req_set in required_genres:
        # "a&b" means every listed genre must be present.
        req = splitString(req_set, '&')
        req_match += len(list(set(movie_genres) & set(req))) == len(req)

    if self.getMinimal('required_genres') and req_match == 0:
        log.info2('Required genre(s) missing for %s', movie['original_title'])
        return False

    for ign_set in ignored_genres:
        ign = splitString(ign_set, '&')
        if len(list(set(movie_genres) & set(ign))) == len(ign):
            log.info2('%s has blacklisted genre(s): %s', (movie['original_title'], ign))
            return False

    return True
def ignoreView(self, imdb = None, limit = 6, remove_only = False, mark_seen = False, **kwargs):
    """API endpoint: ignore (or mark seen) a suggestion and return the next
    suggestion that takes its place, if any."""

    ignored = splitString(Env.prop('suggest_ignore', default = ''))
    seen = splitString(Env.prop('suggest_seen', default = ''))

    new_suggestions = []
    if imdb:
        if mark_seen:
            seen.append(imdb)
            Env.prop('suggest_seen', ','.join(set(seen)))
        elif not remove_only:
            ignored.append(imdb)
            Env.prop('suggest_ignore', ','.join(set(ignored)))

        new_suggestions = self.updateSuggestionCache(ignore_imdb = imdb, limit = limit, ignored = ignored, seen = seen)

    # Not enough suggestions left to expose a replacement item.
    if len(new_suggestions) <= limit:
        return {
            'result': False
        }

    # Only return new (last) item
    media = {
        'status': 'suggested',
        'title': getTitle(new_suggestions[limit]),
        'type': 'movie',
        'info': new_suggestions[limit],
        'identifiers': {
            'imdb': new_suggestions[limit].get('imdb')
        }
    }

    return {
        'result': True,
        'movie': media
    }
def getWatchlist(self):
    """Scrape each enabled user's watchlist page and return the movie
    titles found as [{'title': ...}]."""

    enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
    urls = splitString(self.conf('automation_urls'))

    index = -1
    movies = []
    for username in urls:

        index += 1
        if not enablers[index]:
            continue

        soup = BeautifulSoup(self.getHTMLData(self.url % username))

        for movie in soup.find_all('li', attrs = {'class': 'poster-container'}):
            # NOTE(review): passing `movie` as the second (attrs) argument
            # of find() looks unintentional — `movie.find('img')` alone is
            # probably what was meant. Confirm before changing.
            img = movie.find('img', movie)
            title = img.get('alt')

            movies.append({
                'title': title
            })

    return movies
def refresh(self, id = '', **kwargs):
    """API endpoint: queue refresh jobs for the given media ids and tell
    the frontend they are busy."""

    ids = splitString(id)

    # Build a handler per id, skipping ids that yield no handler.
    handlers = [h for h in (self.createRefreshHandler(x) for x in ids) if h]

    fireEvent('notify.frontend', type = 'media.busy', data = {'_id': ids})
    fireEventAsync('schedule.queue', handlers = handlers)

    return {
        'success': True,
    }
def createStringIdentifier(self, file_path, folder = '', exclude_filename = False): identifier = file_path.replace(folder, '').lstrip(os.path.sep) # root folder identifier = os.path.splitext(identifier)[0] # ext # Exclude file name path if needed (f.e. for DVD files) if exclude_filename: identifier = identifier[:len(identifier) - len(os.path.split(identifier)[-1])] # Make sure the identifier is lower case as all regex is with lower case tags identifier = identifier.lower() try: path_split = splitString(identifier, os.path.sep) identifier = path_split[-2] if len(path_split) > 1 and len(path_split[-2]) > len(path_split[-1]) else path_split[-1] # Only get filename except: pass # multipart identifier = self.removeMultipart(identifier) # remove cptag identifier = self.removeCPTag(identifier) # simplify the string identifier = simplifyString(identifier) year = self.findYear(file_path) # groups, release tags, scenename cleaner identifier = re.sub(self.clean, '::', identifier).strip(':') # Year if year and identifier[:4] != year: split_by = ':::' if ':::' in identifier else year identifier = '%s %s' % (identifier.split(split_by)[0].strip(), year) else: identifier = identifier.split('::')[0] # Remove duplicates out = [] for word in identifier.split(): if not word in out: out.append(word) identifier = ' '.join(out) return simplifyString(identifier)
def _searchOnTitle(self, title, media, quality, results): search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search'] # Create search parameters search_params = self.buildUrl(title, media, quality) smin = quality.get('size_min') smax = quality.get('size_max') if smin and smax: search_params += ' size %sm - %sm' % (smin, smax) min_seeds = tryInt(self.conf('minimal_seeds')) if min_seeds: search_params += ' seed > %s' % (min_seeds - 1) rss_data = self.getRSSData(search_url % search_params) if rss_data: try: for result in rss_data: name = self.getTextElement(result, 'title') detail_url = self.getTextElement(result, 'link') description = self.getTextElement(result, 'description') magnet = splitString(detail_url, '/')[-1] magnet_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (magnet.upper(), tryUrlencode(name), tryUrlencode('udp://tracker.openbittorrent.com/announce')) reg = re.search('Size: (?P<size>\d+) MB Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)', six.text_type(description)) size = reg.group('size') seeds = reg.group('seeds').replace(',', '') peers = reg.group('peers').replace(',', '') results.append({ 'id': magnet, 'name': six.text_type(name), 'url': magnet_url, 'detail_url': detail_url, 'size': tryInt(size), 'seeders': tryInt(seeds), 'leechers': tryInt(peers), }) except: log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def edit(self, id = '', **kwargs):
    """API endpoint: update profile/category/default title for the given
    media ids, drop still-available releases, restatus and re-search."""
    try:
        db = get_db()

        ids = splitString(id)
        for media_id in ids:

            try:
                m = db.get('id', media_id)
                m['profile_id'] = kwargs.get('profile_id') or m['profile_id']

                cat_id = kwargs.get('category_id')
                if cat_id is not None:
                    m['category_id'] = cat_id if len(cat_id) > 0 else m['category_id']

                # Remove releases
                for rel in fireEvent('release.for_media', m['_id'], single = True):
                    # Use equality, not identity: "is" on strings only
                    # worked by accident of CPython interning.
                    if rel['status'] == 'available':
                        db.delete(rel)

                # Default title
                if kwargs.get('default_title'):
                    m['title'] = kwargs.get('default_title')

                db.update(m)

                fireEvent('media.restatus', m['_id'], single = True)

                m = db.get('id', media_id)

                movie_dict = fireEvent('media.get', m['_id'], single = True)
                fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(media_id))

            except:
                # Route the traceback through the logger instead of a bare
                # Python 2 print statement.
                log.error('Can\'t edit non-existing media: %s', traceback.format_exc())

        return {
            'success': True,
        }
    except:
        log.error('Failed editing media: %s', traceback.format_exc())

    return {
        'success': False,
    }