def createBaseUrl(self):
    host = Env.setting("host")
    if host == "0.0.0.0" or host == "":
        host = "localhost"
    port = Env.setting("port")

    return "%s:%d%s" % (cleanHost(host).rstrip("/"), int(port), Env.get("web_base"))
def createBaseUrl(self):
    host = Env.setting('host')
    if host == '0.0.0.0':
        host = 'localhost'
    port = Env.setting('port')

    return '%s:%d%s' % (cleanHost(host).rstrip('/'), int(port), '/' + Env.setting('url_base').lstrip('/') if Env.setting('url_base') else '')
def createBaseUrl(self):
    host = Env.setting('host')
    if host == '0.0.0.0' or host == '':
        host = 'localhost'
    port = Env.setting('port')

    return '%s:%d%s' % (cleanHost(host).rstrip('/'), int(port), Env.get('web_base'))
def createBaseUrl(self):
    host = Env.setting('host')
    if host == '0.0.0.0':
        host = 'localhost'
    port = Env.setting('port')

    return '%s:%d' % (cleanHost(host).rstrip('/'), int(port))
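# A minimal, self-contained sketch of the base-URL assembly shown above. clean_host()
# only approximates what cleanHost() appears to do, and the default values are
# assumptions, not taken from the snippets.
def clean_host(host):
    # Prefix a scheme when none is given, mirroring the assumed cleanHost() behaviour
    if not host.startswith(('http://', 'https://')):
        host = 'http://' + host
    return host

def build_base_url(host = '0.0.0.0', port = 5050, url_base = 'couchpotato'):
    if host in ('0.0.0.0', ''):
        host = 'localhost'
    web_base = '/' + url_base.lstrip('/') if url_base else ''
    return '%s:%d%s' % (clean_host(host).rstrip('/'), int(port), web_base)

# build_base_url() -> 'http://localhost:5050/couchpotato'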
def decorated(*args, **kwargs):
    auth = getattr(request, 'authorization')
    if Env.setting('username') and Env.setting('password'):
        if (not auth or not check_auth(auth.username.decode('latin1'), md5(auth.password.decode('latin1').encode(Env.get('encoding'))))):
            return authenticate()

    return f(*args, **kwargs)
def getCredentials(self, key):

    request_token = {
        'oauth_token': self.conf('username'),
        'oauth_token_secret': self.conf('password'),
        'oauth_callback_confirmed': True
    }

    token = oauth2.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
    token.set_verifier(key)

    log.info('Generating and signing request for an access token using key: %s' % key)

    oauth_consumer = oauth2.Consumer(key = self.consumer_key, secret = self.consumer_secret)
    oauth_client = oauth2.Client(oauth_consumer, token)
    resp, content = oauth_client.request(self.url['access'], method = 'POST', body = 'oauth_verifier=%s' % key)
    access_token = dict(parse_qsl(content))

    if resp['status'] != '200':
        log.error('The request for an access token did not succeed: ' + str(resp['status']))
        return False
    else:
        log.info('Your Twitter access token is %s' % access_token['oauth_token'])
        log.info('Access token secret is %s' % access_token['oauth_token_secret'])

        Env.setting('username', section = 'twitter', value = access_token['oauth_token'])
        Env.setting('password', section = 'twitter', value = access_token['oauth_token_secret'])

        return True
def get_current_user(self):
    username = Env.setting('username')
    password = Env.setting('password')

    if username and password:
        return self.get_secure_cookie('user')
    else:
        # Login when no username or password are set
        return True
def __init__(self):
    addEvent("movie.by_hash", self.byHash)
    addEvent("movie.search", self.search, priority=2)
    addEvent("movie.info", self.getInfo, priority=2)
    addEvent("movie.info_by_tmdb", self.getInfoByTMDBId)

    # Use base wrapper
    log.debug('Init TheMovieDb with language: "%s"', Env.setting("language"))
    tmdb.configure(self.conf("api_key"), Env.setting("language"))
def get(self):
    api = None

    username = Env.setting('username')
    password = Env.setting('password')

    if (self.get_argument('u') == md5(username) or not username) and (self.get_argument('p') == password or not password):
        api = Env.setting('api_key')

    self.write({
        'success': api is not None,
        'api_key': api
    })
def check_whitelisted(ip):
    if Env.setting('whitelist').strip() == '':
        return False

    wl = Env.setting('whitelist')
    for match in re.split(r"\s*,\s*|\s+", wl):
        if match.strip() == '':
            continue

        wip = match.strip()
        if ip == wip:
            return True
        if not "*" in wip:
            continue

        # Each '*' in a whitelist entry stands for one numeric octet
        pattern = re.compile('^' + re.escape(wip).replace('\\*', '(2[0-5]|1[0-9]|[0-9])?[0-9]') + '$')
        if pattern.search(ip):
            return True

    return False
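# Self-contained sketch of the wildcard matching used by check_whitelisted() above;
# the whitelist entry and IPs are made-up examples.
import re

def ip_matches(whitelist_entry, ip):
    # Each '*' stands for one numeric octet, using the same pattern as above
    pattern = re.compile('^' + re.escape(whitelist_entry).replace('\\*', '(2[0-5]|1[0-9]|[0-9])?[0-9]') + '$')
    return bool(pattern.search(ip))

# ip_matches('192.168.1.*', '192.168.1.42')  -> True
# ip_matches('192.168.1.*', '10.0.0.42')     -> False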
def getApiKey():
    api = None
    params = getParams()
    username = Env.setting('username')
    password = Env.setting('password')

    if (params.get('u') == md5(username) or not username) and (params.get('p') == password or not password):
        api = Env.setting('api_key')

    return jsonified({
        'success': api is not None,
        'api_key': api
    })
def calculate(self, nzb, movie):
    """ Calculate the score of a NZB, used for sorting later """

    # Merge global and category preferred words
    preferred_words = splitString(Env.setting('preferred_words', section = 'searcher').lower())
    try:
        preferred_words = list(set(preferred_words + splitString(movie['category']['preferred'].lower())))
    except:
        pass

    score = nameScore(toUnicode(nzb['name']), movie['library']['year'], preferred_words)

    for movie_title in movie['library']['titles']:
        score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title['title']))
        score += namePositionScore(toUnicode(nzb['name']), toUnicode(movie_title['title']))

    score += sizeScore(nzb['size'])

    # Torrents only
    if nzb.get('seeders'):
        try:
            score += nzb.get('seeders') * 100 / 15
            score += nzb.get('leechers') * 100 / 30
        except:
            pass

    # Provider score
    score += providerScore(nzb['provider'])

    # Duplicates in name
    score += duplicateScore(nzb['name'], getTitle(movie['library']))

    # Merge global and category ignored words
    ignored_words = splitString(Env.setting('ignored_words', section = 'searcher').lower())
    try:
        ignored_words = list(set(ignored_words + splitString(movie['category']['ignored'].lower())))
    except:
        pass

    # Partial ignored words
    score += partialIgnoredScore(nzb['name'], getTitle(movie['library']), ignored_words)

    # Ignore single downloads from multipart
    score += halfMultipartScore(nzb['name'])

    # Extra provider specific check
    extra_score = nzb.get('extra_score')
    if extra_score:
        score += extra_score(nzb)

    # Scene / Nuke scoring
    score += sceneScore(nzb['name'])

    return score
def post(self, *args, **kwargs):
    api = None

    username = Env.setting('username')
    password = Env.setting('password')

    if (self.get_argument('username') == username or not username) and (md5(self.get_argument('password')) == password or not password):
        api = Env.setting('api_key')

    if api:
        remember_me = tryInt(self.get_argument('remember_me', default = 0))
        self.set_secure_cookie('user', api, expires_days = 30 if remember_me > 0 else None)

    self.redirect(Env.get('web_base'))
def __init__(self):
    addEvent('info.search', self.search, priority = 3)
    addEvent('movie.search', self.search, priority = 3)
    addEvent('movie.info', self.getInfo, priority = 3)
    addEvent('movie.info_by_tmdb', self.getInfo)
    addEvent('app.load', self.config)

    self.language = Env.setting('dl_language')
def nameScore(name, year):
    ''' Calculate score for words in the NZB name '''

    score = 0
    name = name.lower()

    # Give points for the cool stuff
    for value in name_scores:
        v = value.split(':')
        add = int(v.pop())
        if v.pop() in name:
            score = score + add

    # Points if the year is correct
    if str(year) in name:
        score = score + 5

    # Contains preferred word
    nzb_words = re.split('\W+', simplifyString(name))
    preferred_words = Env.setting('preferred_words', section = 'searcher').split(',')
    for word in preferred_words:
        if word.strip() and word.strip().lower() in nzb_words:
            score = score + 100

    return score
def __init__(self):

    # Get options via arg
    from couchpotato.runner import getOptions
    self.options = getOptions(base_path, sys.argv[1:])

    # Load settings
    settings = Env.get('settings')
    settings.setFile(self.options.config_file)

    # Create data dir if needed
    self.data_dir = os.path.expanduser(Env.setting('data_dir'))
    if self.data_dir == '':
        self.data_dir = getDataDir()

    if not os.path.isdir(self.data_dir):
        os.makedirs(self.data_dir)

    # Create logging dir
    self.log_dir = os.path.join(self.data_dir, 'logs')
    if not os.path.isdir(self.log_dir):
        os.mkdir(self.log_dir)

    # Logging
    from couchpotato.core.logger import CPLog
    self.log = CPLog(__name__)

    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S')
    hdlr = handlers.RotatingFileHandler(os.path.join(self.log_dir, 'error.log'), 'a', 500000, 10)
    hdlr.setLevel(logging.CRITICAL)
    hdlr.setFormatter(formatter)
    self.log.logger.addHandler(hdlr)
def safeMessage(self, msg, replace_tuple = ()):

    from couchpotato.environment import Env
    from couchpotato.core.helpers.encoding import ss

    msg = ss(msg)

    try:
        msg = msg % replace_tuple
    except:
        try:
            if isinstance(replace_tuple, tuple):
                msg = msg % tuple([ss(x) for x in list(replace_tuple)])
            else:
                msg = msg % ss(replace_tuple)
        except:
            self.logger.error(u'Failed encoding stuff to log: %s' % traceback.format_exc())

    if not Env.get('dev'):

        # Scrub private query parameters from the message
        for replace in self.replace_private:
            msg = re.sub('(\?%s=)[^\&]+' % replace, '?%s=xxx' % replace, msg)
            msg = re.sub('(&%s=)[^\&]+' % replace, '&%s=xxx' % replace, msg)

        # Replace api key
        try:
            api_key = Env.setting('api_key')
            if api_key:
                msg = msg.replace(api_key, 'API_KEY')
        except:
            pass

    return msg
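# Standalone sketch of the private-parameter scrubbing done in safeMessage(); the
# sample message is invented, and the key list is a subset of replace_private above.
import re

def scrub(msg, private_keys = ('api', 'apikey', 'api_key', 'password', 'username')):
    for key in private_keys:
        msg = re.sub('(\?%s=)[^\&]+' % key, '?%s=xxx' % key, msg)
        msg = re.sub('(&%s=)[^\&]+' % key, '&%s=xxx' % key, msg)
    return msg

# scrub('GET /movie.add?api_key=abc123&username=bob')
#   -> 'GET /movie.add?api_key=xxx&username=xxx'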
def _minify(self, file_type, files, position, out):

    cache = Env.get('cache_dir')
    out_name = 'minified_' + out
    out = os.path.join(cache, out_name)

    raw = []
    for file_path in files:
        f = open(file_path, 'r').read()

        if file_type == 'script':
            data = jsmin(f)
        else:
            data = cssmin(f)
            data = data.replace('../images/', '../static/images/')

        raw.append({'file': file_path, 'date': int(os.path.getmtime(file_path)), 'data': data})

    # Combine all files together with some comments
    data = ''
    for r in raw:
        data += self.comment.get(file_type) % (r.get('file'), r.get('date'))
        data += r.get('data') + '\n\n'

    self.createFile(out, data.strip())

    if not self.minified.get(file_type):
        self.minified[file_type] = {}
    if not self.minified[file_type].get(position):
        self.minified[file_type][position] = []

    minified_url = 'api/%s/file.cache/%s?%s' % (Env.setting('api_key'), out_name, tryInt(os.path.getmtime(out)))
    self.minified[file_type][position].append(minified_url)
def check(self):
    if self.update_version or self.isDisabled():
        return

    log.info('Checking for new version on github for %s' % self.repo_name)
    if not Env.setting('development'):
        self.repo.fetch()

    current_branch = self.repo.getCurrentBranch().name

    for branch in self.repo.getRemoteByName('origin').getBranches():
        if current_branch == branch.name:

            local = self.repo.getHead()
            remote = branch.getHead()

            log.info('Versions, local:%s, remote:%s' % (local.hash[:8], remote.hash[:8]))

            if local.getDate() < remote.getDate():
                if self.conf('automatic') and not self.update_failed:
                    if self.doUpdate():
                        fireEventAsync('app.crappy_restart')
                else:
                    self.update_version = {
                        'hash': remote.hash[:8],
                        'date': remote.getDate(),
                    }
                    if self.conf('notification'):
                        fireEvent('updater.available', message = 'A new update is available', data = self.getVersion())

    self.last_check = time.time()
def run(self):

    # Get options via arg
    from couchpotato.runner import getOptions

    portable_path = os.path.join(base_path, '../..')
    #args = ['--quiet', '--data_dir=' + portable_path + '/CouchPotatoData', '--config_file=' + portable_path + '/CouchPotatoData/settings.conf']
    args = ['--debug', '--data_dir=' + portable_path + '/CouchPotatoData', '--config_file=' + portable_path + '/CouchPotatoData/settings.conf']
    self.options = getOptions(portable_path, args)

    # Load settings
    settings = Env.get('settings')
    settings.setFile(self.options.config_file)

    # Create data dir if needed
    self.data_dir = os.path.expanduser(Env.setting('data_dir'))
    if self.data_dir == '':
        from couchpotato.core.helpers.variable import getDataDir
        self.data_dir = getDataDir()

    self.data_dir = portable_path + '/CouchPotatoData'

    if not os.path.isdir(self.data_dir):
        os.makedirs(self.data_dir)

    # Create logging dir
    self.log_dir = os.path.join(self.data_dir, 'logs')
    if not os.path.isdir(self.log_dir):
        os.mkdir(self.log_dir)

    try:
        from couchpotato.runner import runCouchPotato
        runCouchPotato(self.options, base_path, args, data_dir = self.data_dir, log_dir = self.log_dir, Env = Env, desktop = self._desktop)
    except:
        pass

    self._desktop.frame.Close()
def search(self, filename, minSize, newsgroup=None):

    q = filename
    arguments = tryUrlencode({
        'q': q,
        'age': Env.setting('retention', 'nzb'),
        'sort': 'agedesc',
        'minsize': minSize,
        'rating': 1,
        'max': 250,
        'more': 1,
        'complete': 1,
    })

    nzbs = self.getRSSData(self.urls['search'] % arguments)
    nzbid = None

    for nzb in nzbs:

        enclosure = self.getElement(nzb, 'enclosure').attrib
        nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4])

        nzbid = nzbindex_id
        age = self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple())))
        sizeInMegs = tryInt(enclosure['length']) / 1024 / 1024
        downloadUrl = enclosure['url']
        detailURL = enclosure['url'].replace('/download/', '/release/')

    if nzbid:
        return NZBGetURLSearchResult(self, downloadUrl, sizeInMegs, detailURL, age, nzbid)
def nameScore(name, year, preferred_words):
    """ Calculate score for words in the NZB name """

    score = 0
    name = name.lower()

    # Points for the correct language
    lang_scores = lang_neg_scores
    lang_scores.update(lang_cur_scores[Env.setting('dl_language')])
    for lang, add in lang_scores.items():
        if lang in name:
            score = score + add

    # Give points for the cool stuff
    for value in name_scores:
        v = value.split(':')
        add = int(v.pop())
        if v.pop() in name:
            score += add

    # Points if the year is correct
    if str(year) in name:
        score += 5
    # Points if the year + 1 is correct (sometimes english and german years differ by 1)
    elif str(int(year) + 1) in name:
        score += 3

    # Contains preferred word
    nzb_words = re.split('\W+', simplifyString(name))
    score += 100 * len(list(set(nzb_words) & set(preferred_words)))

    return score
def registerStatic(self, plugin_file, add_to_head = True):

    # Register plugin path
    self.plugin_path = os.path.dirname(plugin_file)
    static_folder = toUnicode(os.path.join(self.plugin_path, 'static'))

    if not os.path.isdir(static_folder):
        return

    # Get plugin_name from PluginName
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__)
    class_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    # View path
    path = 'api/%s/static/%s/' % (Env.setting('api_key'), class_name)

    # Add handler to Tornado
    Env.get('app').add_handlers(".*$", [(Env.get('web_base') + path + '(.*)', StaticFileHandler, {'path': static_folder})])

    # Register for HTML <HEAD>
    if add_to_head:
        for f in glob.glob(os.path.join(self.plugin_path, 'static', '*')):
            ext = getExt(f)
            if ext in ['js', 'css']:
                fireEvent('register_%s' % ('script' if ext in 'js' else 'style'), path + os.path.basename(f), f)
def getRequestHeaders(self):
    return {
        'X-CP-Version': fireEvent('app.version', single = True),
        'X-CP-API': self.api_version,
        'X-CP-Time': time.time(),
        'X-CP-Identifier': '+%s' % Env.setting('api_key', 'core')[:10],  # Use first 10 as identifier, so we don't need to use IP address in api stats
    }
def providerScore(provider):

    try:
        score = tryInt(Env.setting('extra_score', section = provider.lower(), default = 0))
    except:
        score = 0

    return score
def get(self, *args, **kwargs):
    api = None

    try:
        username = Env.setting('username')
        password = Env.setting('password')

        if (self.get_argument('u') == md5(username) or not username) and (self.get_argument('p') == password or not password):
            api = Env.setting('api_key')

        self.write({
            'success': api is not None,
            'api_key': api
        })
    except:
        log.error('Failed doing key request: %s', (traceback.format_exc()))
        self.write({'success': False, 'error': 'Failed returning results'})
def require_basic_auth(handler, kwargs):
    if Env.setting('username') and Env.setting('password'):

        auth_header = handler.request.headers.get('Authorization')
        auth_decoded = base64.decodestring(auth_header[6:]) if auth_header else None

        if auth_decoded:
            # Split on the first ':' only, so passwords may contain ':'
            username, password = auth_decoded.split(':', 1)

        if auth_header is None or not auth_header.startswith('Basic ') or (not check_auth(username.decode('latin'), md5(password.decode('latin')))):
            handler.set_status(401)
            handler.set_header('WWW-Authenticate', 'Basic realm="CouchPotato Login"')
            handler._transforms = []
            handler.finish()

            return False

    return True
def __init__(self):

    fireEvent('scheduler.interval', identifier = 'manage.update_library', handle = self.updateLibrary, hours = 2)

    addEvent('manage.update', self.updateLibrary)
    addApiView('manage.update', self.updateLibraryView)

    if not Env.setting('development'):
        addEvent('app.load', self.updateLibrary)
def getCatId(self, identifier):

    if dict == type(self.cat_ids):
        cat_ids = self.cat_ids[Env.setting('dl_language', section = 'core')]
    else:
        cat_ids = self.cat_ids

    for cats in cat_ids:
        ids, qualities = cats
        if identifier in qualities:
            return ids

    if self.cat_backup_id:
        if dict == type(self.cat_backup_id):
            return [self.cat_backup_id[Env.setting('dl_language', section = 'core')]]
        else:
            return [self.cat_backup_id]

    return []
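# The cat_ids structure walked by getCatId() above appears to be a list of
# ([category ids], [quality identifiers]) pairs, optionally nested in a dict keyed by
# download language. A sketch with invented ids:
cat_ids = [
    ([2040, 2050], ['720p', '1080p']),  # example ids only
    ([2010], ['dvdrip']),
]

def get_cat_id(identifier, cat_ids, cat_backup_id = None):
    for ids, qualities in cat_ids:
        if identifier in qualities:
            return ids
    return [cat_backup_id] if cat_backup_id else []

# get_cat_id('720p', cat_ids) -> [2040, 2050]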
def download(self, data, movie, manual = False):

    snatched_status = fireEvent('status.get', 'snatched', single = True)

    # Download movie to temp
    filedata = None
    if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
        filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
        if filedata == 'try_next':
            return filedata

    successful = fireEvent('download', data = data, movie = movie, manual = manual, filedata = filedata, single = True)

    if successful:

        try:
            # Mark release as snatched
            db = get_session()
            rls = db.query(Release).filter_by(identifier = md5(data['url'])).first()
            if rls:
                rls.status_id = snatched_status.get('id')
                db.commit()

                log_movie = '%s (%s) in %s' % (getTitle(movie['library']), movie['library']['year'], rls.quality.label)
                snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
                log.info(snatch_message)
                fireEvent('movie.snatched', message = snatch_message, data = rls.to_dict())

            # If renamer isn't used, mark movie done
            if not Env.setting('enabled', 'renamer'):
                active_status = fireEvent('status.get', 'active', single = True)
                done_status = fireEvent('status.get', 'done', single = True)
                try:
                    if movie['status_id'] == active_status.get('id'):
                        for profile_type in movie['profile']['types']:
                            if rls and profile_type['quality_id'] == rls.quality.id and profile_type['finish']:
                                log.info('Renamer disabled, marking movie as finished: %s', log_movie)

                                # Mark release done
                                rls.status_id = done_status.get('id')
                                db.commit()

                                # Mark movie done
                                mvie = db.query(Movie).filter_by(id = movie['id']).first()
                                mvie.status_id = done_status.get('id')
                                db.commit()
                except:
                    log.error('Failed marking movie finished, renamer disabled: %s', traceback.format_exc())
        except:
            log.error('Failed marking movie finished: %s', traceback.format_exc())

        return True

    log.info('Tried to download, but none of the downloaders are enabled')
    return False
def conf(self, attr, value = None, default = None):
    return Env.setting(attr, self.getName().lower(), value = value, default = default)
def calculate(self, nzb, movie):
    """ Calculate the score of a NZB, used for sorting later """

    # Merge global and category preferred words
    preferred_words = splitString(Env.setting('preferred_words', section = 'searcher').lower())
    try:
        preferred_words = removeDuplicate(preferred_words + splitString(movie['category']['preferred'].lower()))
    except:
        pass

    score = nameScore(toUnicode(nzb['name']), movie['library']['year'], preferred_words)

    for movie_title in movie['library']['titles']:
        score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title['title']))
        score += namePositionScore(toUnicode(nzb['name']), toUnicode(movie_title['title']))

    score += sizeScore(nzb['size'])

    # Torrents only
    if nzb.get('seeders'):
        try:
            score += nzb.get('seeders') * 100 / 15
            score += nzb.get('leechers') * 100 / 30
        except:
            pass

    # Provider score
    score += providerScore(nzb['provider'])

    # Duplicates in name
    score += duplicateScore(nzb['name'], getTitle(movie['library']))

    # Merge global and category ignored words
    ignored_words = splitString(Env.setting('ignored_words', section = 'searcher').lower())
    try:
        ignored_words = removeDuplicate(ignored_words + splitString(movie['category']['ignored'].lower()))
    except:
        pass

    # Partial ignored words
    score += partialIgnoredScore(nzb['name'], getTitle(movie['library']), ignored_words)

    # Ignore single downloads from multipart
    score += halfMultipartScore(nzb['name'])

    # Extra provider specific check
    extra_score = nzb.get('extra_score')
    if extra_score:
        score += extra_score(nzb)

    # Scene / Nuke scoring
    score += sceneScore(nzb['name'])

    return score
def create_api_url(self):
    return '%sapi/%s' % (self.create_base_url(), Env.setting('api_key'))
def conf(self, attr):
    return Env.setting(attr, 'nmj')
def getMinimal(self, min_type):
    return Env.setting(min_type, 'automation')
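# Across these snippets Env.setting(attr, section, value = ..., default = ...) acts as a
# combined getter/setter: passing value writes (see the Twitter snippet), omitting it
# reads. A stand-in illustrating that contract, not the real implementation:
class FakeSettings(object):

    def __init__(self):
        self._data = {}

    def setting(self, attr, section = 'core', value = None, default = None):
        if value is not None:
            self._data[(section, attr)] = value
            return value
        return self._data.get((section, attr), default)

env = FakeSettings()
env.setting('api_key', value = 'abc123')        # write
env.setting('api_key')                          # -> 'abc123'
env.setting('retention', 'nzb', default = 0)    # -> 0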
def search(self, movie, quality): results = [] if self.isDisabled(): return results #Remove of quotes & remove of year filter q = '%s %s' % (simplifyString(getTitle( movie['library'])), quality.get('identifier')) #Also remove for filtering words #for ignored in Env.setting('ignored_words', 'searcher').split(','): # if len(q) + len(ignored.strip()) > 126: # break # q = '%s -%s' % (q, ignored.strip()) params = { 'q': q, 'match': 'normal', 'minSize': quality.get('size_min'), 'maxSize': quality.get('size_max'), 'complete': 2, 'maxAge': Env.setting('retention', 'nzb'), 'nopasswd': 'on', } cache_key = 'mysterbin.%s.%s.%s' % (movie['library']['identifier'], quality.get('identifier'), q) data = self.getCache(cache_key, self.urls['search'] % tryUrlencode(params)) if data: try: html = BeautifulSoup(data) resultable = html.find('table', attrs={'class': 't'}) for result in resultable.find_all('tr'): try: myster_id = result.find('input', attrs={'class': 'check4nzb'})['value'] # Age age = '' for temp in result.find('td', attrs={ 'class': 'cdetail' }).find_all(text=True): if 'days' in temp: age = tryInt(temp.split(' ')[0]) break # size size = None for temp in result.find('div', attrs={ 'class': 'cdetail' }).find_all(text=True): if 'gb' in temp.lower() or 'mb' in temp.lower( ) or 'kb' in temp.lower(): size = self.parseSize(temp) break description = '' if result.find('a', text='View NFO'): description = toUnicode( self.getCache('mysterbin.%s' % myster_id, self.urls['nfo'] % myster_id, cache_timeout=25920000)) new = { 'id': myster_id, 'name': ''.join( result.find('span', attrs={ 'class': 'cname' }).find_all(text=True)), 'type': 'nzb', 'provider': self.getName(), 'age': age, 'size': size, 'url': self.urls['download'] % myster_id, 'description': description, 'download': self.download, 'check_nzb': False, } new['score'] = fireEvent('score.calculate', new, movie, single=True) is_correct_movie = fireEvent('searcher.correct_movie', nzb=new, movie=movie, quality=quality, imdb_results=False, single=True) if is_correct_movie: results.append(new) self.found(new) except: pass return results except AttributeError: log.debug('No search results found.') return results
def _search(self, movie, quality, results):

    arguments = tryUrlencode({
        'q': movie['library']['identifier'],
        'm': 'n',
        'max': 400,
        'adv_age': Env.setting('retention', 'nzb'),
        'adv_sort': 'date',
        'adv_col': 'on',
        'adv_nfo': 'on',
        'minsize': quality.get('size_min'),
        'maxsize': quality.get('size_max'),
    })

    data = self.getHTMLData(self.urls['search'] % arguments)

    if data:
        try:

            html = BeautifulSoup(data)
            main_table = html.find('table', attrs = {'id': 'r2'})

            if not main_table:
                return

            items = main_table.find_all('tr')

            for row in items:
                title = row.find('span', attrs = {'class': 's'})

                if not title:
                    continue

                nzb_id = row.find('input', attrs = {'type': 'checkbox'})['name']
                info = row.find('span', attrs = {'class': 'd'})
                size_match = re.search('size:.(?P<size>[0-9\.]+.[GMB]+)', info.text)

                def extra_check(item):
                    parts = re.search('available:.(?P<parts>\d+)./.(?P<total>\d+)', info.text)
                    total = tryInt(parts.group('total'))
                    parts = tryInt(parts.group('parts'))

                    if (total / parts) < 0.95 or ((total / parts) >= 0.95 and not 'par2' in info.text.lower()):
                        log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total))
                        return False

                    if 'requires password' in info.text.lower():
                        log.info2('Wrong: \'%s\', passworded', (item['name']))
                        return False

                    return True

                results.append({
                    'id': nzb_id,
                    'name': title.text,
                    'age': tryInt(re.search('(?P<size>\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1]),
                    'size': self.parseSize(size_match.group('size')),
                    'url': self.urls['download'] % nzb_id,
                    'detail_url': self.urls['detail'] % info.find('a')['href'],
                    'extra_check': extra_check
                })

        except:
            log.error('Failed to parse HTML response from BinSearch: %s', traceback.format_exc())
def createApiUrl(self):
    return '%s/api/%s' % (self.createBaseUrl(), Env.setting('api_key'))
def getLanguages(self):

    languages = splitString(Env.setting('languages', section = 'core'))
    if len(languages):
        return languages

    return ['en']
def download(self, data, media, manual=False): # Backwards compatibility code if not data.get('protocol'): data['protocol'] = data['type'] data['type'] = 'movie' # Test to see if any downloaders are enabled for this type downloader_enabled = fireEvent('download.enabled', manual, data, single=True) if not downloader_enabled: log.info( 'Tried to download, but none of the "%s" downloaders are enabled or gave an error', data.get('protocol')) return False # Download NZB or torrent file filedata = None if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))): try: filedata = data.get('download')(url=data.get('url'), nzb_id=data.get('id')) except: log.error( 'Tried to download, but the "%s" provider gave an error: %s', (data.get('protocol'), traceback.format_exc())) return False if filedata == 'try_next': return filedata elif not filedata: return False # Send NZB or torrent file to downloader download_result = fireEvent('download', data=data, media=media, manual=manual, filedata=filedata, single=True) if not download_result: log.info( 'Tried to download, but the "%s" downloader gave an error', data.get('protocol')) return False log.debug('Downloader result: %s', download_result) snatched_status, done_status, downloaded_status, active_status = fireEvent( 'status.get', ['snatched', 'done', 'downloaded', 'active'], single=True) try: db = get_session() rls = db.query(Relea).filter_by( identifier=md5(data['url'])).first() if not rls: log.error('No release found to store download information in') return False renamer_enabled = Env.setting('enabled', 'renamer') # Save download-id info if returned if isinstance(download_result, dict): for key in download_result: rls_info = ReleaseInfo(identifier='download_%s' % key, value=toUnicode( download_result.get(key))) rls.info.append(rls_info) db.commit() log_movie = '%s (%s) in %s' % (getTitle( media['library']), media['library']['year'], rls.quality.label) snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie) log.info(snatch_message) fireEvent('%s.snatched' % data['type'], message=snatch_message, data=rls.to_dict()) # Mark release as snatched if renamer_enabled: self.updateStatus(rls.id, status=snatched_status) # If renamer isn't used, mark media done if finished or release downloaded else: if media['status_id'] == active_status.get('id'): finished = next( (True for profile_type in media['profile']['types'] if profile_type['quality_id'] == rls.quality.id and profile_type['finish']), False) if finished: log.info( 'Renamer disabled, marking media as finished: %s', log_movie) # Mark release done self.updateStatus(rls.id, status=done_status) # Mark media done mdia = db.query(Media).filter_by( id=media['id']).first() mdia.status_id = done_status.get('id') mdia.last_edit = int(time.time()) db.commit() return True # Assume release downloaded self.updateStatus(rls.id, status=downloaded_status) except: log.error('Failed storing download status: %s', traceback.format_exc()) db.rollback() return False finally: db.close() return True
def conf(self, attr):
    return Env.setting(attr, 'growl')
def correctRelease(self, nzb = None, media = None, quality = None, **kwargs):

    if media.get('type') != 'movie':
        return

    media_title = fireEvent('searcher.get_search_title', media, single = True)

    imdb_results = kwargs.get('imdb_results', False)
    retention = Env.setting('retention', section = 'nzb')

    if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
        log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name']))
        return False

    # Check for required and ignored words
    if not fireEvent('searcher.correct_words', nzb['name'], media, single = True):
        return False

    preferred_quality = fireEvent('quality.single', identifier = quality['identifier'], single = True)

    # Contains lower quality string
    if fireEvent('searcher.contains_other_quality', nzb, movie_year = media['library']['year'], preferred_quality = preferred_quality, single = True):
        log.info2('Wrong: %s, looking for %s', (nzb['name'], quality['label']))
        return False

    # File too small
    if nzb['size'] and preferred_quality['size_min'] > nzb['size']:
        log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min']))
        return False

    # File too large
    if nzb['size'] and preferred_quality.get('size_max') < nzb['size']:
        log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max']))
        return False

    # Provider specific functions
    get_more = nzb.get('get_more_info')
    if get_more:
        get_more(nzb)

    extra_check = nzb.get('extra_check')
    if extra_check and not extra_check(nzb):
        return False

    if imdb_results:
        return True

    # Check if nzb contains imdb link
    if getImdb(nzb.get('description', '')) == media['library']['identifier']:
        return True

    for raw_title in media['library']['titles']:
        for movie_title in possibleTitles(raw_title['title']):
            movie_words = re.split('\W+', simplifyString(movie_title))

            if fireEvent('searcher.correct_name', nzb['name'], movie_title, single = True):
                # If no IMDB link, at least check year range 1
                if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], media['library']['year'], 1, single = True):
                    return True

                # If no IMDB link, at least check year
                if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], media['library']['year'], 0, single = True):
                    return True

    log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], media_title, media['library']['year']))
    return False
def correctMovie(self, nzb = None, movie = None, quality = None, **kwargs):

    imdb_results = kwargs.get('imdb_results', False)
    retention = Env.setting('retention', section = 'nzb')

    if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
        log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name']))
        return False

    movie_name = getTitle(movie['library'])
    movie_words = re.split('\W+', simplifyString(movie_name))
    nzb_name = simplifyString(nzb['name'])
    nzb_words = re.split('\W+', nzb_name)

    # Make sure it has required words
    required_words = splitString(self.conf('required_words').lower())
    req_match = 0
    for req_set in required_words:
        req = splitString(req_set, '&')
        req_match += len(list(set(nzb_words) & set(req))) == len(req)

    if self.conf('required_words') and req_match == 0:
        log.info2('Wrong: Required word missing: %s', nzb['name'])
        return False

    # Ignore releases
    ignored_words = splitString(self.conf('ignored_words').lower())
    ignored_match = 0
    for ignored_set in ignored_words:
        ignored = splitString(ignored_set, '&')
        ignored_match += len(list(set(nzb_words) & set(ignored))) == len(ignored)

    if self.conf('ignored_words') and ignored_match:
        log.info2("Wrong: '%s' contains 'ignored words'", (nzb['name']))
        return False

    # Ignore p**n stuff
    pron_tags = ['xxx', 'sex', 'anal', 't**s', 'f**k', 'p**n', 'orgy', 'milf', 'boobs', 'erotica', 'erotic']
    pron_words = list(set(nzb_words) & set(pron_tags) - set(movie_words))
    if pron_words:
        log.info('Wrong: %s, probably pr0n', (nzb['name']))
        return False

    preferred_quality = fireEvent('quality.single', identifier = quality['identifier'], single = True)

    # Contains lower quality string
    if self.containsOtherQuality(nzb, movie_year = movie['library']['year'], preferred_quality = preferred_quality):
        log.info2('Wrong: %s, looking for %s', (nzb['name'], quality['label']))
        return False

    # File too small
    if nzb['size'] and preferred_quality['size_min'] > nzb['size']:
        log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min']))
        return False

    # File too large
    if nzb['size'] and preferred_quality.get('size_max') < nzb['size']:
        log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max']))
        return False

    # Provider specific functions
    get_more = nzb.get('get_more_info')
    if get_more:
        get_more(nzb)

    extra_check = nzb.get('extra_check')
    if extra_check and not extra_check(nzb):
        return False

    if imdb_results:
        return True

    # Check if nzb contains imdb link
    if self.checkIMDB([nzb.get('description', '')], movie['library']['identifier']):
        return True

    for raw_title in movie['library']['titles']:
        for movie_title in possibleTitles(raw_title['title']):
            movie_words = re.split('\W+', simplifyString(movie_title))

            if self.correctName(nzb['name'], movie_title):
                # If no IMDB link, at least check year range 1
                if len(movie_words) > 2 and self.correctYear([nzb['name']], movie['library']['year'], 1):
                    return True

                # If no IMDB link, at least check year
                if len(movie_words) <= 2 and self.correctYear([nzb['name']], movie['library']['year'], 0):
                    return True

    log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], movie_name, movie['library']['year']))
    return False
class Transmission(Downloader): type = ['torrent', 'torrent_magnet'] log = CPLog(__name__) def download(self, data, movie, filedata = None): log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('type'))) # Load host from config and split out port. host = self.conf('host').split(':') if not isInt(host[1]): log.error('Config properties are not filled in correctly, port is missing.') return False # Set parameters for Transmission params = { 'paused': self.conf('paused', default = 0), } if len(self.conf('directory', default = '')) > 0: folder_name = self.createFileName(data, filedata, movie)[:-len(data.get('type')) - 1] params['download-dir'] = os.path.join(self.conf('directory', default = ''), folder_name).rstrip(os.path.sep) torrent_params = {} if self.conf('ratio'): torrent_params = { 'seedRatioLimit': self.conf('ratio'), 'seedRatioMode': self.conf('ratiomode') } if not filedata and data.get('type') == 'torrent': log.error('Failed sending torrent, no data') return False # Send request to Transmission try: trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password')) if data.get('type') == 'torrent_magnet': remote_torrent = trpc.add_torrent_uri(data.get('url'), arguments = params) torrent_params['trackerAdd'] = self.torrent_trackers else: remote_torrent = trpc.add_torrent_file(b64encode(filedata), arguments = params) if not remote_torrent: return False # Change settings of added torrents elif torrent_params: trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params) log.info('Torrent sent to Transmission successfully.') return self.downloadReturnId(remote_torrent['torrent-added']['hashString']) except: log.error('Failed to change settings for transfer: %s', traceback.format_exc()) return False def getAllDownloadStatus(self): log.debug('Checking Transmission download status.') # Load host from config and split out port. 
host = self.conf('host').split(':') if not isInt(host[1]): log.error('Config properties are not filled in correctly, port is missing.') return False # Go through Queue try: trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password')) return_params = { 'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isFinished', 'downloadDir', 'uploadRatio'] } queue = trpc.get_alltorrents(return_params) except Exception, err: log.error('Failed getting queue: %s', err) return False if not queue: return [] statuses = StatusList(self) # Get torrents status # CouchPotato Status #status = 'busy' #status = 'failed' #status = 'completed' # Transmission Status #status = 0 => "Torrent is stopped" #status = 1 => "Queued to check files" #status = 2 => "Checking files" #status = 3 => "Queued to download" #status = 4 => "Downloading" #status = 4 => "Queued to seed" #status = 6 => "Seeding" #To do : # add checking file # manage no peer in a range time => fail for item in queue['torrents']: log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / eta=%s / uploadRatio=%s / confRatio=%s / isFinished=%s', (item['name'], item['id'], item['downloadDir'], item['hashString'], item['percentDone'], item['status'], item['eta'], item['uploadRatio'], self.conf('ratio'), item['isFinished'])) if not os.path.isdir(Env.setting('from', 'renamer')): log.error('Renamer "from" folder doesn\'t to exist.') return if (item['percentDone'] * 100) >= 100 and (item['status'] == 6 or item['status'] == 0) and item['uploadRatio'] > self.conf('ratio'): try: trpc.stop_torrent(item['hashString'], {}) statuses.append({ 'id': item['hashString'], 'name': item['name'], 'status': 'completed', 'original_status': item['status'], 'timeleft': str(timedelta(seconds = 0)), 'folder': os.path.join(item['downloadDir'], item['name']), }) except Exception, err: log.error('Failed to stop and remove torrent "%s" with error: %s', (item['name'], err)) statuses.append({ 'id': item['hashString'], 'name': item['name'], 'status': 'failed', 'original_status': item['status'], 'timeleft': str(timedelta(seconds = 0)), }) else: statuses.append({ 'id': item['hashString'], 'name': item['name'], 'status': 'busy', 'original_status': item['status'], 'timeleft': str(timedelta(seconds = item['eta'])), # Is ETA in seconds?? })
def download(self, data, movie, manual=False): # Test to see if any downloaders are enabled for this type downloader_enabled = fireEvent('download.enabled', manual, data, single=True) if downloader_enabled: snatched_status = fireEvent('status.get', 'snatched', single=True) # Download movie to temp filedata = None if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))): filedata = data.get('download')(url=data.get('url'), nzb_id=data.get('id')) if filedata == 'try_next': return filedata download_result = fireEvent('download', data=data, movie=movie, manual=manual, filedata=filedata, single=True) log.debug('Downloader result: %s', download_result) if download_result: try: # Mark release as snatched db = get_session() rls = db.query(Release).filter_by( identifier=md5(data['url'])).first() if rls: renamer_enabled = Env.setting('enabled', 'renamer') done_status = fireEvent('status.get', 'done', single=True) rls.status_id = done_status.get( 'id' ) if not renamer_enabled else snatched_status.get('id') # Save download-id info if returned if isinstance(download_result, dict): for key in download_result: rls_info = ReleaseInfo( identifier='download_%s' % key, value=toUnicode(download_result.get(key))) rls.info.append(rls_info) db.commit() log_movie = '%s (%s) in %s' % (getTitle( movie['library']), movie['library']['year'], rls.quality.label) snatch_message = 'Snatched "%s": %s' % ( data.get('name'), log_movie) log.info(snatch_message) fireEvent('movie.snatched', message=snatch_message, data=rls.to_dict()) # If renamer isn't used, mark movie done if not renamer_enabled: active_status = fireEvent('status.get', 'active', single=True) done_status = fireEvent('status.get', 'done', single=True) try: if movie['status_id'] == active_status.get( 'id'): for profile_type in movie['profile'][ 'types']: if profile_type[ 'quality_id'] == rls.quality.id and profile_type[ 'finish']: log.info( 'Renamer disabled, marking movie as finished: %s', log_movie) # Mark release done rls.status_id = done_status.get( 'id') rls.last_edit = int(time.time()) db.commit() # Mark movie done mvie = db.query(Movie).filter_by( id=movie['id']).first() mvie.status_id = done_status.get( 'id') mvie.last_edit = int(time.time()) db.commit() except: log.error( 'Failed marking movie finished, renamer disabled: %s', traceback.format_exc()) except: log.error('Failed marking movie finished: %s', traceback.format_exc()) return True log.info( 'Tried to download, but none of the "%s" downloaders are enabled or gave an error', (data.get('type', ''))) return False
def search(self, movie, quality): results = [] if self.isDisabled(): return results cat_ids = ','.join( ['%s' % x for x in self.getCatId(quality.get('identifier'))]) arguments = tryUrlencode({ 'term': movie['library']['identifier'], 'subcat': cat_ids, 'username': self.conf('username'), 'apikey': self.conf('api_key'), 'searchin': 'weblink', 'maxage': Env.setting('retention', section='nzb'), 'english': self.conf('english_only'), }) url = "%s?%s" % (self.urls['search'], arguments) cache_key = 'nzbmatrix.%s.%s' % (movie['library'].get('identifier'), cat_ids) data = self.getCache(cache_key, url, cache_timeout=1800, headers={'User-Agent': Env.getIdentifier()}) if data: try: try: data = XMLTree.fromstring(data) nzbs = self.getElements(data, 'channel/item') except Exception, e: log.debug('%s, %s', (self.getName(), e)) return results for nzb in nzbs: title = self.getTextElement(nzb, "title") if 'error' in title.lower(): continue id = int( self.getTextElement( nzb, "link").split('&')[0].partition('id=')[2]) size = self.getTextElement( nzb, "description").split('<br /><b>')[2].split('> ')[1] date = str( self.getTextElement(nzb, "description").split( '<br /><b>')[3].partition('Added:</b> ')[2]) new = { 'id': id, 'type': 'nzb', 'provider': self.getName(), 'name': title, 'age': self.calculateAge( int(time.mktime(parse(date).timetuple()))), 'size': self.parseSize(size), 'url': self.urls['download'] % id + self.getApiExt(), 'download': self.download, 'detail_url': self.urls['detail'] % id, 'description': self.getTextElement(nzb, "description"), 'check_nzb': True, } is_correct_movie = fireEvent('searcher.correct_movie', nzb=new, movie=movie, quality=quality, imdb_results=True, single=True) if is_correct_movie: new['score'] = fireEvent('score.calculate', new, movie, single=True) results.append(new) self.found(new) return results except SyntaxError: log.error('Failed to parse XML response from NZBMatrix.com')
def getAllDownloadStatus(self):

    log.debug('Checking Deluge download status.')

    if not os.path.isdir(Env.setting('from', 'renamer')):
        log.error('Renamer "from" folder doesn\'t exist.')
        return

    if not self.connect():
        return False

    statuses = StatusList(self)

    queue = self.drpc.get_alltorrents()

    if not queue:
        log.debug('Nothing in queue or error')
        return False

    for torrent_id in queue:
        item = queue[torrent_id]
        log.debug('name=%s / id=%s / save_path=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (item['name'], item['hash'], item['save_path'], item['move_completed_path'], item['hash'], item['progress'], item['state'], item['eta'], item['ratio'], item['stop_ratio'], item['is_seed'], item['is_finished'], item['paused']))

        # Deluge has no easy way to work out if a torrent is stalled or failing.
        #status = 'failed'
        status = 'busy'
        if item['is_seed'] and tryFloat(item['ratio']) < tryFloat(item['stop_ratio']):
            # We have item['seeding_time'] to work out what the seeding time is, but we do not
            # have access to the downloader seed_time, as with deluge we have no way to pass it
            # when the torrent is added. So Deluge will only look at the ratio.
            # See above comment in download().
            status = 'seeding'
        elif item['is_seed'] and item['is_finished'] and item['paused'] and item['state'] == 'Paused':
            status = 'completed'

        download_dir = item['save_path']
        if item['move_on_completed']:
            download_dir = item['move_completed_path']

        statuses.append({
            'id': item['hash'],
            'name': item['name'],
            'status': status,
            'original_status': item['state'],
            'seed_ratio': item['ratio'],
            'timeleft': str(timedelta(seconds = item['eta'])),
            'folder': ss(os.path.join(download_dir, item['name'])),
        })

    return statuses
def cmd_couchpotato(base_path, args): '''Commandline entry point.''' # Options parser = ArgumentParser() parser.add_argument('-s', '--datadir', default = os.path.join(base_path, '_data'), dest = 'data_dir', help = 'Absolute or ~/ path, where settings/logs/database data is saved (default ./_data)') parser.add_argument('-t', '--test', '--debug', action = 'store_true', dest = 'debug', help = 'Debug mode') parser.add_argument('-q', '--quiet', action = 'store_true', dest = 'quiet', help = "Don't log to console") parser.add_argument('-d', '--daemon', action = 'store_true', dest = 'daemonize', help = 'Daemonize the app') options = parser.parse_args(args) # Create data dir if needed if not os.path.isdir(options.data_dir): options.data_dir = os.path.expanduser(options.data_dir) os.makedirs(options.data_dir) # Create logging dir log_dir = os.path.join(options.data_dir, 'logs'); if not os.path.isdir(log_dir): os.mkdir(log_dir) # Daemonize app if options.daemonize: createDaemon() # Register environment settings from couchpotato.environment import Env Env.get('settings').setFile(os.path.join(options.data_dir, 'settings.conf')) Env.set('app_dir', base_path) Env.set('data_dir', options.data_dir) Env.set('db_path', 'sqlite:///' + os.path.join(options.data_dir, 'couchpotato.db')) Env.set('cache_dir', os.path.join(options.data_dir, 'cache')) Env.set('cache', FileSystemCache(os.path.join(Env.get('cache_dir'), 'python'))) Env.set('quiet', options.quiet) Env.set('daemonize', options.daemonize) Env.set('args', args) # Determine debug debug = options.debug or Env.setting('debug', default = False) Env.set('debug', debug) # Logger logger = logging.getLogger() formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S') level = logging.DEBUG if debug else logging.INFO logger.setLevel(level) # To screen if debug and not options.quiet and not options.daemonize: hdlr = logging.StreamHandler(sys.stderr) hdlr.setFormatter(formatter) logger.addHandler(hdlr) # To file hdlr2 = handlers.RotatingFileHandler(os.path.join(log_dir, 'CouchPotato.log'), 'a', 5000000, 4) hdlr2.setFormatter(formatter) logger.addHandler(hdlr2) # Disable server access log server_log = logging.getLogger('werkzeug') server_log.disabled = True # Start logging from couchpotato.core.logger import CPLog log = CPLog(__name__) log.debug('Started with options %s' % options) # Load configs & plugins loader = Env.get('loader') loader.preload(root = base_path) loader.addModule('core', 'couchpotato.core', 'core') loader.run() # Load migrations from migrate.versioning.api import version_control, db_version, version, upgrade db = Env.get('db_path') repo = os.path.join('couchpotato', 'core', 'migration') logging.getLogger('migrate').setLevel(logging.WARNING) # Disable logging for migration latest_db_version = version(repo) try: current_db_version = db_version(db, repo) except: version_control(db, repo, version = latest_db_version) current_db_version = db_version(db, repo) if current_db_version < latest_db_version and not debug: log.info('Doing database upgrade. 
From %d to %d' % (current_db_version, latest_db_version)) upgrade(db, repo) # Configure Database from couchpotato.core.settings.model import setup setup() fireEvent('app.load') # Create app from couchpotato import app api_key = Env.setting('api_key') url_base = '/' + Env.setting('url_base') if Env.setting('url_base') else '' reloader = debug and not options.daemonize # Basic config app.host = Env.setting('host', default = '0.0.0.0') app.port = Env.setting('port', default = 5000) app.debug = debug app.secret_key = api_key app.static_path = url_base + '/static' # Register modules app.register_module(web, url_prefix = '%s/' % url_base) app.register_module(api, url_prefix = '%s/%s/' % (url_base, api_key)) # Go go go! app.run(use_reloader = reloader)
def cpTag(self, media):
    if Env.setting('enabled', 'renamer'):
        identifier = getIdentifier(media)
        return '.cp(' + identifier + ')' if identifier else ''

    return ''
def urlopen(self, url, timeout=30, data=None, headers=None, files=None, show_error=True, stream=False): url = quote(ss(url), safe="%/:=&?~#+!$,;'@()*[]") if not headers: headers = {} if not data: data = {} # Fill in some headers parsed_url = urlparse(url) host = '%s%s' % (parsed_url.hostname, (':' + str(parsed_url.port) if parsed_url.port else '')) headers['Referer'] = headers.get('Referer', '%s://%s' % (parsed_url.scheme, host)) headers['Host'] = headers.get('Host', None) headers['User-Agent'] = headers.get('User-Agent', self.user_agent) headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip') headers['Connection'] = headers.get('Connection', 'keep-alive') headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0') use_proxy = Env.setting('use_proxy') proxy_url = None if use_proxy: proxy_server = Env.setting('proxy_server') proxy_username = Env.setting('proxy_username') proxy_password = Env.setting('proxy_password') if proxy_server: loc = "{0}:{1}@{2}".format( proxy_username, proxy_password, proxy_server) if proxy_username else proxy_server proxy_url = { "http": "http://" + loc, "https": "https://" + loc, } else: proxy_url = getproxies() r = Env.get('http_opener') # Don't try for failed requests if self.http_failed_disabled.get(host, 0) > 0: if self.http_failed_disabled[host] > (time.time() - 900): log.info2( 'Disabled calls to %s for 15 minutes because so many failed requests.', host) if not show_error: raise Exception( 'Disabled calls to %s for 15 minutes because so many failed requests' % host) else: return '' else: del self.http_failed_request[host] del self.http_failed_disabled[host] self.wait(host, url) status_code = None try: kwargs = { 'headers': headers, 'data': data if len(data) > 0 else None, 'timeout': timeout, 'files': files, 'verify': False, #verify_ssl, Disable for now as to many wrongly implemented certificates.. 'stream': stream, 'proxies': proxy_url, } method = 'post' if len(data) > 0 or files else 'get' log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance( data, dict) else 'with data')) response = r.request(method, url, **kwargs) status_code = response.status_code if response.status_code == requests.codes.ok: data = response if stream else response.content else: response.raise_for_status() self.http_failed_request[host] = 0 except (IOError, MaxRetryError, Timeout): if show_error: log.error('Failed opening url in %s: %s %s', (self.getName(), url, traceback.format_exc(0))) # Save failed requests by hosts try: # To many requests if status_code in [429]: self.http_failed_request[host] = 1 self.http_failed_disabled[host] = time.time() if not self.http_failed_request.get(host): self.http_failed_request[host] = 1 else: self.http_failed_request[host] += 1 # Disable temporarily if self.http_failed_request[host] > 5 and not isLocalIP( host): self.http_failed_disabled[host] = time.time() except: log.debug('Failed logging failed requests for %s: %s', (url, traceback.format_exc())) raise self.http_last_use[host] = time.time() return data
def check_auth(username, password):
    return username == Env.setting('username') and password == Env.setting('password')
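# Callers such as the decorated() wrapper and require_basic_auth() above hash the
# supplied password with md5 before calling check_auth(), which suggests the stored
# password setting holds an md5 hex digest. A small sketch with placeholder credentials:
import hashlib

def md5(text):
    return hashlib.md5(text.encode('utf-8')).hexdigest()

stored_username = 'admin'            # placeholder
stored_password = md5('secret')      # assumed to hold the hash, not the plaintext

def check_auth_sketch(username, password):
    return username == stored_username and password == stored_password

# check_auth_sketch('admin', md5('secret')) -> True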
def conf(self, attr, value = None, default = None, section = None):
    class_name = self.getName().lower().split(':')[0].lower()
    return Env.setting(attr, section = section if section else class_name, value = value, default = default)
def conf(self, attr):
    return Env.setting(attr, 'notifo')
def cpTag(self, movie):
    if Env.setting('enabled', 'renamer'):
        return '.cp(' + movie['library'].get('identifier') + ')' if movie['library'].get('identifier') else ''

    return ''
def _search(self, title, movie, quality):

    results = []

    q = '"%s" %s' % (title, movie['library']['year'])
    params = {
        'ctitle': q,
        'customQuery': 'usr',
        'cage': Env.setting('retention', 'nzb'),
        'csizemin': quality.get('size_min'),
        'csizemax': quality.get('size_max'),
        'ccategory': 14,
        'ctype': ','.join([str(x) for x in self.getCatId(quality['identifier'])]),
    }

    cache_key = 'ftdworld.%s.%s' % (movie['library']['identifier'], q)
    data = self.getCache(cache_key, self.urls['search'] % tryUrlencode(params), opener = self.login_opener)

    if data:
        try:

            html = BeautifulSoup(data)
            main_table = html.find('table', attrs = {'id': 'ftdresult'})

            if not main_table:
                return results

            items = main_table.find_all('tr', attrs = {'class': re.compile('tcontent')})

            for item in items:
                tds = item.find_all('td')
                nzb_id = tryInt(item.attrs['data-spot'])

                up = item.find('img', attrs = {'src': re.compile('up.png')})
                down = item.find('img', attrs = {'src': re.compile('down.png')})

                new = {
                    'id': nzb_id,
                    'type': 'nzb',
                    'provider': self.getName(),
                    'name': toUnicode(item.find('a', attrs = {'href': re.compile('./spotinfo')}).text.strip()),
                    'age': self.calculateAge(int(time.mktime(parse(tds[2].text).timetuple()))),
                    'size': 0,
                    'url': self.urls['download'] % nzb_id,
                    'download': self.loginDownload,
                    'detail_url': self.urls['detail'] % nzb_id,
                    'description': '',
                    'score': (tryInt(up.attrs['title'].split(' ')[0]) * 3) - (tryInt(down.attrs['title'].split(' ')[0]) * 3) if up else 0,
                }

                is_correct_movie = fireEvent('searcher.correct_movie', nzb = new, movie = movie, quality = quality, imdb_results = False, single = True)

                if is_correct_movie:
                    new['score'] += fireEvent('score.calculate', new, movie, single = True)
                    results.append(new)
                    self.found(new)

            return results
        except SyntaxError:
            log.error('Failed to parse XML response from NZBClub')

    return results
def runCouchPotato(options, base_path, args): # Load settings from couchpotato.environment import Env settings = Env.get('settings') settings.setFile(options.config_file) # Create data dir if needed data_dir = os.path.expanduser(Env.setting('data_dir')) if data_dir == '': data_dir = os.path.join(base_path, '_data') if not os.path.isdir(data_dir): os.makedirs(data_dir) # Create logging dir log_dir = os.path.join(data_dir, 'logs') if not os.path.isdir(log_dir): os.mkdir(log_dir) # Daemonize app if options.daemonize: createDaemon() # Register environment settings Env.set('uses_git', not options.git) Env.set('app_dir', base_path) Env.set('data_dir', data_dir) Env.set('log_path', os.path.join(log_dir, 'CouchPotato.log')) Env.set('db_path', 'sqlite:///' + os.path.join(data_dir, 'couchpotato.db')) Env.set('cache_dir', os.path.join(data_dir, 'cache')) Env.set('cache', FileSystemCache(os.path.join(Env.get('cache_dir'), 'python'))) Env.set('quiet', options.quiet) Env.set('daemonize', options.daemonize) Env.set('args', args) # Determine debug debug = options.debug or Env.setting('debug', default=False) Env.set('debug', debug) # Only run once when debugging if os.environ.get('WERKZEUG_RUN_MAIN') or not debug: # Logger logger = logging.getLogger() formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S') level = logging.DEBUG if debug else logging.INFO logger.setLevel(level) # To screen if debug and not options.quiet and not options.daemonize: hdlr = logging.StreamHandler(sys.stderr) hdlr.setFormatter(formatter) logger.addHandler(hdlr) # To file hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10) hdlr2.setFormatter(formatter) logger.addHandler(hdlr2) # Disable server access log server_log = logging.getLogger('werkzeug') server_log.disabled = True # Start logging from couchpotato.core.logger import CPLog log = CPLog(__name__) log.debug('Started with options %s' % options) # Load configs & plugins loader = Env.get('loader') loader.preload(root=base_path) loader.run() # Load migrations from migrate.versioning.api import version_control, db_version, version, upgrade db = Env.get('db_path') repo = os.path.join(base_path, 'couchpotato', 'core', 'migration') logging.getLogger('migrate').setLevel( logging.WARNING) # Disable logging for migration latest_db_version = version(repo) initialize = True try: current_db_version = db_version(db, repo) initialize = False except: version_control(db, repo, version=latest_db_version) current_db_version = db_version(db, repo) if current_db_version < latest_db_version and not debug: log.info('Doing database upgrade. From %d to %d' % (current_db_version, latest_db_version)) upgrade(db, repo) # Configure Database from couchpotato.core.settings.model import setup setup() fireEventAsync('app.load') if initialize: fireEventAsync('app.initialize') # Create app from couchpotato import app api_key = Env.setting('api_key') url_base = '/' + Env.setting('url_base').lstrip('/') if Env.setting( 'url_base') else '' reloader = debug and not options.daemonize # Basic config app.secret_key = api_key config = { 'use_reloader': reloader, 'host': Env.setting('host', default='0.0.0.0'), 'port': Env.setting('port', default=5000) } # Static path web.add_url_rule(url_base + '/static/<path:filename>', endpoint='static', view_func=app.send_static_file) # Register modules app.register_blueprint(web, url_prefix='%s/' % url_base) app.register_blueprint(api, url_prefix='%s/%s/' % (url_base, api_key)) # Go go go! app.run(**config)
def correctMovie(self, nzb = {}, movie = {}, quality = {}, **kwargs):

    imdb_results = kwargs.get('imdb_results', False)
    single_category = kwargs.get('single_category', False)
    retention = Env.setting('retention', section = 'nzb')

    if retention < nzb.get('age', 0):
        log.info('Wrong: Outside retention, age is %s, needs %s or lower: %s' % (nzb['age'], retention, nzb['name']))
        return False

    nzb_words = re.split('\W+', simplifyString(nzb['name']))
    required_words = self.conf('required_words').split(',')

    if self.conf('required_words') and not list(set(nzb_words) & set(required_words)):
        log.info("NZB doesn't contain any of the required words.")
        return False

    ignored_words = self.conf('ignored_words').split(',')
    blacklisted = list(set(nzb_words) & set(ignored_words))
    if self.conf('ignored_words') and blacklisted:
        log.info("Wrong: '%s' blacklisted words: %s" % (nzb['name'], ", ".join(blacklisted)))
        return False

    #qualities = fireEvent('quality.all', single = True)
    preferred_quality = fireEvent('quality.single', identifier = quality['identifier'], single = True)

    # Contains lower quality string
    if self.containsOtherQuality(nzb['name'], preferred_quality, single_category):
        log.info('Wrong: %s, looking for %s' % (nzb['name'], quality['label']))
        return False

    """
    # File too small
    minSize = q.minimumSize(qualityType)
    if minSize > item.size:
        log.info('"%s" is too small to be %s. %sMB instead of the minimal of %sMB.' % (item.name, type['label'], item.size, minSize))
        return False

    # File too large
    maxSize = q.maximumSize(qualityType)
    if maxSize < item.size:
        log.info('"%s" is too large to be %s. %sMB instead of the maximum of %sMB.' % (item.name, type['label'], item.size, maxSize))
        return False
    """

    if imdb_results:
        return True

    # Check if nzb contains imdb link
    if self.checkIMDB([nzb['description']], movie['library']['identifier']):
        return True

    for movie_title in movie['library']['titles']:
        movie_words = re.split('\W+', simplifyString(movie_title['title']))

        if self.correctName(nzb['name'], movie_title['title']):
            # If no IMDB link, at least check year range 1
            if len(movie_words) > 2 and self.correctYear([nzb['name']], movie['library']['year'], 1):
                return True

            # If no IMDB link, at least check year
            if len(movie_words) == 2 and self.correctYear([nzb['name']], movie['library']['year'], 0):
                return True

    log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'" % (nzb['name'], movie['library']['titles'][0]['title'], movie['library']['year']))
    return False
def download(self, data, media, manual=False):

    # Test to see if any downloaders are enabled for this type
    downloader_enabled = fireEvent('download.enabled', manual, data, single=True)
    if not downloader_enabled:
        log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', data.get('protocol'))
        return False

    # Download NZB or torrent file
    filedata = None
    if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
        try:
            filedata = data.get('download')(url=data.get('url'), nzb_id=data.get('id'))
        except:
            log.error('Tried to download, but the "%s" provider gave an error: %s', (data.get('protocol'), traceback.format_exc()))
            return False

        if filedata == 'try_next':
            return filedata
        elif not filedata:
            return False

    # Send NZB or torrent file to downloader
    download_result = fireEvent('download', data=data, media=media, manual=manual, filedata=filedata, single=True)
    if not download_result:
        log.info('Tried to download, but the "%s" downloader gave an error', data.get('protocol'))
        return False
    log.debug('Downloader result: %s', download_result)

    try:
        db = get_db()

        try:
            rls = db.get('release_identifier', md5(data['url']), with_doc=True)['doc']
        except:
            log.error('No release found to store download information in')
            return False

        renamer_enabled = Env.setting('enabled', 'renamer')

        # Save download-id info if returned
        if isinstance(download_result, dict):
            rls['download_info'] = download_result
            db.update(rls)

        log_movie = '%s (%s) in %s' % (getTitle(media), media['info']['year'], rls['quality'])
        snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
        log.info(snatch_message)

        fireEvent('%s.snatched' % data['type'], message=snatch_message, data=rls)

        # Mark release as snatched
        if renamer_enabled:
            self.updateStatus(rls['_id'], status='snatched')

        # If renamer isn't used, mark media done if finished or release downloaded
        else:
            if media['status'] == 'active':
                profile = db.get('id', media['profile_id'])
                if fireEvent('quality.isfinish', {'identifier': rls['quality'], 'is_3d': rls.get('is_3d', False)}, profile, single=True):
                    log.info('Renamer disabled, marking media as finished: %s', log_movie)

                    # Mark release done
                    self.updateStatus(rls['_id'], status='done')

                    # Mark media done
                    fireEvent('media.restatus', media['_id'], single=True)

                    return True

            # Assume release downloaded
            self.updateStatus(rls['_id'], status='downloaded')
    except:
        log.error('Failed storing download status: %s', traceback.format_exc())
        return False

    return True
class CPLog(object):

    context = ''
    replace_private = ['api', 'apikey', 'api_key', 'password', 'username', 'h', 'uid', 'key']

    def __init__(self, context=''):
        if context.endswith('.main'):
            context = context[:-5]

        self.context = context
        self.logger = logging.getLogger()

    def info(self, msg, replace_tuple=()):
        self.logger.info(self.addContext(msg, replace_tuple))

    def info2(self, msg, replace_tuple=()):
        self.logger.log(19, self.addContext(msg, replace_tuple))

    def debug(self, msg, replace_tuple=()):
        self.logger.debug(self.addContext(msg, replace_tuple))

    def error(self, msg, replace_tuple=()):
        self.logger.error(self.addContext(msg, replace_tuple))

    def warning(self, msg, replace_tuple=()):
        self.logger.warning(self.addContext(msg, replace_tuple))

    def critical(self, msg, replace_tuple=()):
        self.logger.critical(self.addContext(msg, replace_tuple), exc_info=1)

    def addContext(self, msg, replace_tuple=()):
        return '[%+25.25s] %s' % (self.context[-25:], self.safeMessage(msg, replace_tuple))

    def safeMessage(self, msg, replace_tuple=()):

        from couchpotato.environment import Env
        from couchpotato.core.helpers.encoding import ss

        msg = ss(msg)

        try:
            msg = msg % replace_tuple
        except:
            try:
                if isinstance(replace_tuple, tuple):
                    msg = msg % tuple([ss(x) for x in list(replace_tuple)])
                else:
                    msg = msg % ss(replace_tuple)
            except Exception, e:
                self.logger.error(u'Failed encoding stuff to log "%s": %s' % (msg, e))

        if not Env.get('dev'):

            for replace in self.replace_private:
                msg = re.sub('(\?%s=)[^\&]+' % replace, '?%s=xxx' % replace, msg)
                msg = re.sub('(&%s=)[^\&]+' % replace, '&%s=xxx' % replace, msg)

            # Replace api key
            try:
                api_key = Env.setting('api_key')
                if api_key:
                    msg = msg.replace(api_key, 'API_KEY')
            except:
                pass

        return msg
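# A small, hypothetical usage sketch of CPLog: modules construct it with their
# __name__ so the last 25 characters of the module path become the bracketed
# log context, and replace_tuple is formatted inside safeMessage(), where
# query-string credentials (api, apikey, api_key, password, ...) are masked
# before the message reaches the handlers. The module name below is made up
# purely for illustration.
log = CPLog('couchpotato.core.plugins.example')

# Plain message, prefixed with the padded context
log.info('Scanning library')

# Deferred %-formatting via replace_tuple; outside dev mode the api_key value
# in the URL is rewritten to "?api_key=xxx" before being logged
log.debug('Calling %s', 'http://localhost:5050/api/?api_key=abcdef123456')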
def search(self, movie, quality):

    results = []
    if self.isDisabled():
        return results

    cat_id_string = '&'.join(['c%s=1' % x for x in self.getCatId(quality.get('identifier'))])
    arguments = tryUrlencode({
        'searchtext': 'imdb:' + movie['library']['identifier'][2:],
        'uid': self.conf('userid'),
        'key': self.conf('api_key'),
        'age': Env.setting('retention', section='nzb'),
    })

    # check for english_only
    if self.conf('english_only'):
        arguments += "&lang0=1&lang3=1&lang1=1"

    url = "%s&%s&%s" % (self.urls['search'], arguments, cat_id_string)
    cache_key = 'nzbsrus.%s.%s' % (movie['library'].get('identifier'), cat_id_string)

    data = self.getCache(cache_key, url, cache_timeout=1800, headers={'User-Agent': Env.getIdentifier()})
    if data:
        try:
            try:
                data = XMLTree.fromstring(data)
                nzbs = self.getElements(data, 'results/result')
            except Exception, e:
                log.debug('%s, %s', (self.getName(), e))
                return results

            for nzb in nzbs:
                title = self.getTextElement(nzb, "name")
                if 'error' in title.lower():
                    continue

                id = self.getTextElement(nzb, "id")
                size = int(round(int(self.getTextElement(nzb, "size")) / 1048576))
                age = int(round((time.time() - int(self.getTextElement(nzb, "postdate"))) / 86400))

                new = {
                    'id': id,
                    'type': 'nzb',
                    'provider': self.getName(),
                    'name': title,
                    'age': age,
                    'size': size,
                    'url': self.urls['download'] % id + self.getApiExt() + self.getTextElement(nzb, "key"),
                    'download': self.download,
                    'detail_url': self.urls['detail'] % id,
                    'description': self.getTextElement(nzb, "addtext"),
                    'check_nzb': True,
                }

                is_correct_movie = fireEvent('searcher.correct_movie', nzb=new, movie=movie, quality=quality, imdb_results=True, single=True)

                if is_correct_movie:
                    new['score'] = fireEvent('score.calculate', new, movie, single=True)
                    results.append(new)
                    self.found(new)

            return results
        except SyntaxError:
            log.error('Failed to parse XML response from Nzbsrus.com')

    # Always hand back a list, even when nothing was fetched or parsing failed
    return results