def notify(self, message='', data=None, listener=None):
    """Send a Boxcar push; returns True on success, False on failure."""
    if not data:
        data = {}

    try:
        message = message.strip()

        # Optional long message, depending on what triggered the notification.
        if listener == 'test':
            long_message = 'This is a test message'
        elif data.get('identifier'):
            long_message = 'More movie info <a href="http://www.imdb.com/title/%s/">on IMDB</a>' % data['identifier']
        else:
            long_message = ''

        payload = {
            'user_credentials': self.conf('token'),
            'notification[title]': to_unicode('%s - %s' % (self.default_title, message)),
            'notification[long_message]': to_unicode(long_message),
            'notification[icon_url]': self.LOGO_URL,
            'notification[source_name]': 'CouchPotato',
        }

        self.urlopen(self.url, data=payload)
    except:
        log.error('Make sure the token provided is for the correct device')
        return False

    log.info('Boxcar notification successful.')
    return True
def notify(self, message='', data=None, listener=None):
    """Push a message through the Pushover API; True on success."""
    if not data:
        data = {}

    api_data = {
        'user': self.conf('user_key'),
        'token': self.conf('api_token'),
        'message': to_unicode(message),
        'priority': self.conf('priority'),
        'sound': self.conf('sound'),
    }

    # Attach an IMDb link when the notification concerns a known movie.
    identifier = get_identifier(data) if data else None
    if identifier:
        api_data['url'] = to_unicode('http://www.imdb.com/title/%s/' % identifier)
        api_data['url_title'] = to_unicode('%s on IMDb' % get_title(data))

    try:
        response = self.urlopen(
            '%s/%s' % (self.api_url, '1/messages.json'),
            headers={'Content-type': 'application/x-www-form-urlencoded'},
            data=api_data)
        log.info2('Pushover responded with: %s', response)
        return True
    except:
        return False
def notify(self, message='', data=None, listener=None):
    """Send a PushAlot notification; True on success, False on failure."""
    if not data:
        data = {}

    payload = {
        'AuthorizationToken': self.conf('auth_token'),
        'Title': self.default_title,
        'Body': to_unicode(message),
        'IsImportant': self.conf('important'),
        'IsSilent': self.conf('silent'),
        # Cache-buster query on the notification image.
        'Image': to_unicode(self.getNotificationImage('medium') + '?1'),
        'Source': to_unicode(self.default_title)
    }

    try:
        self.urlopen(self.urls['api'],
                     headers={'Content-type': 'application/x-www-form-urlencoded'},
                     data=payload,
                     show_error=False)
        return True
    except:
        log.error('PushAlot failed: %s', traceback.format_exc())

    return False
def notify(self, message='', data=None, listener=None):
    """Push a note to every linked Pushbullet device and channel.

    Returns True only when every device push succeeded; channel pushes are
    best-effort and do not affect the result.
    """
    if not data:
        data = {}

    body = to_unicode(message)

    # Get all the device IDs linked to this user
    devices = self.getDevices() or [None]
    pushed = 0
    for device_iden in devices:
        if self.request('pushes', device_iden=device_iden, type='note',
                        title=self.default_title, body=body):
            pushed += 1
        else:
            log.error('Unable to push notification to Pushbullet device with ID %s' % device_iden)

    for channel_tag in self.getChannels():
        self.request('pushes', channel_tag=channel_tag, type='note',
                     title=self.default_title, body=body)

    return pushed == len(devices)
def calculate(self, nzb, movie): """ Calculate the score of a NZB, used for sorting later """ # Merge global and category preferred_words = split_string(Env.setting('preferred_words', section='searcher').lower()) try: preferred_words = remove_duplicate(preferred_words + split_string(movie['category']['preferred'].lower())) except: pass score = nameScore(to_unicode(nzb['name']), movie['info']['year'], preferred_words) for movie_title in movie['info']['titles']: score += nameRatioScore(to_unicode(nzb['name']), to_unicode(movie_title)) score += namePositionScore(to_unicode(nzb['name']), to_unicode(movie_title)) score += sizeScore(nzb['size']) # Torrents only if nzb.get('seeders'): try: score += nzb.get('seeders') * 100 / 15 score += nzb.get('leechers') * 100 / 30 except: pass # Provider score score += providerScore(nzb['provider']) # Duplicates in name score += duplicateScore(nzb['name'], get_title(movie)) # Merge global and category ignored_words = split_string(Env.setting('ignored_words', section='searcher').lower()) try: ignored_words = remove_duplicate(ignored_words + split_string(movie['category']['ignored'].lower())) except: pass # Partial ignored words score += partialIgnoredScore(nzb['name'], get_title(movie), ignored_words) # Ignore single downloads from multipart score += halfMultipartScore(nzb['name']) # Extra provider specific check extra_score = nzb.get('extra_score') if extra_score: score += extra_score(nzb) # Scene / Nuke scoring score += sceneScore(nzb['name']) return score
def create(self, message=None, group=None):
    """Generate metadata files (nfo and artwork) for a renamed movie group."""
    if self.is_disabled():
        return
    if not group:
        group = {}

    log.info('Creating %s metadata.', self.getName())

    # Update library to get latest info before writing metadata; a failed
    # refresh is logged but metadata creation continues with stale info.
    try:
        group['media'] = fire_event('movie.update', group['media'].get('_id'),
                                    identifier=get_identifier(group['media']),
                                    extended=True, single=True)
    except:
        log.error('Failed to update movie, before creating metadata: %s', traceback.format_exc())

    # Split the destination into directory + base name for the metadata files.
    root_name = to_unicode(self.getRootName(group))
    meta_name = to_unicode(os.path.basename(root_name))
    root = to_unicode(os.path.dirname(root_name))

    movie_info = group['media'].get('info')

    # The nfo is a single file (index 0).
    for file_type in ['nfo']:
        try:
            self._createType(meta_name, root, movie_info, group, file_type, 0)
        except:
            log.error('Unable to create %s file: %s', ('nfo', traceback.format_exc()))

    # Image types may have several candidates; one file is created per image.
    for file_type in [
            'thumbnail', 'fanart', 'banner', 'disc_art', 'logo',
            'clear_art', 'landscape', 'extra_thumbs', 'extra_fanart'
    ]:
        try:
            # Thumbnail/fanart counts come from differently-named image lists.
            if file_type == 'thumbnail':
                num_images = len(movie_info['images']['poster_original'])
            elif file_type == 'fanart':
                num_images = len(movie_info['images']['backdrop_original'])
            else:
                num_images = len(movie_info['images'][file_type])

            for i in range(num_images):
                self._createType(meta_name, root, movie_info, group, file_type, i)
        except:
            log.error('Unable to create %s file: %s', (file_type, traceback.format_exc()))
def safe_message(self, msg, replace_tuple=()):
    """Format a log message safely and scrub sensitive values.

    Byte-normalises (ss) the message and all string substitutions before
    %-formatting, then — outside develop mode — masks private query
    parameters and the api key. Returns the result as unicode.
    """
    from couchpotato.core.helpers.encoding import ss, to_unicode

    msg = ss(msg)

    try:
        # Numbers are passed through untouched; only strings are normalised.
        if isinstance(replace_tuple, tuple):
            msg = msg % tuple([ss(x) if not isinstance(x, (int, float)) else x for x in list(replace_tuple)])
        elif isinstance(replace_tuple, dict):
            msg = msg % dict(
                (k, ss(v) if not isinstance(v, (int, float)) else v) for k, v in list(replace_tuple.items()))
        else:
            msg = msg % ss(replace_tuple)
    except Exception as e:
        # Formatting failure is reported but the unformatted msg is still used.
        self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e))

    self.setup()
    if not self.is_develop:

        # Mask configured private parameters in both '?' and '&' positions.
        for replace in self.replace_private:
            msg = re.sub('(\?%s=)[^\&]+' % replace, '?%s=xxx' % replace, msg)
            msg = re.sub('(&%s=)[^\&]+' % replace, '&%s=xxx' % replace, msg)

        # Replace api key
        try:
            api_key = self.Env.setting('api_key')
            if api_key:
                msg = msg.replace(api_key, 'API_KEY')
        except:
            pass

    return to_unicode(msg)
def notify(self, message='', data=None, listener=None):
    """Send a Join push to each configured device.

    Returns True only when every device accepted the push.
    """
    if not data:
        data = {}

    # default for devices
    device_default = [None]

    # fix: build the request URL in a local variable. The original appended
    # the apikey to self.url, so each call to notify() grew the instance URL
    # with another '&apikey=...' segment.
    url = self.url
    apikey = self.conf('apikey')
    if apikey is not None:
        url = url + '&apikey=' + apikey
        # If api key is present, default to sending to all devices
        device_default = ['group.all']

    devices = self.getDevices() or device_default
    successful = 0
    for device in devices:
        response = self.urlopen(
            url % (self.default_title, try_url_encode(to_unicode(message)), device, self.icon))
        if response:
            successful += 1
        else:
            log.error('Unable to push notification to Join device with ID %s' % device)

    return successful == len(devices)
def notify(self, message='', data=None, listener=None):
    """Store a frontend notification in the database and push it to listeners.

    Returns True on success, False on failure (the original implicitly
    returned None on failure; sibling notifiers return False).
    """
    if not data:
        data = {}

    n = {
        '_t': 'notification',
        'time': int(time.time()),
    }

    try:
        db = get_db()
        n['message'] = to_unicode(message)

        # Optional flags forwarded to the frontend.
        if data.get('sticky'):
            n['sticky'] = True
        if data.get('important'):
            n['important'] = True

        db.insert(n)
        self.frontend(type=listener, data=n)

        return True
    except:
        log.error('Failed notify "%s": %s', (n, traceback.format_exc()))
        return False
def _searchOnTitle(self, title, movie, quality, results):
    """Query the provider for '<title> <year>' and append parsed NZB results."""
    q = '%s %s' % (title, movie['info']['year'])
    params = try_url_encode({
        'search': q,
        'catid': ','.join([str(x) for x in self.getCatId(quality)]),
        'user': self.conf('username', default=''),
        'api': self.conf('api_key', default=''),
    })

    # fix: guard with truthiness — conf() may return None for an unset
    # custom_tag, and len(None) raised TypeError.
    custom_tag = self.conf('custom_tag')
    if custom_tag:
        params = '%s&%s' % (params, custom_tag)

    nzbs = self.getJsonData(self.urls['search'] % params)

    if isinstance(nzbs, list):
        for nzb in nzbs:
            results.append({
                'id': nzb.get('nzbid'),
                'name': to_unicode(nzb.get('release')),
                'age': self.calculateAge(try_int(nzb.get('usenetage'))),
                'size': try_int(nzb.get('sizebytes')) / 1024 / 1024,  # bytes -> MB
                'url': nzb.get('getnzb'),
                'detail_url': nzb.get('details'),
                'description': nzb.get('weblink')
            })
def toList(self, log_content=''):
    """Parse raw ANSI-coloured log text into dicts with time/type/message."""
    entries = []
    esc_pattern = r'\x1b'

    # Entries are separated by the ANSI reset sequence followed by a newline.
    for raw_line in re.split(r'\[0m\n', to_unicode(log_content)):
        parts = re.split(esc_pattern, raw_line)

        # A well-formed entry splits into exactly: header, message, trailer.
        if parts and len(parts) == 3:
            try:
                date, clock, log_type = split_string(parts[0], ' ')
                timestamp = '%s %s' % (date, clock)
            except:
                timestamp = 'UNKNOWN'
                log_type = 'UNKNOWN'

            message = ''.join(parts[1]) if len(parts) > 1 else parts[0]
            # Strip leftover colour-code fragments.
            message = re.sub('\[\d+m\[', '[', message)

            entries.append({
                'time': timestamp,
                'type': log_type,
                'message': message
            })

    return entries
def fill(self):
    """Insert the default set of quality profiles into the database.

    Returns True on success, False when the inserts failed.
    """
    try:
        db = get_db()

        profiles = [{
            'label': 'Best',
            'qualities': ['720p', '1080p', 'brrip', 'dvdrip']
        }, {
            'label': 'HD',
            'qualities': ['720p', '1080p']
        }, {
            'label': 'SD',
            'qualities': ['dvdrip', 'dvdr']
        }, {
            'label': 'Prefer 3D HD',
            'qualities': ['1080p', '720p', '720p', '1080p'],
            '3d': [True, True]
        }, {
            'label': '3D HD',
            'qualities': ['1080p', '720p'],
            '3d': [True, True]
        }, {
            'label': 'UHD 4K',
            'qualities': ['720p', '1080p', '2160p']
        }]

        # Create default quality profile
        order = 0
        for profile in profiles:
            log.info('Creating default profile: %s', profile.get('label'))

            pro = {
                '_t': 'profile',
                'label': to_unicode(profile.get('label')),
                'order': order,
                'qualities': profile.get('qualities'),
                'minimum_score': 1,
                'finish': [],
                'wait_for': [],
                'stop_after': [],
                '3d': []
            }

            # Per-quality flag lists, one entry per quality. The 3d list is
            # consumed with pop() (from the END); once exhausted, remaining
            # qualities default to non-3D.
            threed = profile.get('3d', [])
            for q in profile.get('qualities'):
                pro['finish'].append(True)
                pro['wait_for'].append(0)
                pro['stop_after'].append(0)
                pro['3d'].append(threed.pop() if threed else False)

            db.insert(pro)
            order += 1

        return True
    except:
        log.error('Failed: %s', traceback.format_exc())

    return False
def set_property(self, identifier, value=''):
    """Create or update a 'property' document keyed by identifier."""
    from couchpotato import get_db
    db = get_db()

    try:
        # Update the existing record when one is found.
        existing = db.get('property', identifier, with_doc=True)
        existing['doc'].update({
            'identifier': identifier,
            'value': to_unicode(value),
        })
        db.update(existing['doc'])
    except:
        # No record yet (or the lookup failed): insert a fresh one.
        db.insert({
            '_t': 'property',
            'identifier': identifier,
            'value': to_unicode(value),
        })
def getDefaultTitle(self, info, default_title = None):
    """Pick the default display title from info['titles'].

    Falls back to info['title'] when no default is supplied, and to the first
    alternative title (or 'UNKNOWN') when nothing matches.
    """
    # Set default title
    # NOTE(review): assumes info.get('title') is a string when default_title is
    # not given — len(None) below would raise; confirm callers guarantee this.
    default_title = default_title if default_title else to_unicode(info.get('title'))
    titles = info.get('titles', [])
    counter = 0
    def_title = None
    for title in titles:
        # Accept the first title when the default is empty, when there is only
        # one candidate, or when it matches the default (case-insensitively).
        if (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == to_unicode(
                default_title.lower()) or (to_unicode(default_title) == six.u('') and to_unicode(titles[0]) == title):
            def_title = to_unicode(title)
            break
        counter += 1

    # Nothing matched: fall back to the first alternative title.
    if not def_title and titles and len(titles) > 0:
        def_title = to_unicode(titles[0])

    return def_title or 'UNKNOWN'
def getMoreInfo(self, item):
    """Fill item['description'] from the cached BiT-HDTV detail page."""
    page = self.getCache('bithdtv.%s' % item['id'], item['detail_url'], cache_timeout=25920000)
    soup = BeautifulSoup(page)

    detail_table = soup.find('table', attrs={'class': 'detail'})
    item['description'] = to_unicode(detail_table.text) if detail_table else ''

    return item
def first(self, title):
    """Return the lowercase first letter of the simplified title, or '#'."""
    stripped = simplify_string(to_unicode(title))

    # Drop a single leading English article, if present.
    for article in ('the ', 'an ', 'a '):
        if stripped.startswith(article):
            stripped = stripped[len(article):]
            break

    use_letter = stripped and len(stripped) > 0 and stripped[0] in ascii_letters
    return str(stripped[0] if use_letter else '#').lower()
def getMoreInfo(self, item):
    """Fill item['description'] from the cached TPB detail page."""
    page = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout=25920000)
    soup = BeautifulSoup(page)

    nfo_block = soup.find('div', attrs={'class': 'nfo'})

    # The nfo block may be missing entirely; fall back to an empty description.
    description = ''
    try:
        description = to_unicode(nfo_block.text)
    except:
        pass

    item['description'] = description
    return item
def simplify(self, title):
    """Return a 32-char sort key: simplified title, '#'-prefixed if non-alpha."""
    unicode_title = to_unicode(title)

    # The prefix decision is made on the ORIGINAL title, before simplifying.
    starts_alpha = unicode_title and len(unicode_title) > 0 and unicode_title[0] in ascii_letters
    marker = '' if starts_alpha else '#'

    simplified = simplify_string(unicode_title)
    for article in ('the ', 'an ', 'a '):
        if simplified.startswith(article):
            simplified = simplified[len(article):]
            break

    return str(marker + simplified).ljust(32, ' ')[:32]
def getTitles(self, movie):
    """Collect unique display titles for a TMDB movie dict.

    Order: main title, original title, then alternative titles — skipping
    duplicates and the literal string 'none'.
    """
    # add the title to the list
    title = to_unicode(movie.get('title'))
    titles = [title] if title else []

    # add the original_title to the list
    alternate_title = to_unicode(movie.get('original_title'))
    if alternate_title and alternate_title not in titles:
        titles.append(alternate_title)

    # Add alternative titles
    for alt in movie.get('alternative_titles', {}).get('titles', []):
        alt_name = to_unicode(alt.get('title'))
        # fix: dropped the redundant `alt_name is not None` — truthiness
        # already excludes None.
        if alt_name and alt_name not in titles and alt_name.lower() != 'none':
            titles.append(alt_name)

    return titles
def get_params(params):
    """Decode flat request params (with PHP-style 'a[b][c]' keys) into nested dicts.

    Keys are processed in natural-sort order; values are URL-unquoted and the
    strings 'true'/'false' become booleans for non-nested keys.
    """
    reg = re.compile('^[a-z0-9_\.]+$')

    # Sort keys
    param_keys = list(params.keys())
    param_keys.sort(key=nat_sort_key)

    temp = {}
    for param in param_keys:
        value = params[param]

        nest = re.split("([\[\]]+)", param)
        if len(nest) > 1:
            nested = [key for key in nest if reg.match(key)]

            current = temp
            # fix: compare positions instead of identity (`item is nested[-1]`),
            # which could assign too early when an interned key string repeats
            # in the path.
            last_index = len(nested) - 1
            for i, item in enumerate(nested):
                if i == last_index:
                    current[item] = to_unicode(unquote(value))
                else:
                    try:
                        current[item]
                    except:
                        current[item] = {}
                    current = current[item]
        else:
            temp[param] = to_unicode(unquote(value))
            if temp[param].lower() in ['true', 'false']:
                temp[param] = temp[param].lower() != 'false'

    return to_list(temp)
def save(self, **kwargs):
    """Insert or update a category document; returns a success/category dict."""
    try:
        db = get_db()

        category = {
            '_t': 'category',
            'order': kwargs.get('order', 999),
            'label': to_unicode(kwargs.get('label', '')),
            'ignored': to_unicode(kwargs.get('ignored', '')),
            'preferred': to_unicode(kwargs.get('preferred', '')),
            'required': to_unicode(kwargs.get('required', '')),
            'destination': to_unicode(kwargs.get('destination', '')),
        }

        try:
            # Existing category: keep its stored order unless missing.
            c = db.get('id', kwargs.get('id'))
            category['order'] = c.get('order', category['order'])
            c.update(category)
            db.update(c)
        except:
            # No such id: create a new record instead.
            c = db.insert(category)
            c.update(category)

        return {
            'success': True,
            'category': c
        }
    except:
        log.error('Failed: %s', traceback.format_exc())

    return {
        'success': False,
        'category': None
    }
def _search(self, media, quality, results):
    """Parse the NZBClub RSS feed and append result dicts to `results`."""
    nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media))

    for nzb in nzbs:

        nzbclub_id = try_int(self.get_text_element(nzb, "link").split('/nzb_view/')[1].split('/')[0])
        enclosure = self.get_element(nzb, "enclosure").attrib
        size = enclosure['length']
        date = self.get_text_element(nzb, "pubDate")

        # fix: bind nzbclub_id as a default argument. The closure previously
        # captured the loop variable, so every stored extra_check used the id
        # of the LAST feed item as its cache key.
        def extra_check(item, nzbclub_id=nzbclub_id):
            full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout=25920000)

            for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
                if ignored in full_description:
                    log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name'])
                    return False

            return True

        results.append({
            'id': nzbclub_id,
            'name': to_unicode(self.get_text_element(nzb, "title")),
            'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
            'size': try_int(size) / 1024 / 1024,  # bytes -> MB
            'url': enclosure['url'].replace(' ', '_'),
            'detail_url': self.get_text_element(nzb, "link"),
            'get_more_info': self.getMoreInfo,
            'extra_check': extra_check
        })
def fill(self, reorder=False):
    """Seed the quality table and matching single-quality core profiles.

    When `reorder` is True, existing quality records get their `order`
    refreshed to match their position in self.qualities. Returns True on
    success, False on failure.
    """
    try:
        db = get_db()

        order = 0
        for q in self.qualities:

            existing = None
            try:
                existing = db.get('quality', q.get('identifier'), with_doc=reorder)
            except RecordNotFound:
                pass

            if not existing:
                # New quality: insert its record plus a core profile wrapping it.
                db.insert({
                    '_t': 'quality',
                    'order': order,
                    'identifier': q.get('identifier'),
                    'size_min': try_int(q.get('size')[0]),
                    'size_max': try_int(q.get('size')[1]),
                })

                log.info('Creating profile: %s', q.get('label'))
                db.insert({
                    '_t': 'profile',
                    'order': order + 20,  # Make sure it goes behind other profiles
                    'core': True,
                    'qualities': [q.get('identifier')],
                    'label': to_unicode(q.get('label')),
                    'finish': [True],
                    'wait_for': [0],
                })
            elif reorder:
                log.info2('Updating quality order')
                existing['doc']['order'] = order
                db.update(existing['doc'])

            order += 1

        return True
    except:
        log.error('Failed: %s', traceback.format_exc())

    return False
def createNzbName(self, data, media, unique_tag=False):
    """Build the download name '<release><cptag>' for an NZB.

    A password found inside the release name (or supplied explicitly) is
    embedded in the tag as '{{password}}'; the name is truncated so that
    name + tag stays within 127 characters and made filesystem-safe.
    """
    nzb_name = data.get('name')
    suffix = self.cpTag(media, unique_tag=unique_tag)

    # A password may be hidden inside the filename itself.
    found = scan_for_password(data.get('name'))
    if found:
        nzb_name, embedded_password = found
        suffix += '{{%s}}' % embedded_password
    elif data.get('password'):
        suffix += '{{%s}}' % data.get('password')

    # Some filesystems don't support 128+ long filenames
    keep = 127 - len(suffix)
    safe_name = to_safe_string(to_unicode(nzb_name)[:keep])

    return '%s%s' % (safe_name, suffix)
def getPoster(self, media, image_urls):
    """Download a poster for `media` if it doesn't already have a valid one.

    Updates media['files']['image_poster'] in place with the downloaded path.
    """
    if 'files' not in media:
        media['files'] = {}

    existing_files = media['files']

    image_type = 'poster'
    file_type = 'image_%s' % image_type

    # Make existing unique
    unique_files = list(set(existing_files.get(file_type, [])))

    # Remove files that can't be found.
    # fix: filter instead of calling list.remove() inside the iteration loop,
    # which skipped the element following each removed entry.
    unique_files = [ef for ef in unique_files if os.path.isfile(ef)]

    # Replace new files list
    existing_files[file_type] = unique_files
    # fix: the original tested `len(existing_files) == 0`, which could never be
    # true right after the assignment above (and would KeyError if it were).
    # Drop the entry when the LIST of valid files is empty.
    if len(unique_files) == 0:
        del existing_files[file_type]

    images = image_urls.get(image_type, [])
    for y in ['SX300', 'tmdb']:
        initially_try = [x for x in images if y in x]
        # NOTE(review): `images[:-1]` keeps the last original element while
        # front-loading matching URLs — presumably intended as prioritisation;
        # preserved as-is, confirm against upstream intent before changing.
        images[:-1] = initially_try

    # Loop over type
    for image in images:
        if not isinstance(image, str):
            continue

        # Check if it has top image
        filename = '%s.%s' % (md5(image), get_extension(image))
        existing = existing_files.get(file_type, [])
        has_latest = False
        for x in existing:
            if filename in x:
                has_latest = True

        if not has_latest or file_type not in existing_files or len(existing_files.get(file_type, [])) == 0:
            file_path = fire_event('file.download', url=image, single=True)
            if file_path:
                existing_files[file_type] = [to_unicode(file_path)]
                break
        else:
            break
def save(self, **kwargs):
    """Create or update a quality profile from request kwargs."""
    try:
        db = get_db()

        profile = {
            '_t': 'profile',
            'label': to_unicode(kwargs.get('label')),
            'order': try_int(kwargs.get('order', 999)),
            'core': kwargs.get('core', False),
            'minimum_score': try_int(kwargs.get('minimum_score', 1)),
            'qualities': [],
            'wait_for': [],
            'stop_after': [],
            'finish': [],
            '3d': []
        }

        # Update types (renamed loop variable: the original shadowed builtin `type`).
        for position, quality_type in enumerate(kwargs.get('types', [])):
            profile['qualities'].append(quality_type.get('quality'))
            profile['wait_for'].append(try_int(kwargs.get('wait_for', 0)))
            profile['stop_after'].append(try_int(kwargs.get('stop_after', 0)))
            # The first (preferred) quality always counts as finishing.
            profile['finish'].append((try_int(quality_type.get('finish')) == 1) if position > 0 else True)
            profile['3d'].append(try_int(quality_type.get('3d')))

        id = kwargs.get('id')
        try:
            p = db.get('id', id)
            profile['order'] = try_int(kwargs.get('order', p.get('order', 999)))
        except:
            p = db.insert(profile)

        p.update(profile)
        db.update(p)

        return {'success': True, 'profile': p}
    except:
        log.error('Failed: %s', traceback.format_exc())

    return {'success': False}
def getDirectories(self, path='/', show_hidden=True):
    """List subdirectory paths of `path`, optionally excluding hidden ones."""
    # Return driveletters or root if path is empty
    if path == '/' or not path or path == '\\':
        if os.name == 'nt':
            return self.getDriveLetters()
        path = '/'

    path = sp(path)
    include_hidden = bool(int(show_hidden))

    found = []
    for entry in os.listdir(path):
        full = sp(os.path.join(path, entry))
        if not os.path.isdir(full):
            continue
        if self.is_hidden(full) and not include_hidden:
            continue
        # Keep a trailing separator so the result is unambiguously a directory.
        found.append(to_unicode('%s%s' % (full, os.path.sep)))

    return sorted(found)
def getMoreInfo(self, item):
    """Fill item['description'] from the detail page, caching the result."""
    cache_key = 'ilt.%s' % item['id']
    description = self.getCache(cache_key)

    if not description:
        try:
            detail_page = self.getHTMLData(item['detail_url'])
            soup = BeautifulSoup(detail_page)
            nfo_block = soup.find('td', attrs={'class': 'main'}).findAll('table')[1]
            description = to_unicode(nfo_block.text) if nfo_block else ''
        except:
            log.error('Failed getting more info for %s', item['name'])
            description = ''

        self.setCache(cache_key, description, timeout=25920000)

    item['description'] = description
    return item
def notify(self, message='', data=None, listener=None):
    """POST the message (plus imdb id when known) to the configured webhook URL."""
    if not data:
        data = {}

    post_data = {'message': to_unicode(message)}

    imdb_id = get_identifier(data)
    if imdb_id:
        post_data.update({'imdb_id': imdb_id})

    try:
        self.urlopen(self.conf('url'),
                     headers={'Content-type': 'application/x-www-form-urlencoded'},
                     data=post_data,
                     show_error=False)
        return True
    except:
        log.error('Webhook notification failed: %s', traceback.format_exc())

    return False
def partial(self, type='all', lines=30, offset=0, **kwargs):
    """Return up to `lines` recent log entries of `type`, skipping `offset`.

    Walks the current log plus rotated files ('.1' .. '.49'), collecting
    entries newest-first, then returns them oldest-first.
    """
    total_lines = try_int(lines)
    offset = try_int(offset)

    log_lines = []

    brk = False
    for x in range(0, 50):
        path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')

        # Stop at the first missing rotation; later ones won't exist either.
        if not os.path.isfile(path):
            break

        # fix: the original leaked the file handle (open without close);
        # use a context manager so the file is always closed.
        with open(path, 'r') as f:
            log_content = to_unicode(f.read())

        raw_lines = self.toList(log_content)
        raw_lines.reverse()  # newest entries first within each file

        for line in raw_lines:
            if type == 'all' or line.get('type') == type.upper():
                log_lines.append(line)
                if len(log_lines) >= (total_lines + offset):
                    brk = True
                    break

        if brk:
            break

    log_lines = log_lines[offset:]
    log_lines.reverse()

    return {
        'success': True,
        'log': log_lines,
    }