def searchSingle(self, message = None, group = None):
    """Search and download a trailer for a just-renamed movie group.

    Returns True when a trailer was found (or already present), False when
    none were found, None when disabled or a trailer file already exists.
    """
    if not group: group = {}

    # Safe lookups: the default group is an empty dict, so a plain
    # group['files']['trailer'] would raise KeyError here.
    if self.isDisabled() or len(group.get('files', {}).get('trailer', [])) > 0:
        return

    if self.conf('usevf'):
        filename = self.conf('name').replace('<filename>', group['filename'])
        destination = os.path.join(group['destination_dir'], filename)
        trailers = fireEvent('vftrailer.search', group = group, filename = filename, destination = destination, merge = True)
    else:
        trailers = fireEvent('trailer.search', group = group, merge = True)

    if not trailers:
        log.info('No trailers found for: %s', getTitle(group))
        return False

    if self.conf('usevf'):
        # The VF searcher handles the download itself
        log.info('Trailers found in VF for: %s', getTitle(group))
        return True

    for trailer in trailers.get(self.conf('quality'), []):
        ext = getExt(trailer)
        # Extensions longer than 5 chars are url junk; fall back to mp4
        filename = self.conf('name').replace('<filename>', group['filename']) + ('.%s' % ('mp4' if len(ext) > 5 else ext))
        destination = os.path.join(group['destination_dir'], filename)

        if not os.path.isfile(destination):
            trailer_file = fireEvent('file.download', url = trailer, dest = destination, urlopen_kwargs = {'headers': {'User-Agent': 'Quicktime'}}, single = True)

            # The download can fail and return a falsy value; don't trust
            # small trailers (< 1MB) either, try the next one instead.
            if not trailer_file or os.path.getsize(trailer_file) < (1024 * 1024):
                if trailer_file and os.path.isfile(trailer_file):
                    os.unlink(trailer_file)
                continue
        else:
            log.debug('Trailer already exists: %s', destination)

        group['renamed_files'].append(destination)

        # Download first and break
        break

    return True
def calculate(self, nzb, movie):
    """Score a release (NZB/torrent) so search results can be ranked later."""

    release_name = toUnicode(nzb['name'])

    # Preferred words: global setting merged with the category-specific list
    preferred_words = splitString(Env.setting('preferred_words', section = 'searcher').lower())
    try:
        category_preferred = splitString(movie['category']['preferred'].lower())
        preferred_words = list(set(preferred_words + category_preferred))
    except:
        pass

    score = nameScore(release_name, movie['library']['year'], preferred_words)

    for movie_title in movie['library']['titles']:
        title = toUnicode(movie_title['title'])
        score += nameRatioScore(release_name, title)
        score += namePositionScore(release_name, title)

    score += sizeScore(nzb['size'])

    # Torrent-only bonus for healthy seed/leech counts
    if nzb.get('seeders'):
        try:
            score += nzb.get('seeders') * 100 / 15
            score += nzb.get('leechers') * 100 / 30
        except:
            pass

    score += providerScore(nzb['provider'])

    # Duplicated words against the movie title
    score += duplicateScore(nzb['name'], getTitle(movie['library']))

    # Ignored words: global setting merged with the category-specific list
    ignored_words = splitString(Env.setting('ignored_words', section = 'searcher').lower())
    try:
        category_ignored = splitString(movie['category']['ignored'].lower())
        ignored_words = list(set(ignored_words + category_ignored))
    except:
        pass

    # Partial ignored words
    score += partialIgnoredScore(nzb['name'], getTitle(movie['library']), ignored_words)

    # Ignore single downloads from multipart
    score += halfMultipartScore(nzb['name'])

    # Provider-specific bonus hook
    extra_score = nzb.get('extra_score')
    if extra_score:
        score += extra_score(nzb)

    # Scene / Nuke scoring
    score += sceneScore(nzb['name'])

    return score
def tryNextRelease(self, movie_id, manual=False):
    """Ignore all snatched/done releases for a movie and kick off a new search.

    Returns True when the follow-up search was fired, False on any error.
    """
    snatched_status, done_status, ignored_status = fireEvent(
        "status.get", ["snatched", "done", "ignored"], single=True
    )
    try:
        db = get_session()
        # Mark every previously snatched or finished release as ignored so the
        # searcher won't pick the same one again.
        rels = (
            db.query(Release)
            .filter_by(movie_id=movie_id)
            .filter(Release.status_id.in_([snatched_status.get("id"), done_status.get("id")]))
            .all()
        )
        for rel in rels:
            rel.status_id = ignored_status.get("id")
        db.commit()

        movie_dict = fireEvent("movie.get", movie_id, single=True)
        log.info("Trying next release for: %s", getTitle(movie_dict["library"]))
        fireEvent("movie.searcher.single", movie_dict, manual=manual)

        return True
    except:
        log.error("Failed searching for next release: %s", traceback.format_exc())
        return False
def notify(self, message = '', data = None, listener = None):
    """Send *message* through the Pushover API; link to IMDb when media data is given.

    Returns True when Pushover accepted the message, False otherwise.
    """
    data = data or {}

    payload = {
        'user': self.conf('user_key'),
        'token': self.conf('api_token'),
        'message': toUnicode(message),
        'priority': self.conf('priority'),
        'sound': self.conf('sound'),
    }

    identifier = getIdentifier(data) if data else None
    if identifier:
        payload['url'] = toUnicode('http://www.imdb.com/title/%s/' % identifier)
        payload['url_title'] = toUnicode('%s on IMDb' % getTitle(data))

    try:
        response = self.urlopen('%s/%s' % (self.api_url, '1/messages.json'),
            headers = {'Content-type': 'application/x-www-form-urlencoded'},
            data = payload)
        log.info2('Pushover responded with: %s', response)
        return True
    except:
        return False
def findViaAlternative(self, group):
    """Fallback search on the HD-Trailers site itself.

    Returns a dict mapping quality ('480p'/'720p'/'1080p') to a list of
    trailer urls; lists stay empty when nothing was found.
    """
    results = {"480p": [], "720p": [], "1080p": []}

    movie_name = getTitle(group["library"])

    url = "%s?%s" % (self.url["backup"], tryUrlencode({"s": movie_name}))
    data = self.getCache("hdtrailers.alt.%s" % group["library"]["identifier"], url)

    try:
        tables = SoupStrainer("div")
        html = BeautifulSoup(data, parse_only=tables)
        result_table = html.find_all("h2", text=re.compile(movie_name))

        for h2 in result_table:
            # h2 is a Tag; calling .lower() on the Tag itself raised
            # AttributeError and aborted the whole loop in the old code.
            if "trailer" in h2.get_text().lower():
                parent = h2.parent.parent.parent
                trailer_links = parent.find_all("a", text=re.compile("480p|720p|1080p"))
                try:
                    for trailer in trailer_links:
                        # Key the result on the quality found in the link
                        # text, not on the Tag object itself.
                        quality = re.search("(480p|720p|1080p)", trailer.get_text())
                        if quality:
                            # NOTE(review): href taken from the link's parent,
                            # as the original did — confirm against site markup.
                            results[quality.group(1)].insert(0, trailer.parent["href"])
                except:
                    pass
    except AttributeError:
        log.debug("No trailers found in via alternative.")

    return results
def calculate(self, nzb, movie):
    """Calculate the score of a NZB, used for sorting later."""
    release_name = toUnicode(nzb['name'])

    score = nameScore(release_name, movie['library']['year'])

    for movie_title in movie['library']['titles']:
        score += nameRatioScore(release_name, toUnicode(movie_title['title']))

    score += sizeScore(nzb['size'])

    # Torrent-only bonus for healthy seed/leech counts
    if nzb.get('seeds'):
        try:
            score += nzb.get('seeds') / 5
            score += nzb.get('leechers') / 10
        except:
            pass

    score += providerScore(nzb['provider'])

    # Duplicated words against the movie title
    score += duplicateScore(nzb['name'], getTitle(movie['library']))

    # Provider-specific bonus hook
    extra = nzb.get('extra_score')
    if extra:
        score += extra(nzb)

    return score
def tryNextRelease(self, movie_id, manual=False):
    """Mark the snatched release(s) for a movie as ignored and search again.

    Returns True when the new search was fired, False on failure.
    """
    snatched_status = fireEvent('status.get', 'snatched', single=True)
    ignored_status = fireEvent('status.get', 'ignored', single=True)

    try:
        db = get_session()

        # Ignore everything that was snatched so the next search skips it
        rels = db.query(Release).filter_by(
            status_id=snatched_status.get('id'),
            movie_id=movie_id).all()
        for rel in rels:
            rel.status_id = ignored_status.get('id')
        db.commit()

        movie_dict = fireEvent('movie.get', movie_id, single=True)
        log.info('Trying next release for: %s', getTitle(movie_dict['library']))
        fireEvent('searcher.single', movie_dict)

        return True
    except:
        log.error('Failed searching for next release: %s', traceback.format_exc())
        return False
def search(self, movie, quality):
    """Search TorrentLeech for *movie* at *quality*.

    Returns a list of result dicts accepted by the movie searcher; an empty
    list when disabled, not logged in, or on a parse error.
    """
    results = []
    if self.isDisabled():
        return results

    # Cookie login
    if not self.login_opener and not self.login():
        return results

    cache_key = 'torrentleech.%s.%s' % (movie['library']['identifier'], quality.get('identifier'))

    # Search on the title (colons stripped) plus the quality identifier
    url = self.urls['search'] % (quote_plus(getTitle(movie['library']).replace(':', '') + ' ' + quality['identifier']), self.getCatId(quality['identifier'])[0])
    data = self.getCache(cache_key, url, opener = self.login_opener)

    if data:
        html = BeautifulSoup(data)

        try:
            result_table = html.find('table', attrs = {'id' : 'torrenttable'})
            if not result_table:
                return results

            entries = result_table.find_all('tr')

            # First row is the table header
            for result in entries[1:]:
                link = result.find('td', attrs = {'class' : 'name'}).find('a')
                url = result.find('td', attrs = {'class' : 'quickdownload'}).find('a')
                details = result.find('td', attrs = {'class' : 'name'}).find('a')

                new = {
                    'id': link['href'].replace('/torrent/', ''),
                    'name': link.string,
                    'type': 'torrent',
                    'check_nzb': False,
                    'description': '',
                    'provider': self.getName(),
                    'url': self.urls['download'] % url['href'],
                    'detail_url': self.urls['download'] % details['href'],
                    'download': self.loginDownload,
                    'size': self.parseSize(result.find_all('td')[4].string),
                    'seeders': tryInt(result.find('td', attrs = {'class' : 'seeders'}).string),
                    'leechers': tryInt(result.find('td', attrs = {'class' : 'leechers'}).string),
                }

                # Verify the IMDb id on the detail page before scoring
                imdb_results = self.imdbMatch(self.urls['detail'] % new['id'], movie['library']['identifier'])

                new['score'] = fireEvent('score.calculate', new, movie, single = True)

                is_correct_movie = fireEvent('searcher.correct_movie',
                    nzb = new, movie = movie, quality = quality,
                    imdb_results = imdb_results, single = True)

                if is_correct_movie:
                    results.append(new)
                    self.found(new)

            return results
        except:
            log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))

    return []
def search(self, group):
    """Look up trailers for *group*; returns {'480p': [...], '720p': [...], '1080p': [...]}."""
    trailers = {'480p': [], '720p': [], '1080p': []}

    movie_name = getTitle(group)
    url = self.urls['api'] % self.movieUrlName(movie_name)

    try:
        data = self.getCache('hdtrailers.%s' % group['identifier'], url, show_error=False)
    except HTTPError:
        log.debug('No page found for: %s', movie_name)
        data = None

    if not data:
        return trailers

    used_alternative = False
    for provider in self.providers:
        provider_results = self.findByProvider(data, provider)

        # Fall back to the site search once, when the provider page 404'd
        if provider_results.get('404') and not used_alternative:
            provider_results = self.findViaAlternative(group)
            used_alternative = True

        trailers = mergeDicts(trailers, provider_results)

    return trailers
def searchSingle(self, group):
    """Download the first matching trailer next to the renamed movie files."""
    if self.isDisabled() or len(group['files']['trailer']) > 0:
        return

    trailers = fireEvent('trailer.search', group=group, merge=True)
    if not trailers:
        log.info('No trailers found for: %s', getTitle(group['library']))
        return

    wanted_quality = self.conf('quality')
    for trailer_url in trailers.get(wanted_quality, []):
        destination = '%s-trailer.%s' % (self.getRootName(group), getExt(trailer_url))

        if os.path.isfile(destination):
            log.debug('Trailer already exists: %s', destination)
        else:
            fireEvent(
                'file.download', url=trailer_url, dest=destination,
                urlopen_kwargs={'headers': {
                    'User-Agent': 'Quicktime'
                }}, single=True)

        # Only the first trailer is wanted
        break
def notify(self, message='', data=None, listener=None):
    """Deliver *message* through Pushover's messages API; True on success."""
    data = data or {}

    api_data = {
        'user': self.conf('user_key'),
        'token': self.conf('api_token'),
        'message': toUnicode(message),
        'priority': self.conf('priority'),
        'sound': self.conf('sound'),
    }

    # Attach an IMDb link when the notification is about a known movie
    if data and getIdentifier(data):
        imdb_id = getIdentifier(data)
        api_data['url'] = toUnicode('http://www.imdb.com/title/%s/' % imdb_id)
        api_data['url_title'] = toUnicode('%s on IMDb' % getTitle(data))

    try:
        response = self.urlopen(
            '%s/%s' % (self.api_url, '1/messages.json'),
            headers={'Content-type': 'application/x-www-form-urlencoded'},
            data=api_data)
        log.info2('Pushover responded with: %s', response)
        return True
    except:
        return False
def search(self, group):
    """Fetch trailer links for the movie in *group*, grouped by quality."""
    movie_name = getTitle(group['library'])

    api_url = self.urls['api'] % self.movieUrlName(movie_name)
    try:
        data = self.getCache('hdtrailers.%s' % group['library']['identifier'], api_url, show_error = False)
    except HTTPError:
        log.debug('No page found for: %s', movie_name)
        data = None

    collected = {'480p': [], '720p': [], '1080p': []}
    if not data:
        return collected

    tried_alternative = False
    for provider in self.providers:
        found = self.findByProvider(data, provider)

        # One-shot fallback to the site's own search when the page 404'd
        if found.get('404') and not tried_alternative:
            found = self.findViaAlternative(group)
            tried_alternative = True

        collected = mergeDicts(collected, found)

    return collected
def tryNextRelease(self, movie_id, manual = False):
    """Ignore the snatched release(s) for a movie and start a new search.

    Returns True when the new search was fired, False on failure.
    """
    snatched_status = fireEvent('status.get', 'snatched', single = True)
    ignored_status = fireEvent('status.get', 'ignored', single = True)

    try:
        db = get_session()

        # Ignore what was snatched so the next search skips it
        rels = db.query(Release).filter_by(
            status_id = snatched_status.get('id'),
            movie_id = movie_id
        ).all()
        for rel in rels:
            rel.status_id = ignored_status.get('id')
        db.commit()

        movie_dict = fireEvent('movie.get', movie_id, single = True)
        log.info('Trying next release for: %s', getTitle(movie_dict['library']))
        fireEvent('searcher.single', movie_dict)

        return True
    except:
        log.error('Failed searching for next release: %s', traceback.format_exc())
        return False
def findViaAlternative(self, group):
    """Fallback: search the HD-Trailers site directly for trailer links.

    Returns {'480p': [...], '720p': [...], '1080p': [...]}.
    """
    results = {'480p': [], '720p': [], '1080p': []}

    movie_name = getTitle(group['library'])

    url = "%s?%s" % (self.urls['backup'], tryUrlencode({'s': movie_name}))
    data = self.getCache('hdtrailers.alt.%s' % group['library']['identifier'], url)

    try:
        tables = SoupStrainer('div')
        html = BeautifulSoup(data, parse_only = tables)
        result_table = html.find_all('h2', text = re.compile(movie_name))

        for h2 in result_table:
            # h2 is a Tag: compare against its text; Tag.lower() raised
            # AttributeError and silently killed this whole loop before.
            if 'trailer' in h2.get_text().lower():
                parent = h2.parent.parent.parent
                trailerLinks = parent.find_all('a', text = re.compile('480p|720p|1080p'))
                try:
                    for trailer in trailerLinks:
                        # Key results on the quality text, not the Tag object
                        quality = re.search('(480p|720p|1080p)', trailer.get_text())
                        if quality:
                            # NOTE(review): href read from the link's parent as
                            # before — confirm against the site's markup.
                            results[quality.group(1)].insert(0, trailer.parent['href'])
                except:
                    pass
    except AttributeError:
        log.debug('No trailers found in via alternative.')

    return results
def notify(self, message = '', data = None, listener = None):
    """Sync a movie to the Trakt library (or run a connection test)."""
    if not data: data = {}

    credentials = {
        'username': self.conf('automation_username'),
        'password': self.conf('automation_password'),
    }

    if listener == 'test':
        # Test button from the settings page: only verify the credentials
        return self.call((self.urls['test'] % self.conf('automation_api_key')), credentials)

    post_data = dict(credentials)
    post_data['movies'] = [{
        'imdb_id': data['identifier'],
        'title': getTitle(data),
        'year': data['info']['year']
    }] if data else []

    result = self.call((self.urls['library'] % self.conf('automation_api_key')), post_data)

    # Optionally drop the movie from the watchlist once it is in the library
    if self.conf('remove_watchlist_enabled'):
        result = result and self.call((self.urls['unwatchlist'] % self.conf('automation_api_key')), post_data)

    return result
def searchSingle(self, message=None, group=None):
    """Search and download a trailer for a renamed movie group.

    Returns True when handled (downloaded or already present), False when no
    trailers were found, None when disabled or a trailer already exists.
    """
    # Use None instead of a mutable default dict: a shared {} default would
    # leak state between calls.
    if group is None:
        group = {}

    # Safe lookups so an empty group doesn't raise KeyError
    if self.isDisabled() or len(group.get('files', {}).get('trailer', [])) > 0:
        return

    trailers = fireEvent('trailer.search', group=group, merge=True)
    if not trailers:
        log.info('No trailers found for: %s', getTitle(group['library']))
        return False

    for trailer in trailers.get(self.conf('quality'), []):
        filename = self.conf('name').replace(
            '<filename>', group['filename']) + ('.%s' % getExt(trailer))
        destination = os.path.join(group['destination_dir'], filename)

        if not os.path.isfile(destination):
            fireEvent(
                'file.download', url=trailer, dest=destination,
                urlopen_kwargs={'headers': {
                    'User-Agent': 'Quicktime'
                }}, single=True)
        else:
            log.debug('Trailer already exists: %s', destination)

        group['renamed_files'].append(destination)

        # Download first and break
        break

    return True
def notify(self, message = '', data = None, listener = None):
    """Push a notification through the Pushover HTTPS API.

    Returns True when Pushover accepted the message, False otherwise.
    """
    if not data: data = {}  # avoid a shared mutable default argument

    http_handler = HTTPSConnection("api.pushover.net:443")

    api_data = {
        'user': self.conf('user_key'),
        'token': self.app_token,
        'message': toUnicode(message),
        'priority': self.conf('priority'),
    }

    if data and data.get('library'):
        # dict has no .extend(); the original raised AttributeError here
        api_data.update({
            'url': toUnicode('http://www.imdb.com/title/%s/' % data['library']['identifier']),
            'url_title': toUnicode('%s on IMDb' % getTitle(data['library'])),
        })

    http_handler.request('POST',
        "/1/messages.json",
        headers = {'Content-type': 'application/x-www-form-urlencoded'},
        body = tryUrlencode(api_data)
    )

    response = http_handler.getresponse()
    request_status = response.status

    if request_status == 200:
        log.info('Pushover notifications sent.')
        return True
    elif request_status == 401:
        log.error('Pushover auth failed: %s', response.reason)
        return False
    else:
        log.error('Pushover notification failed.')
        return False
def tryNextRelease(self, media_id, manual=False, force_download=False):
    """Ignore previously snatched/done releases and re-run the search."""
    try:
        releases = fireEvent('release.for_media', media_id, single=True)

        # Anything already snatched or finished must not be picked again
        for release in releases:
            if release.get('status') in ['snatched', 'done']:
                fireEvent('release.update_status', release.get('_id'), status='ignored')

        media = fireEvent('media.get', media_id, single=True)
        if media:
            log.info('Trying next release for: %s', getTitle(media))
            self.single(media, manual=manual, force_download=force_download)

        return True
    except:
        log.error('Failed searching for next release: %s', traceback.format_exc())
        return False
def tryNextRelease(self, media_id, manual=False):
    """Ignore snatched/done releases for a movie and trigger a fresh search.

    Returns True when the new search was fired, False on failure.
    """
    snatched_status, done_status, ignored_status = fireEvent(
        'status.get', ['snatched', 'done', 'ignored'], single=True)

    db = None
    try:
        db = get_session()
        rels = db.query(Release) \
            .filter_by(movie_id = media_id) \
            .filter(Release.status_id.in_([snatched_status.get('id'), done_status.get('id')])) \
            .all()

        # Prevent the searcher from grabbing the same release again
        for rel in rels:
            rel.status_id = ignored_status.get('id')
        db.commit()

        movie_dict = fireEvent('media.get', media_id=media_id, single=True)
        log.info('Trying next release for: %s', getTitle(movie_dict['library']))
        fireEvent('movie.searcher.single', movie_dict, manual=manual)

        return True
    except:
        log.error('Failed searching for next release: %s', traceback.format_exc())
        # db stays None when get_session() itself failed; the old code
        # raised NameError here and in the finally block in that case.
        if db:
            db.rollback()
        return False
    finally:
        if db:
            db.close()
def findViaAlternative(self, group):
    """Fallback search on the HD-Trailers site; {'480p': [...], '720p': [...], '1080p': [...]}."""
    results = {'480p': [], '720p': [], '1080p': []}

    movie_name = getTitle(group['library'])
    url = "%s?%s" % (self.urls['backup'], tryUrlencode({'s': movie_name}))

    try:
        data = self.getCache('hdtrailers.alt.%s' % group['library']['identifier'], url, show_error = False)
    except HTTPError:
        log.debug('No alternative page found for: %s', movie_name)
        data = None

    if not data:
        return results

    try:
        tables = SoupStrainer('div')
        html = BeautifulSoup(data, parse_only = tables)
        result_table = html.find_all('h2', text = re.compile(movie_name))
        for h2 in result_table:
            # Tag has no .lower(); use its text (the old code always raised
            # AttributeError here, so nothing was ever found).
            if 'trailer' in h2.get_text().lower():
                parent = h2.parent.parent.parent
                trailerLinks = parent.find_all('a', text = re.compile('480p|720p|1080p'))
                try:
                    for trailer in trailerLinks:
                        # Key on the quality text, not the Tag object
                        quality = re.search('(480p|720p|1080p)', trailer.get_text())
                        if quality:
                            # NOTE(review): parent['href'] kept from the
                            # original — verify against the site markup.
                            results[quality.group(1)].insert(0, trailer.parent['href'])
                except:
                    pass
    except AttributeError:
        log.debug('No trailers found in via alternative.')

    return results
def searchSingle(self, message = None, group = None):
    """Find and download a trailer next to a just-renamed movie.

    Returns True when handled, False when nothing was found, None when
    disabled or a trailer file is already present.
    """
    if not group: group = {}

    # Safe lookups: the default group is an empty dict, so a plain
    # group['files']['trailer'] would raise KeyError here.
    if self.isDisabled() or len(group.get('files', {}).get('trailer', [])) > 0:
        return

    trailers = fireEvent('trailer.search', group = group, merge = True)
    if not trailers:
        log.info('No trailers found for: %s', getTitle(group))
        return False

    for trailer in trailers.get(self.conf('quality'), []):
        ext = getExt(trailer)
        # Extensions longer than 5 chars are url junk; fall back to mp4
        filename = self.conf('name').replace('<filename>', group['filename']) + ('.%s' % ('mp4' if len(ext) > 5 else ext))
        destination = os.path.join(group['destination_dir'], filename)

        if not os.path.isfile(destination):
            trailer_file = fireEvent('file.download', url = trailer, dest = destination, urlopen_kwargs = {'headers': {'User-Agent': 'Quicktime'}}, single = True)

            # Download may fail and return a falsy value; don't trust small
            # trailers (< 1MB) either, try the next one.
            if not trailer_file or os.path.getsize(trailer_file) < (1024 * 1024):
                if trailer_file and os.path.isfile(trailer_file):
                    os.unlink(trailer_file)
                continue
        else:
            log.debug('Trailer already exists: %s', destination)

        group['renamed_files'].append(destination)

        # Download first and break
        break

    return True
def notifyXBMCnoJSON(self, host, data): server = 'http://%s/xbmcCmds/' % host # Notification(title, message [, timeout , image]) cmd = "xbmcHttp?command=ExecBuiltIn(Notification(%s,%s,'',%s))" % ( urllib.quote(getTitle(data)), urllib.quote(data['message']), urllib.quote(self.getNotificationImage('medium'))) server += cmd # I have no idea what to set to, just tried text/plain and seems to be working :) headers = { 'Content-Type': 'text/plain', } # authentication support if self.conf('password'): base64string = base64.encodestring( '%s:%s' % (self.conf('username'), self.conf('password'))).replace( '\n', '') headers['Authorization'] = 'Basic %s' % base64string try: log.debug('Sending non-JSON-type request to %s: %s', (host, data)) # response wil either be 'OK': # <html> # <li>OK # </html> # # or 'Error': # <html> # <li>Error:<message> # </html> # response = self.urlopen(server, headers=headers, timeout=3, show_error=False) if 'OK' in response: log.debug('Returned from non-JSON-type request %s: %s', (host, response)) # manually fake expected response array return [{'result': 'OK'}] else: log.error('Returned from non-JSON-type request %s: %s', (host, response)) # manually fake expected response array return [{'result': 'Error'}] except (MaxRetryError, requests.exceptions.Timeout, ConnectionError): log.info2( 'Couldn\'t send request to XBMC, assuming it\'s turned off') return [{'result': 'Error'}] except: log.error('Failed sending non-JSON-type request to XBMC: %s', traceback.format_exc()) return [{'result': 'Error'}]
def findViaAlternative(self, group):
    """Fallback search on the HD-Trailers site; {'480p': [...], '720p': [...], '1080p': [...]}."""
    results = {'480p': [], '720p': [], '1080p': []}

    movie_name = getTitle(group)
    url = "%s?%s" % (self.urls['backup'], tryUrlencode({'s': movie_name}))

    try:
        data = self.getCache('hdtrailers.alt.%s' % getIdentifier(group), url, show_error = False)
    except HTTPError:
        log.debug('No alternative page found for: %s', movie_name)
        data = None

    if not data:
        return results

    try:
        html = BeautifulSoup(data, parse_only = self.only_tables_tags)
        result_table = html.find_all('h2', text = re.compile(movie_name))

        for h2 in result_table:
            # Tag has no .lower(); use its text (the old code always raised
            # AttributeError here, so nothing was ever found).
            if 'trailer' in h2.get_text().lower():
                parent = h2.parent.parent.parent
                trailerLinks = parent.find_all('a', text = re.compile('480p|720p|1080p'))
                try:
                    for trailer in trailerLinks:
                        # Key on the quality text, not the Tag object
                        quality = re.search('(480p|720p|1080p)', trailer.get_text())
                        if quality:
                            # NOTE(review): parent['href'] kept from the
                            # original — verify against the site markup.
                            results[quality.group(1)].insert(0, trailer.parent['href'])
                except:
                    pass
    except AttributeError:
        log.debug('No trailers found in via alternative.')

    return results
def tryNextRelease(self, media_id, manual = False):
    """Ignore snatched/done releases for the media and search again.

    Returns True when the new search was fired, False on failure.
    """
    snatched_status, done_status, ignored_status = fireEvent('status.get', ['snatched', 'done', 'ignored'], single = True)

    db = None
    try:
        db = get_session()
        rels = db.query(Release) \
            .filter_by(movie_id = media_id) \
            .filter(Release.status_id.in_([snatched_status.get('id'), done_status.get('id')])) \
            .all()

        # Prevent the searcher from grabbing the same release again
        for rel in rels:
            rel.status_id = ignored_status.get('id')
        db.commit()

        movie_dict = fireEvent('media.get', media_id = media_id, single = True)
        log.info('Trying next release for: %s', getTitle(movie_dict['library']))
        fireEvent('movie.searcher.single', movie_dict, manual = manual)

        return True
    except:
        log.error('Failed searching for next release: %s', traceback.format_exc())
        # db stays None when get_session() itself failed; the old code
        # raised NameError here and in the finally block in that case.
        if db:
            db.rollback()
        return False
    finally:
        if db:
            db.close()
def restatus(self, media_id):
    """Recompute the 'active'/'done' status for a media item.

    The item stays 'active' until a 'done' release exists for a quality the
    profile flags as 'finish'. Returns True when handled, None on error.
    """
    try:
        db = get_db()

        m = db.get('id', media_id)
        previous_status = m['status']

        log.debug('Changing status for %s', getTitle(m))
        if not m['profile_id']:
            # Without a profile there is nothing left to search for
            m['status'] = 'done'
        else:
            move_to_wanted = True

            profile = db.get('id', m['profile_id'])
            media_releases = fireEvent('release.for_media', m['_id'], single = True)

            for q_identifier in profile['qualities']:
                index = profile['qualities'].index(q_identifier)

                for release in media_releases:
                    # A finished release of a 'finish' quality ends the search
                    if q_identifier == release['quality'] and (release.get('status') == 'done' and profile['finish'][index]):
                        move_to_wanted = False

            m['status'] = 'active' if move_to_wanted else 'done'

        # Only update when status has changed
        if previous_status != m['status']:
            db.update(m)

        return True
    except:
        log.error('Failed restatus: %s', traceback.format_exc())
def restatus(self, media_id, tag_recent=True, allowed_restatus=None):
    """Recompute a media item's status ('active'/'done').

    tag_recent: also tag the media as 'recent' afterwards.
    allowed_restatus: optional whitelist of statuses the update may write.
    Returns the (possibly unchanged) status, or None on error.
    """
    try:
        db = get_db()

        m = db.get('id', media_id)
        previous_status = m['status']

        log.debug('Changing status for %s', getTitle(m))
        if not m['profile_id']:
            # Without a profile there is nothing left to search for
            m['status'] = 'done'
        else:
            m['status'] = 'active'

            try:
                profile = db.get('id', m['profile_id'])
                media_releases = fireEvent('release.for_media', m['_id'], single=True)
                done_releases = [release for release in media_releases if release.get('status') == 'done']

                if done_releases:
                    # Check if we are finished with the media
                    for release in done_releases:
                        if fireEvent('quality.isfinish', {
                            'identifier': release['quality'],
                            'is_3d': release.get('is_3d', False)
                        }, profile, timedelta(seconds=time.time() - release['last_edit']).days, single=True):
                            m['status'] = 'done'
                            break
                elif previous_status == 'done':
                    # No finished releases (anymore), but don't demote a
                    # previously finished item
                    m['status'] = 'done'
            except RecordNotFound:
                log.debug('Failed restatus, keeping previous: %s', traceback.format_exc())
                m['status'] = previous_status

        # Only update when status has changed
        if previous_status != m['status'] and (not allowed_restatus or m['status'] in allowed_restatus):
            db.update(m)

        # Tag media as recent
        if tag_recent:
            self.tag(media_id, 'recent', update_edited=True)

        return m['status']
    except:
        log.error('Failed restatus: %s', traceback.format_exc())
def search(self, movie, quality):
    """Search The Pirate Bay for *movie* at *quality*.

    Returns a list of result dicts for the movie searcher; [] when disabled,
    no domain is reachable, or on a parse error.
    """
    results = []
    if self.isDisabled() or not self.getDomain():
        return results

    cache_key = 'thepiratebay.%s.%s' % (movie['library']['identifier'], quality.get('identifier'))
    search_url = self.urls['search'] % (self.getDomain(), quote_plus(getTitle(movie['library']) + ' ' + quality['identifier']), self.getCatId(quality['identifier'])[0])

    data = self.getCache(cache_key, search_url)
    if data:
        try:
            soup = BeautifulSoup(data)
            results_table = soup.find('table', attrs = {'id': 'searchResult'})
            entries = results_table.find_all('tr')

            # First row is the table header
            for result in entries[1:]:
                link = result.find(href = re.compile('torrent\/\d+\/'))
                download = result.find(href = re.compile('magnet:'))

                # Size is embedded in the description cell text
                size = re.search('Size (?P<size>.+),', unicode(result.select('font.detDesc')[0])).group('size')

                if link and download:

                    def extra_score(item):
                        # Bonus points for uploader trust badges on the row
                        trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) != None]
                        vip = (0, 20)[result.find('img', alt = re.compile('VIP')) != None]
                        confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) != None]
                        moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) != None]

                        return confirmed + trusted + vip + moderated

                    new = {
                        'id': re.search('/(?P<id>\d+)/', link['href']).group('id'),
                        'type': 'torrent_magnet',
                        'name': link.string,
                        'check_nzb': False,
                        'description': '',
                        'provider': self.getName(),
                        'url': download['href'],
                        'detail_url': self.getDomain(link['href']),
                        'size': self.parseSize(size),
                        'seeders': tryInt(result.find_all('td')[2].string),
                        'leechers': tryInt(result.find_all('td')[3].string),
                        'extra_score': extra_score,
                        'get_more_info': self.getMoreInfo
                    }

                    new['score'] = fireEvent('score.calculate', new, movie, single = True)

                    is_correct_movie = fireEvent('searcher.correct_movie',
                        nzb = new, movie = movie, quality = quality,
                        imdb_results = False, single = True)

                    if is_correct_movie:
                        results.append(new)
                        self.found(new)

            return results
        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    return []
def buildUrl(self, media, host):
    """Compose the provider search URL for *media* against *host*."""
    params = {
        'user': host['name'],
        'passkey': host['pass_key'],
        'imdbid': getIdentifier(media),
        'search': getTitle(media),
    }
    return '%s?%s' % (host['host'], tryUrlencode(params))
def buildUrl(self, media, host):
    """Build the search query URL for this host: credentials + imdb id + title."""
    query = tryUrlencode({
        'user': host['name'],
        'passkey': host['pass_key'],
        'imdbid': getIdentifier(media),
        'search': getTitle(media),
    })
    return '%s?%s' % (host['host'], query)
def _search(self, movie, quality, results):
    """Scrape the provider's search page and append matches to *results*.

    Each appended dict follows the searcher's result shape:
    id, name, age, size, url, detail_url, extra_score.
    """
    # Query: "<simplified title> <year> <quality>"
    q = '%s %s %s' % (simplifyString(getTitle(movie)), movie['info']['year'], quality['identifier'])
    log.info(q)

    search = []

    r = requests.get(self.urls['search'], params={'gps': q},
        auth=(self.conf('username'), self.conf('password')))
    soup = BeautifulSoup(r.text)

    # Result rows alternate between two css classes
    rows = soup.find_all('tr', 'rRow1') + soup.find_all('tr', 'rRow2')
    for tr in rows:
        url = tr.find('td', 'subject').find('a')['href']
        search.append({
            'id': tr.find('input', 'checkbox')['value'],
            'file': urllib.unquote(os.path.basename(url)),
            'url': url,
            'size': tr.find('td', 'fSize').string,
            'date': tr.find('td', 'timeStamp').string
        })

    for s in search:

        def extra_score(item):
            # Boost known high-quality release groups and 1080p releases
            group1 = (0, 50)[any(g in s['file'].lower() for g in ('ctrlhd', 'wiki', 'esir', 'shitsony', 'cytsunee', 'don.mkv'))]
            group2 = (0, 30)[any(g in s['file'].lower() for g in ('chd', 'hdc', 'hdchina'))]
            hires = (0, 10)['1080p' in s['file'].lower()]

            return group1 + group2 + hires

        # Listing dates carry no year; roll back one year when the parsed
        # date would land in the future
        d = parser.parse(s['date'])
        if d > datetime.now():
            d = datetime(d.year - 1, d.month, d.day)
        age = (datetime.now() - d).days + 1

        results.append({
            'id': s['id'],
            'name': s['file'],
            'age': age,
            'size': self.parseSize(s['size']),
            'url': s['url'],
            'detail_url': r.url,
            'extra_score': extra_score
        })
def suggestView(self, limit=6, **kwargs):
    """API view: return up to *limit* suggested movies for the frontend.

    kwargs may carry comma-separated 'movies', 'ignored' and 'seen' lists
    that seed and filter the suggestion engine.
    """
    if self.isDisabled():
        return {'success': True, 'movies': []}

    movies = splitString(kwargs.get('movies', ''))
    ignored = splitString(kwargs.get('ignored', ''))
    seen = splitString(kwargs.get('seen', ''))

    cached_suggestion = self.getCache('suggestion_cached')
    if cached_suggestion:
        suggestions = cached_suggestion
    else:
        # Seed with the library's active/done movies when none were given
        if not movies or len(movies) == 0:
            active_movies = fireEvent('media.with_status', ['active', 'done'], types='movie', single=True)
            movies = [getIdentifier(x) for x in active_movies]

        if not ignored or len(ignored) == 0:
            ignored = splitString(Env.prop('suggest_ignore', default=''))
        if not seen or len(seen) == 0:
            movies.extend(splitString(Env.prop('suggest_seen', default='')))

        suggestions = fireEvent('movie.suggest', movies=movies, ignore=ignored, single=True)
        self.setCache('suggestion_cached', suggestions, timeout=6048000)  # Cache for 10 weeks

    medias = []
    for suggestion in suggestions[:int(limit)]:

        # Cache poster, preferring tmdb-hosted images
        posters = suggestion.get('images', {}).get('poster', [])
        poster = [x for x in posters if 'tmdb' in x]
        posters = poster if len(poster) > 0 else posters

        cached_poster = fireEvent(
            'file.download', url=posters[0],
            single=True) if len(posters) > 0 else False
        files = {'image_poster': [cached_poster]} if cached_poster else {}

        medias.append({
            'status': 'suggested',
            'title': getTitle(suggestion),
            'type': 'movie',
            'info': suggestion,
            'files': files,
            'identifiers': {
                'imdb': suggestion.get('imdb')
            }
        })

    return {'success': True, 'movies': medias}
def automationView(self, force_update=False, **kwargs):
    """API view: return chart lists, filtering out ignored movies and
    anything already in the library.
    """
    db = get_db()

    charts = fireEvent('automation.get_chart_list', merge=True)
    ignored = splitString(Env.prop('charts_ignore', default=''))

    # Create a list the movie/list.js can use
    for chart in charts:
        medias = []
        for media in chart.get('list', []):

            identifier = media.get('imdb')
            if identifier in ignored:
                continue

            try:
                # Skip movies that are already in the library
                try:
                    in_library = db.get('media', 'imdb-%s' % identifier)
                    if in_library:
                        continue
                except RecordNotFound:
                    pass
            except:
                pass

            # Cache poster, preferring tmdb-hosted images
            posters = media.get('images', {}).get('poster', [])
            poster = [x for x in posters if 'tmdb' in x]
            posters = poster if len(poster) > 0 else posters

            cached_poster = fireEvent(
                'file.download', url=posters[0],
                single=True) if len(posters) > 0 else False
            files = {
                'image_poster': [cached_poster]
            } if cached_poster else {}

            medias.append({
                'status': 'chart',
                'title': getTitle(media),
                'type': 'movie',
                'info': media,
                'files': files,
                'identifiers': {
                    'imdb': identifier
                }
            })

        chart['list'] = medias

    return {
        'success': True,
        'count': len(charts),
        'charts': charts,
        'ignored': ignored,
    }
def download(self, data, movie, manual = False): snatched_status = fireEvent('status.get', 'snatched', single = True) # Download movie to temp filedata = None if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))): filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id')) if filedata == 'try_next': return filedata successful = fireEvent('download', data = data, movie = movie, manual = manual, filedata = filedata, single = True) if successful: try: # Mark release as snatched db = get_session() rls = db.query(Release).filter_by(identifier = md5(data['url'])).first() if rls: rls.status_id = snatched_status.get('id') db.commit() log_movie = '%s (%s) in %s' % (getTitle(movie['library']), movie['library']['year'], rls.quality.label) snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie) log.info(snatch_message) fireEvent('movie.snatched', message = snatch_message, data = rls.to_dict()) # If renamer isn't used, mark movie done if not Env.setting('enabled', 'renamer'): active_status = fireEvent('status.get', 'active', single = True) done_status = fireEvent('status.get', 'done', single = True) try: if movie['status_id'] == active_status.get('id'): for profile_type in movie['profile']['types']: if rls and profile_type['quality_id'] == rls.quality.id and profile_type['finish']: log.info('Renamer disabled, marking movie as finished: %s', log_movie) # Mark release done rls.status_id = done_status.get('id') db.commit() # Mark movie done mvie = db.query(Movie).filter_by(id = movie['id']).first() mvie.status_id = done_status.get('id') db.commit() except: log.error('Failed marking movie finished, renamer disabled: %s', traceback.format_exc()) except: log.error('Failed marking movie finished: %s', traceback.format_exc()) return True log.info('Tried to download, but none of the downloaders are enabled') return False
def afterUpdate():
    # Closure run once a movie finished updating: decrement the per-folder
    # progress counter and push the result to the frontend.
    # NOTE(review): relies on `self`, `folder` and `identifier` from the
    # enclosing scope.
    self.in_progress[folder][
        'to_go'] = self.in_progress[folder]['to_go'] - 1
    total = self.in_progress[folder]['total']
    movie_dict = fireEvent('movie.get', identifier, single=True)

    # Suppress the per-movie popup message for large batches (> 5 movies)
    fireEvent('notify.frontend',
              type='movie.added',
              data=movie_dict,
              message=None if total > 5 else 'Added "%s" to manage.' % getTitle(movie_dict['library']))
def search(self, movie, quality):
    # Provider search entry point: try every plausible title variant and
    # return the de-duplicated result set.
    if self.isDisabled():
        return []

    hits = [
        hit
        for candidate in possibleTitles(getTitle(movie['library']))
        for hit in self._search(candidate, movie, quality)
    ]

    return self.removeDuplicateResults(hits)
def search(self, movie, quality):
    # Skip this provider entirely for pre-release qualities it cannot serve.
    pre_releases = fireEvent('quality.pre_releases', single=True)
    if self.isDisabled() or quality['identifier'] in pre_releases:
        return []

    hits = [
        hit
        for candidate in possibleTitles(getTitle(movie['library']))
        for hit in self._search(candidate, movie, quality)
    ]

    return self.removeDuplicateResults(hits)
def suggestView(self, limit = 6, **kwargs):
    """Return up to `limit` movie suggestions for the frontend.

    Optional kwargs 'movies', 'ignored' and 'seen' are comma-separated
    strings; when absent they fall back to the active library and the
    persisted suggest_ignore / suggest_seen properties.
    :return: {'success': True, 'movies': [media dicts]}
    """
    if self.isDisabled():
        return {
            'success': True,
            'movies': []
        }

    movies = splitString(kwargs.get('movies', ''))
    ignored = splitString(kwargs.get('ignored', ''))
    seen = splitString(kwargs.get('seen', ''))

    cached_suggestion = self.getCache('suggestion_cached')
    if cached_suggestion:
        suggestions = cached_suggestion
    else:
        if not movies or len(movies) == 0:
            # No explicit seed list: seed from the user's own library.
            active_movies = fireEvent('media.with_status', ['active', 'done'], types = 'movie', single = True)
            movies = [getIdentifier(x) for x in active_movies]

        if not ignored or len(ignored) == 0:
            ignored = splitString(Env.prop('suggest_ignore', default = ''))
        if not seen or len(seen) == 0:
            # NOTE(review): "seen" titles are appended to the seed list,
            # not to `seen` — presumably so they influence suggestions
            # without being resuggested; confirm against the suggest event.
            movies.extend(splitString(Env.prop('suggest_seen', default = '')))

        suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
        self.setCache('suggestion_cached', suggestions, timeout = 6048000)  # Cache for 10 weeks

    medias = []
    for suggestion in suggestions[:int(limit)]:

        # Cache poster; prefer tmdb-hosted images when available.
        posters = suggestion.get('images', {}).get('poster', [])
        poster = [x for x in posters if 'tmdb' in x]
        posters = poster if len(poster) > 0 else posters

        cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False
        files = {'image_poster': [cached_poster] } if cached_poster else {}

        medias.append({
            'status': 'suggested',
            'title': getTitle(suggestion),
            'type': 'movie',
            'info': suggestion,
            'files': files,
            'identifiers': {
                'imdb': suggestion.get('imdb')
            }
        })

    return {
        'success': True,
        'movies': medias
    }
def afterUpdate():
    # A folder item finished processing: decrement the remaining count
    # and notify the frontend that the movie was added.
    state = self.in_progress[folder]
    state["to_go"] = state["to_go"] - 1
    total = state["total"]

    movie_dict = fireEvent("movie.get", identifier, single=True)

    # Only show a toast per movie for small batches.
    toast = None
    if total <= 5:
        toast = 'Added "%s" to manage.' % getTitle(movie_dict["library"])

    fireEvent("notify.frontend", type="movie.added", data=movie_dict, message=toast)
def search(self, movie, quality):
    # Provider cannot serve pre-release qualities; bail out early.
    pre_releases = fireEvent("quality.pre_releases", single=True)
    if self.isDisabled() or quality["identifier"] in pre_releases:
        return []

    found = []
    for candidate in possibleTitles(getTitle(movie["library"])):
        found += self._search(candidate, movie, quality)

    return self.removeDuplicateResults(found)
def calculate(self, nzb, movie):
    ''' Calculate the score of a NZB, used for sorting later '''

    release_name = nzb['name']
    library = movie['library']

    # Base score from the full release name (incl. any extra name part).
    score = nameScore(toUnicode(release_name + ' ' + nzb.get('name_extra', '')), library['year'])

    for title_entry in library['titles']:
        unicode_name = toUnicode(release_name)
        unicode_title = toUnicode(title_entry['title'])
        score += nameRatioScore(unicode_name, unicode_title)
        score += namePositionScore(unicode_name, unicode_title)

    score += sizeScore(nzb['size'])

    # Torrents only
    if nzb.get('seeders'):
        try:
            score += nzb.get('seeders') / 5
            score += nzb.get('leechers') / 10
        except:
            pass

    # Provider score
    score += providerScore(nzb['provider'])

    # Duplicates in name
    score += duplicateScore(release_name, getTitle(library))

    # Partial ignored words
    score += partialIgnoredScore(release_name, getTitle(library))

    # Ignore single downloads from multipart
    score += halfMultipartScore(release_name)

    # Extra provider specific check
    extra_score = nzb.get('extra_score')
    if extra_score:
        score += extra_score(nzb)

    return score
def afterUpdate():
    # Abort when the scan was cancelled or the application is stopping.
    if not self.in_progress or self.shuttingDown():
        return

    total = self.in_progress[folder]['total']
    movie_dict = fireEvent('media.get', identifier, single=True)

    # Large batches (> 5) get no per-movie toast.
    if total > 5:
        msg = None
    else:
        msg = 'Added "%s" to manage.' % getTitle(movie_dict)

    fireEvent('notify.frontend', type='movie.added', data=movie_dict, message=msg)
def automationView(self, force_update = False, **kwargs):
    """Build the chart lists for the frontend (movie/list.js).

    Fetches all automation charts, drops entries that are ignored or
    already in the library, caches a poster per entry, and rewrites each
    chart's 'list' into frontend-ready media dicts.
    :return: {'success', 'count', 'charts', 'ignored'}
    """
    db = get_db()

    charts = fireEvent('automation.get_chart_list', merge = True)
    ignored = splitString(Env.prop('charts_ignore', default = ''))

    # Create a list the movie/list.js can use
    for chart in charts:
        medias = []
        for media in chart.get('list', []):

            identifier = media.get('imdb')
            if identifier in ignored:
                continue

            try:
                # Skip movies already present in the library.
                try:
                    in_library = db.get('media', 'imdb-%s' % identifier)
                    if in_library:
                        continue
                except RecordNotFound:
                    # Not in library: keep the chart entry.
                    pass
            except:
                # Best effort: a DB hiccup should not hide the chart entry.
                pass

            # Cache poster; prefer tmdb-hosted images when available.
            posters = media.get('images', {}).get('poster', [])
            poster = [x for x in posters if 'tmdb' in x]
            posters = poster if len(poster) > 0 else posters

            cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False
            files = {'image_poster': [cached_poster] } if cached_poster else {}

            medias.append({
                'status': 'chart',
                'title': getTitle(media),
                'type': 'movie',
                'info': media,
                'files': files,
                'identifiers': {
                    'imdb': identifier
                }
            })

        chart['list'] = medias

    return {
        'success': True,
        'count': len(charts),
        'charts': charts,
        'ignored': ignored,
    }
def restatus(self, media_id):
    """Recompute and persist the status of a media item.

    A media item with no profile is 'done'; otherwise it is 'active'
    unless one of its finished releases satisfies a "finish" quality of
    its profile (then 'done'). When the profile row is gone the previous
    status is kept. On change the record is updated and tagged 'recent'.
    :return: the (possibly unchanged) status string, or None on error.
    """
    try:
        db = get_db()

        m = db.get("id", media_id)
        previous_status = m["status"]

        log.debug("Changing status for %s", getTitle(m))
        if not m["profile_id"]:
            # No profile means nothing left to search for.
            m["status"] = "done"
        else:
            m["status"] = "active"
            try:
                profile = db.get("id", m["profile_id"])
                media_releases = fireEvent("release.for_media", m["_id"], single=True)
                done_releases = [release for release in media_releases if release.get("status") == "done"]

                if done_releases:
                    # Check if we are finished with the media
                    for release in done_releases:
                        # quality.isfinish decides based on quality, 3D flag,
                        # profile and the age (days) of the release.
                        if fireEvent(
                            "quality.isfinish",
                            {"identifier": release["quality"], "is_3d": release.get("is_3d", False)},
                            profile,
                            timedelta(seconds=time.time() - release["last_edit"]).days,
                            single=True,
                        ):
                            m["status"] = "done"
                            break
                elif previous_status == "done":
                    # No finished releases but it was done before: keep done.
                    m["status"] = "done"
            except RecordNotFound:
                # Profile (or related record) vanished: keep previous status.
                log.debug("Failed restatus, keeping previous: %s", traceback.format_exc())
                m["status"] = previous_status

        # Only update when status has changed
        if previous_status != m["status"]:
            db.update(m)

            # Tag media as recent
            self.tag(media_id, "recent", update_edited=True)

        return m["status"]
    except:
        log.error("Failed restatus: %s", traceback.format_exc())
def calculate(self, nzb, movie):
    ''' Calculate the score of a NZB, used for sorting later '''

    # Name-based base score, including the optional extra name part.
    total = nameScore(toUnicode(nzb['name'] + ' ' + nzb.get('name_extra', '')), movie['library']['year'])

    for entry in movie['library']['titles']:
        total += nameRatioScore(toUnicode(nzb['name']), toUnicode(entry['title']))
        total += namePositionScore(toUnicode(nzb['name']), toUnicode(entry['title']))

    total += sizeScore(nzb['size'])

    # Torrents only
    if nzb.get('seeders'):
        try:
            total += nzb.get('seeders') / 5
            total += nzb.get('leechers') / 10
        except:
            pass

    # Provider score
    total += providerScore(nzb['provider'])

    # Duplicates in name
    total += duplicateScore(nzb['name'], getTitle(movie['library']))

    # Partial ignored words
    total += partialIgnoredScore(nzb['name'], getTitle(movie['library']))

    # Ignore single downloads from multipart
    total += halfMultipartScore(nzb['name'])

    # Extra provider specific check
    bonus = nzb.get('extra_score')
    if bonus:
        total += bonus(nzb)

    return total
def download(self, data, movie, manual=False): snatched_status = fireEvent("status.get", "snatched", single=True) # Download movie to temp filedata = None if data.get("download") and (ismethod(data.get("download")) or isfunction(data.get("download"))): filedata = data.get("download")(url=data.get("url"), nzb_id=data.get("id")) if filedata is "try_next": return filedata successful = fireEvent("download", data=data, movie=movie, manual=manual, single=True, filedata=filedata) if successful: # Mark release as snatched db = get_session() rls = db.query(Release).filter_by(identifier=md5(data["url"])).first() rls.status_id = snatched_status.get("id") db.commit() log_movie = "%s (%s) in %s" % (getTitle(movie["library"]), movie["library"]["year"], rls.quality.label) snatch_message = 'Snatched "%s": %s' % (data.get("name"), log_movie) log.info(snatch_message) fireEvent("movie.snatched", message=snatch_message, data=rls.to_dict()) # If renamer isn't used, mark movie done if not Env.setting("enabled", "renamer"): active_status = fireEvent("status.get", "active", single=True) done_status = fireEvent("status.get", "done", single=True) try: if movie["status_id"] == active_status.get("id"): for profile_type in movie["profile"]["types"]: if profile_type["quality_id"] == rls.quality.id and profile_type["finish"]: log.info("Renamer disabled, marking movie as finished: %s" % log_movie) # Mark release done rls.status_id = done_status.get("id") db.commit() # Mark movie done mvie = db.query(Movie).filter_by(id=movie["id"]).first() mvie.status_id = done_status.get("id") db.commit() except Exception, e: log.error("Failed marking movie finished: %s %s" % (e, traceback.format_exc())) # db.close() return True
def _search(self, movie, quality, results):
    # Build a "<title> <quality>" search string and query the provider;
    # fall back to every possible title variant when nothing is found.

    # Cookie login
    #if not self.last_login_check and not self.login():
    #    pass
    #    return

    # NOTE(review): the chained `.replace(' ',' ')` calls are no-ops as
    # written — they look like multi-space-to-single-space collapses whose
    # whitespace was mangled at some point; verify against upstream source.
    TitleStringReal = (getTitle(movie['info']) + ' ' + simplifyString(quality['identifier'] )).replace('-',' ').replace(' ',' ').replace(' ',' ').replace(' ',' ').encode("utf8")

    self._searchOnTitle(TitleStringReal, movie, quality, results)

    if not results:
        # Nothing found with the quality-suffixed string: retry with the
        # plain title variants (no year).
        media_title = fireEvent('library.query', movie, include_year = False, single = True)
        for title in possibleTitles(media_title):
            self._searchOnTitle(title, movie, quality, results)
def search(self, group, filename, destination):
    """Find and download a French (VF) trailer for a movie group.

    Googles "<title> <year> bande annonce vf HD", keeps youtube/dailymotion
    hits, downloads the first working one via the bundled youtube_dl into
    rootDir, then moves it next to `destination` with the real extension.
    :return: True when a trailer was downloaded and moved, False otherwise.
    """
    movie_name = getTitle(group['library'])
    # ASCII-fold the title so shell/path handling stays safe.
    movienorm = unicodedata.normalize('NFKD', movie_name).encode('ascii','ignore')
    movie_year = group['library']['year']
    searchstring = movienorm+' '+ str(movie_year) +' bande annonce vf HD'
    # Throttle to avoid hammering Google between consecutive searches.
    time.sleep(3)
    log.info('Searching google for: %s', searchstring)
    g = pygoogle(str(searchstring))
    diclist = g.search()
    urllist = g.get_urls()
    cleanlist = []
    for x in urllist:
        # Only keep hosts youtube_dl can reliably handle here.
        if 'youtube' in x or 'dailymotion' in x:
            cleanlist.append(x)
    if cleanlist:
        bocount = 0
        for bo in cleanlist:
            # bocount flags success; skip remaining candidates once set.
            if bocount == 0:
                # %(ext)s is a youtube_dl output template placeholder.
                tempdest = unicodedata.normalize('NFKD', os.path.join(rootDir,filename)).encode('ascii','ignore')+u'.%(ext)s'
                dest = destination+u'.%(ext)s'
                log.info('Trying to download : %s to %s ', (bo, tempdest))
                p = subprocess.Popen([sys.executable, 'youtube_dl/__main__.py', '-o',tempdest,'--newline', bo],cwd=rootDir, shell=False, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
                # Stream progress lines into the log while it runs.
                while p.poll() is None:
                    l = p.stdout.readline()  # This blocks until it receives a newline.
                    lmsg = l.replace('%',' percent')+' '+filename
                    log.info(lmsg)
                # When the subprocess terminates there might be unconsumed output
                # that still needs to be processed.
                (out, err) = p.communicate()
                outmsg = 'Out for '+filename +' : '+out
                errmsg = 'Err for '+filename +' : '+err
                if out:
                    log.info(outmsg)
                if err:
                    # Download failed; try the next candidate URL.
                    log.info(errmsg)
                    continue
                else:
                    # Locate the downloaded file (extension chosen by
                    # youtube_dl) and move it to its final destination.
                    listetemp = glob.glob(os.path.join(rootDir,'*'))
                    for listfile in listetemp:
                        if unicodedata.normalize('NFKD', filename).encode('ascii','ignore') in listfile:
                            ext = listfile[-4:]
                            finaldest = destination+ext
                            shutil.move(listfile, finaldest)
                            bocount = 1
                            log.info('Downloaded trailer for : %s', movienorm)
                            return True
    else:
        return False
def query(self, media, first = True, include_year = True, **kwargs):
    # Build the list of searchable titles for a movie; returns None/one
    # title when `first`, the whole list otherwise.
    if media.get('type') != 'movie':
        return

    main_title = getTitle(media)
    titles = media['info'].get('titles', [])
    # Keep the in-place insert: the default title is prepended to the
    # info dict's own title list, as before.
    titles.insert(0, main_title)

    # Add year identifier to titles
    if include_year:
        year_suffix = ' %s' % str(media['info']['year'])
        titles = [t + year_suffix for t in titles]

    if not first:
        return titles

    return titles[0] if titles else None
def createRefreshHandler(self, id):
    """Build a closure that refreshes the library info for media `id`.

    :return: the handler callable, or None when the media row no longer
             exists.

    BUG FIX: the original checked `if handler:` after the `if media:`
    block — when the media lookup failed, `handler` was never defined and
    that line raised NameError instead of returning None. Also dropped
    the truthiness check on a def'd function, which is always true.
    """
    db = get_session()
    media = db.query(Media).filter_by(id = id).first()

    if media:
        default_title = getTitle(media.library)
        identifier = media.library.identifier
        event = 'library.update.%s' % media.type

        def handler():
            fireEvent(event, identifier = identifier, default_title = default_title, on_complete = self.createOnComplete(id))

        return handler

    return None
def createRefreshHandler(self, id):
    """Build a closure that force-refreshes the library info for media `id`.

    :return: the handler callable, or None when the media row no longer
             exists.

    BUG FIX: the original executed `return handler` unconditionally, so a
    failed media lookup raised NameError on the undefined local instead
    of returning None.
    """
    db = get_session()
    media = db.query(Media).filter_by(id = id).first()

    if not media:
        return None

    default_title = getTitle(media.library)
    identifier = media.library.identifier
    # Expire cached ORM state so the update event sees fresh rows.
    db.expire_all()

    def handler():
        fireEvent('library.update.%s' % media.type, identifier = identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(id))

    return handler
def query(self, media, first=True, include_year=True, **kwargs):
    # Produce searchable title strings for a movie media dict.
    if media.get('type') != 'movie':
        return

    info = media['info']
    titles = info.get('titles', [])
    # Prepend the default title in place (preserves the original's
    # mutation of info['titles']).
    titles.insert(0, getTitle(media))

    # Add year identifier to titles
    if include_year:
        titles = ['%s %s' % (t, str(info['year'])) for t in titles]

    if first:
        return titles[0] if titles else None

    return titles
def tryNextRelease(self, media_id, manual=False):
    # Mark previously snatched/done releases ignored, then re-run the
    # search for this media so the next-best release gets picked.
    try:
        db = get_db()
        snatched_or_done = fireEvent('media.with_status', ['snatched', 'done'], single=True)

        for release in snatched_or_done:
            release['status'] = 'ignored'
            db.update(release)

        movie_dict = fireEvent('media.get', media_id, single=True)
        log.info('Trying next release for: %s', getTitle(movie_dict))
        self.single(movie_dict, manual=manual)

        return True
    except:
        log.error('Failed searching for next release: %s', traceback.format_exc())
        return False
def notify(self, message='', data=None, listener=None):
    # Push a notification through the Pushover REST API.
    if not data:
        data = {}

    connection = HTTPSConnection("api.pushover.net:443")

    payload = {
        'user': self.conf('user_key'),
        'token': self.conf('api_token'),
        'message': toUnicode(message),
        'priority': self.conf('priority'),
        'sound': self.conf('sound'),
    }

    # Attach an IMDb link when the payload carries a known movie.
    if data and getIdentifier(data):
        payload.update({
            'url': toUnicode('http://www.imdb.com/title/%s/' % getIdentifier(data)),
            'url_title': toUnicode('%s on IMDb' % getTitle(data)),
        })

    connection.request(
        'POST', '/1/messages.json',
        headers={'Content-type': 'application/x-www-form-urlencoded'},
        body=tryUrlencode(payload))

    response = connection.getresponse()
    status = response.status

    if status == 200:
        log.info('Pushover notifications sent.')
        return True

    if status == 401:
        log.error('Pushover auth failed: %s', response.reason)
        return False

    log.error('Pushover notification failed: %s', status)
    return False
def ignoreView(self, imdb=None, limit=6, remove_only=False, mark_seen=False, **kwargs):
    # Mark a suggestion seen or ignored, refresh the suggestion cache,
    # and hand back the replacement suggestion (the item past `limit`).
    ignored = splitString(Env.prop('suggest_ignore', default=''))
    seen = splitString(Env.prop('suggest_seen', default=''))

    new_suggestions = []
    if imdb:
        if mark_seen:
            seen.append(imdb)
            Env.prop('suggest_seen', ','.join(set(seen)))
        elif not remove_only:
            ignored.append(imdb)
            Env.prop('suggest_ignore', ','.join(set(ignored)))

        new_suggestions = self.updateSuggestionCache(ignore_imdb=imdb, limit=limit, ignored=ignored, seen=seen)

    # No replacement available past the visible window.
    if len(new_suggestions) <= limit:
        return {'result': False}

    # Only return new (last) item
    replacement = new_suggestions[limit]
    media = {
        'status': 'suggested',
        'title': getTitle(replacement),
        'type': 'movie',
        'info': replacement,
        'identifiers': {
            'imdb': replacement.get('imdb')
        }
    }

    return {'result': True, 'movie': media}
def notify(self, message='', data=None, listener=None):
    # Sync a movie to the remote watch service; 'test' listener only
    # verifies the credentials.
    if not data:
        data = {}

    credentials = {
        'username': self.conf('automation_username'),
        'password': self.conf('automation_password'),
    }

    if listener == 'test':
        return self.call((self.urls['test'] % self.conf('automation_api_key')), credentials)

    post_data = dict(credentials)
    post_data['movies'] = [{
        'imdb_id': getIdentifier(data),
        'title': getTitle(data),
        'year': data['info']['year']
    }] if data else []

    result = self.call((self.urls['library'] % self.conf('automation_api_key')), post_data)

    # Optionally drop the movie from the remote watchlist as well.
    if self.conf('remove_watchlist_enabled'):
        result = result and self.call((self.urls['unwatchlist'] % self.conf('automation_api_key')), post_data)

    return result
def search(self, movie, quality):
    # Provider search: log in when the provider needs it, then search by
    # imdb id when supported, otherwise by every possible title.
    if self.isDisabled():
        return []

    # Login if needed
    if self.urls.get('login') and not self.login():
        log.error('Failed to login to: %s', self.getName())
        return []

    # Create result container; providers with a _search method can use
    # the imdb id directly.
    supports_imdb = hasattr(self, '_search')
    container = ResultList(self, movie, quality, imdb_results=supports_imdb)

    if supports_imdb:
        self._search(movie, quality, container)
    else:
        # Search possible titles
        for candidate in possibleTitles(getTitle(movie['library'])):
            self._searchOnTitle(candidate, movie, quality, container)

    return container
def search(self, group):
    # Collect trailer links per resolution for this movie group from
    # each configured provider, with a one-shot alternative lookup on 404.
    title = getTitle(group['library'])

    api_url = self.urls['api'] % self.movieUrlName(title)
    page = self.getCache('hdtrailers.%s' % group['library']['identifier'], api_url)

    collected = {'480p':[], '720p':[], '1080p':[]}
    if not page:
        return collected

    tried_alternative = False
    for provider in self.providers:
        found = self.findByProvider(page, provider)

        # Find alternative
        if found.get('404') and not tried_alternative:
            found = self.findViaAlternative(group)
            tried_alternative = True

        collected = mergeDicts(collected, found)

    return collected