def correctWords(self, rel_name, media):
    """Check a release name against the required/ignored word filters.

    Returns True when the simplified release name contains every
    required word set, no ignored word set, and no adult-content tag
    that is not already part of the media title itself; False otherwise.
    """
    media_title = fireEvent('searcher.get_search_title', media, single=True)
    media_words = re.split(r'\W+', simplifyString(media_title))

    rel_name = simplifyString(rel_name)
    rel_words = re.split(r'\W+', rel_name)

    # Make sure it has required words
    required_words = splitString(self.conf('required_words', section='searcher').lower())
    try:
        # Category-specific required words are optional; merge when present
        required_words = removeDuplicate(required_words + splitString(media['category']['required'].lower()))
    except (KeyError, TypeError, AttributeError):
        pass

    req_match = 0
    for req_set in required_words:
        # '&' joins words that must ALL be present for the set to match
        req = splitString(req_set, '&')
        req_match += len(list(set(rel_words) & set(req))) == len(req)

    if len(required_words) > 0 and req_match == 0:
        log.info2('Wrong: Required word missing: %s', rel_name)
        return False

    # Ignore releases
    ignored_words = splitString(self.conf('ignored_words', section='searcher').lower())
    try:
        ignored_words = removeDuplicate(ignored_words + splitString(media['category']['ignored'].lower()))
    except (KeyError, TypeError, AttributeError):
        pass

    ignored_match = 0
    for ignored_set in ignored_words:
        ignored = splitString(ignored_set, '&')
        ignored_match += len(list(set(rel_words) & set(ignored))) == len(ignored)

    if len(ignored_words) > 0 and ignored_match:
        log.info2("Wrong: '%s' contains 'ignored words'", rel_name)
        return False

    # Ignore p**n stuff, unless the tag also appears in the media title
    pron_tags = ['xxx', 'sex', 'anal', 't**s', 'f**k', 'p**n', 'orgy', 'milf', 'boobs', 'erotica', 'erotic', 'c**k', 'dick']
    pron_words = list(set(rel_words) & set(pron_tags) - set(media_words))
    if pron_words:
        log.info('Wrong: %s, probably pr0n', rel_name)
        return False

    return True
def calculate(self, nzb, movie):
    """Calculate the score of a NZB, used for sorting later."""

    # Merge global and category preferred words
    preferred_words = splitString(Env.setting('preferred_words', section = 'searcher').lower())
    try:
        # Category-specific preferred words are optional; merge when present
        preferred_words = removeDuplicate(preferred_words + splitString(movie['category']['preferred'].lower()))
    except (KeyError, TypeError, AttributeError):
        pass

    score = nameScore(toUnicode(nzb['name']), movie['library']['year'], preferred_words)

    for movie_title in movie['library']['titles']:
        score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title['title']))
        score += namePositionScore(toUnicode(nzb['name']), toUnicode(movie_title['title']))

    score += sizeScore(nzb['size'])

    # Torrents only: reward well seeded releases
    if nzb.get('seeders'):
        try:
            score += nzb.get('seeders') * 100 / 15
            score += nzb.get('leechers') * 100 / 30
        except TypeError:
            # seeders/leechers may be non-numeric; leave the score unchanged
            pass

    # Provider score
    score += providerScore(nzb['provider'])

    # Duplicates in name
    score += duplicateScore(nzb['name'], getTitle(movie['library']))

    # Merge global and category ignored words
    ignored_words = splitString(Env.setting('ignored_words', section = 'searcher').lower())
    try:
        ignored_words = removeDuplicate(ignored_words + splitString(movie['category']['ignored'].lower()))
    except (KeyError, TypeError, AttributeError):
        pass

    # Partial ignored words
    score += partialIgnoredScore(nzb['name'], getTitle(movie['library']), ignored_words)

    # Ignore single downloads from multipart
    score += halfMultipartScore(nzb['name'])

    # Extra provider specific check
    extra_score = nzb.get('extra_score')
    if extra_score:
        score += extra_score(nzb)

    # Scene / Nuke scoring
    score += sceneScore(nzb['name'])

    return score
def correctName(self, check_name, movie_name):
    """Check whether a release/check name matches the given movie name.

    Also tries a quoted sub-name and the longest bracketed sub-name of
    check_name. Returns True if any candidate's words are all contained
    in the simplified movie name, False otherwise.
    """
    check_names = [check_name]

    # Match names between "
    try:
        check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
    except AttributeError:
        # re.search returned None: no quoted section present
        pass

    # Match longest name between []
    try:
        check_names.append(max(re.findall(r'[^[]*\[([^]]*)\]', check_name), key = len).strip())
    except ValueError:
        # max() on an empty sequence: no bracketed section present
        pass

    for check_name in removeDuplicate(check_names):
        check_movie = fireEvent('scanner.name_year', check_name, single = True)

        try:
            check_words = removeEmpty(re.split(r'\W+', check_movie.get('name', '')))
            movie_words = removeEmpty(re.split(r'\W+', simplifyString(movie_name)))

            # Match only when every word of the candidate appears in the movie name
            if len(check_words) > 0 and len(movie_words) > 0 and len(list(set(check_words) - set(movie_words))) == 0:
                return True
        except (AttributeError, TypeError):
            # check_movie may be None or not dict-like
            pass

    return False
def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None, seen = None):
    """Refresh the cached movie suggestions.

    Keeps previously cached suggestions (minus ignore_imdb and
    duplicates), tops the list up with fresh suggestions when below the
    limit, stores the result back in the cache and returns it.
    """
    previous = self.getCache('suggestion_cached') or []
    refreshed = []
    ignored = ignored if ignored else []
    seen = seen if seen else []

    # Carry over cached entries, dropping ignore_imdb and duplicates
    if ignore_imdb:
        kept_imdbs = []
        for suggestion in previous:
            imdb = suggestion.get('imdb')
            if imdb != ignore_imdb and imdb not in kept_imdbs:
                kept_imdbs.append(imdb)
                refreshed.append(suggestion)

    # Fetch new suggestions when the list dropped below the limit
    if len(refreshed) - 1 < limit:
        db = get_db()  # presumably initialises the database handle; kept for side effects — TODO confirm
        active_movies = fireEvent('media.with_status', ['active', 'done'], single = True)

        exclude = [getIdentifier(movie) for movie in active_movies]
        exclude.extend(seen)
        ignored.extend(entry.get('imdb') for entry in previous)

        found = fireEvent('movie.suggest', movies = exclude, ignore = removeDuplicate(ignored), single = True)
        if found:
            refreshed.extend(found)

    self.setCache('suggestion_cached', refreshed, timeout = 3024000)

    return refreshed
def correctWords(self, rel_name, media):
    """Check a release name against the required/ignored word filters.

    Returns True when the simplified release name contains every
    required word set, no ignored word set, and no adult-content tag
    that is not already part of the media title itself; False otherwise.
    """
    media_title = fireEvent('searcher.get_search_title', media, single = True)
    media_words = re.split(r'\W+', simplifyString(media_title))

    rel_name = simplifyString(rel_name)
    rel_words = re.split(r'\W+', rel_name)

    # Make sure it has required words
    required_words = splitString(self.conf('required_words', section = 'searcher').lower())
    try:
        # Category-specific required words are optional; merge when present
        required_words = removeDuplicate(required_words + splitString(media['category']['required'].lower()))
    except (KeyError, TypeError, AttributeError):
        pass

    req_match = 0
    for req_set in required_words:
        # '&' joins words that must ALL be present for the set to match
        req = splitString(req_set, '&')
        req_match += len(list(set(rel_words) & set(req))) == len(req)

    if len(required_words) > 0 and req_match == 0:
        log.info2('Wrong: Required word missing: %s', rel_name)
        return False

    # Ignore releases
    ignored_words = splitString(self.conf('ignored_words', section = 'searcher').lower())
    try:
        ignored_words = removeDuplicate(ignored_words + splitString(media['category']['ignored'].lower()))
    except (KeyError, TypeError, AttributeError):
        pass

    ignored_match = 0
    for ignored_set in ignored_words:
        ignored = splitString(ignored_set, '&')
        ignored_match += len(list(set(rel_words) & set(ignored))) == len(ignored)

    if len(ignored_words) > 0 and ignored_match:
        log.info2("Wrong: '%s' contains 'ignored words'", rel_name)
        return False

    # Ignore p**n stuff, unless the tag also appears in the media title
    pron_tags = ['xxx', 'sex', 'anal', 't**s', 'f**k', 'p**n', 'orgy', 'milf', 'boobs', 'erotica', 'erotic', 'c**k', 'dick']
    pron_words = list(set(rel_words) & set(pron_tags) - set(media_words))
    if pron_words:
        log.info('Wrong: %s, probably pr0n', rel_name)
        return False

    return True
def containsWords(self, rel_name, rel_words, conf, media):
    """Check the release against the configured '<conf>_words' list.

    Each configured entry is either a '/pattern/' regex matched against
    rel_name, or a '&'-joined set of words that must all appear in
    rel_words. Returns (words, matched) where matched is True when at
    least one entry matched.
    """
    words = splitString(self.conf('%s_words' % conf, section = 'searcher').lower())
    try:
        # Category-specific words are optional; merge when present
        words = removeDuplicate(words + splitString(media['category'][conf].lower()))
    except (KeyError, TypeError, AttributeError):
        pass

    req_match = 0
    for req_set in words:
        # Entries wrapped in slashes are treated as regular expressions
        if len(req_set) >= 2 and (req_set[:1] + req_set[-1:]) == '//':
            if re.search(req_set[1:-1], rel_name):
                log.debug('Regex match: %s', req_set[1:-1])
                req_match += 1
        else:
            req = splitString(req_set, '&')
            req_match += len(list(set(rel_words) & set(req))) == len(req)

    return words, req_match > 0
def containsWords(self, rel_name, rel_words, conf, media):
    """Check the release against the configured '<conf>_words' list.

    Each configured entry is either a '/pattern/' regex matched against
    rel_name, or a '&'-joined set of words that must all appear in
    rel_words. Returns (words, matched) where matched is True when at
    least one entry matched.
    """
    words = splitString(self.conf('%s_words' % conf, section='searcher').lower())
    try:
        # Category-specific words are optional; merge when present
        words = removeDuplicate(words + splitString(media['category'][conf].lower()))
    except (KeyError, TypeError, AttributeError):
        pass

    req_match = 0
    for req_set in words:
        # Entries wrapped in slashes are treated as regular expressions
        if len(req_set) >= 2 and (req_set[:1] + req_set[-1:]) == '//':
            if re.search(req_set[1:-1], rel_name):
                log.debug('Regex match: %s', req_set[1:-1])
                req_match += 1
        else:
            req = splitString(req_set, '&')
            req_match += len(list(set(rel_words) & set(req))) == len(req)

    return words, req_match > 0
def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None, seen = None):
    """Refresh the cached movie suggestions.

    Keeps previously cached suggestions (minus ignore_imdb and
    duplicates), tops the list up with fresh suggestions when below the
    limit, stores the result back in the cache and returns it.
    """
    previous = self.getCache('suggestion_cached') or []
    refreshed = []
    ignored = ignored if ignored else []
    seen = seen if seen else []

    # Carry over cached entries, dropping ignore_imdb and duplicates
    if ignore_imdb:
        kept_imdbs = []
        for suggestion in previous:
            imdb = suggestion.get('imdb')
            if imdb != ignore_imdb and imdb not in kept_imdbs:
                kept_imdbs.append(imdb)
                refreshed.append(suggestion)

    # Fetch new suggestions when the list dropped below the limit
    if len(refreshed) - 1 < limit:
        active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)

        # Identifiers of every active/done movie in the library
        db = get_session()
        rows = db.query(Media) \
            .join(Library) \
            .with_entities(Library.identifier) \
            .filter(Media.status_id.in_([active_status.get('id'), done_status.get('id')])).all()

        exclude = [row[0] for row in rows]
        exclude.extend(seen)
        ignored.extend(entry.get('imdb') for entry in previous)

        found = fireEvent('movie.suggest', movies = exclude, ignore = removeDuplicate(ignored), single = True)
        if found:
            refreshed.extend(found)

    self.setCache('suggestion_cached', refreshed, timeout = 3024000)

    return refreshed
def calculate(self, nzb, movie):
    """Calculate the score of a NZB, used for sorting later."""

    # Merge global and category preferred words
    preferred_words = splitString(Env.setting('preferred_words', section='searcher').lower())
    try:
        # Category-specific preferred words are optional; merge when present
        preferred_words = removeDuplicate(preferred_words + splitString(movie['category']['preferred'].lower()))
    except (KeyError, TypeError, AttributeError):
        pass

    score = nameScore(toUnicode(nzb['name']), movie['library']['year'], preferred_words)

    for movie_title in movie['library']['titles']:
        score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title['title']))
        score += namePositionScore(toUnicode(nzb['name']), toUnicode(movie_title['title']))

    score += sizeScore(nzb['size'])

    # Torrents only: reward well seeded releases
    if nzb.get('seeders'):
        try:
            score += nzb.get('seeders') * 100 / 15
            score += nzb.get('leechers') * 100 / 30
        except TypeError:
            # seeders/leechers may be non-numeric; leave the score unchanged
            pass

    # Provider score
    score += providerScore(nzb['provider'])

    # Duplicates in name
    score += duplicateScore(nzb['name'], getTitle(movie['library']))

    # Merge global and category ignored words
    ignored_words = splitString(Env.setting('ignored_words', section='searcher').lower())
    try:
        ignored_words = removeDuplicate(ignored_words + splitString(movie['category']['ignored'].lower()))
    except (KeyError, TypeError, AttributeError):
        pass

    # Partial ignored words
    score += partialIgnoredScore(nzb['name'], getTitle(movie['library']), ignored_words)

    # Ignore single downloads from multipart
    score += halfMultipartScore(nzb['name'])

    # Extra provider specific check
    extra_score = nzb.get('extra_score')
    if extra_score:
        score += extra_score(nzb)

    # Scene / Nuke scoring
    score += sceneScore(nzb['name'])

    return score