def calculate(self, nzb, movie):
    """ Calculate the score of a NZB, used for sorting later

    Combines name/year matching, size, seeders/leechers (torrents),
    provider weight, duplicate-word, ignored-word, multipart and
    scene/nuke scores into a single integer score.
    """

    # Merge global and category preferred words; category may be absent,
    # so treat the merge as best-effort.
    preferred_words = splitString(Env.setting('preferred_words', section = 'searcher').lower())
    try:
        preferred_words = removeDuplicate(preferred_words + splitString(movie['category']['preferred'].lower()))
    except (KeyError, TypeError, AttributeError):
        pass

    score = nameScore(toUnicode(nzb['name']), movie['info']['year'], preferred_words)

    # Every alternative title contributes ratio and position scores
    for movie_title in movie['info']['titles']:
        score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title))
        score += namePositionScore(toUnicode(nzb['name']), toUnicode(movie_title))

    score += sizeScore(nzb['size'])

    # Torrents only: reward seeders/leechers; values may be non-numeric,
    # so keep this best-effort.
    seeders = nzb.get('seeders')
    if seeders:
        try:
            score += seeders * 100 / 15
            score += nzb.get('leechers') * 100 / 30
        except TypeError:
            pass

    # Provider score
    score += providerScore(nzb['provider'])

    # Duplicates in name
    score += duplicateScore(nzb['name'], getTitle(movie))

    # Merge global and category ignored words; category may be absent.
    ignored_words = splitString(Env.setting('ignored_words', section = 'searcher').lower())
    try:
        ignored_words = removeDuplicate(ignored_words + splitString(movie['category']['ignored'].lower()))
    except (KeyError, TypeError, AttributeError):
        pass

    # Partial ignored words
    score += partialIgnoredScore(nzb['name'], getTitle(movie), ignored_words)

    # Ignore single downloads from multipart
    score += halfMultipartScore(nzb['name'])

    # Extra provider specific check
    extra_score = nzb.get('extra_score')
    if extra_score:
        score += extra_score(nzb)

    # Scene / Nuke scoring
    score += sceneScore(nzb['name'])

    return score
def correctName(self, check_name, movie_name):
    """ Check whether a release name plausibly matches the given movie name.

    Builds candidate names (the raw name, any quoted segment, the longest
    bracketed segment), strips the year via the scanner, and returns True
    when one candidate's words are all contained in the movie name's words.
    """

    check_names = [check_name]

    # Match names between quotes. NOTE: the original pattern used [^\1],
    # which is NOT a backreference inside a character class (it matches any
    # char except \x01); use a lazy match up to the same closing quote.
    try:
        check_names.append(re.search(r'([\'"]).*?\1', check_name).group(0))
    except AttributeError:
        # re.search returned None: no quoted segment present
        pass

    # Match longest name between []
    try:
        check_names.append(max(re.findall(r'[^[]*\[([^]]*)\]', check_name), key = len).strip())
    except ValueError:
        # max() on empty sequence: no bracketed segment present
        pass

    for check_name in removeDuplicate(check_names):
        check_movie = fireEvent('scanner.name_year', check_name, single = True)

        try:
            check_words = removeEmpty(re.split(r'\W+', check_movie.get('name', '')))
            movie_words = removeEmpty(re.split(r'\W+', simplifyString(movie_name)))

            # All words of the candidate must appear in the movie title
            if len(check_words) > 0 and len(movie_words) > 0 and len(list(set(check_words) - set(movie_words))) == 0:
                return True
        except (AttributeError, TypeError):
            pass

    return False
def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None, seen = None):
    """ Refresh the cached movie suggestions.

    Keeps previously cached suggestions (minus ignore_imdb and duplicates),
    tops the list up via the 'movie.suggest' event when it runs short, and
    stores the result back in the cache.
    """

    previous = self.getCache('suggestion_cached') or []
    fresh = []

    # Normalize optional list arguments without mutable defaults
    ignored = ignored or []
    seen = seen or []

    if ignore_imdb:
        # Carry over cached entries, dropping the ignored id and duplicates
        kept_imdbs = set()
        for entry in previous:
            imdb = entry.get('imdb')
            if imdb != ignore_imdb and imdb not in kept_imdbs:
                kept_imdbs.add(imdb)
                fresh.append(entry)

    # Top up with new results when below the limit
    if len(fresh) - 1 < limit:
        active_movies = fireEvent('media.with_status', ['active', 'done'], single = True)

        movies = [getIdentifier(x) for x in active_movies]
        movies.extend(seen)

        # Never re-suggest anything already cached
        ignored.extend([x.get('imdb') for x in previous])

        suggested = fireEvent('movie.suggest', movies = movies, ignore = removeDuplicate(ignored), single = True)
        if suggested:
            fresh.extend(suggested)

    self.setCache('suggestion_cached', fresh, timeout = 3024000)

    return fresh
def containsWords(self, rel_name, rel_words, conf, media):
    """ Check a release against the configured word sets.

    conf is the setting prefix ('required'/'ignored' etc.); words come from
    the global searcher setting merged with the media's category. A word set
    wrapped in slashes (/regex/) is treated as a regex against rel_name;
    otherwise '&'-joined words must all be present in rel_words.

    Returns the merged word list and whether any set matched.
    """

    # Make sure it has required words; category may be absent, so merge
    # best-effort.
    words = splitString(self.conf('%s_words' % conf, section = 'searcher').lower())
    try:
        words = removeDuplicate(words + splitString(media['category'][conf].lower()))
    except (KeyError, TypeError, AttributeError):
        pass

    req_match = 0
    for req_set in words:
        if len(req_set) >= 2 and (req_set[:1] + req_set[-1:]) == '//':
            # Slash-delimited entry: interpret as a regex on the full name
            if re.search(req_set[1:-1], rel_name):
                log.debug('Regex match: %s', req_set[1:-1])
                req_match += 1
        else:
            # '&'-joined words must ALL appear in the release's words
            req = splitString(req_set, '&')
            req_match += len(list(set(rel_words) & set(req))) == len(req)

    return words, req_match > 0