def correctName(check_name, movie):
    """Count how many of the movie's titles are fully matched by check_name.

    A title counts as matched when every word of a name parsed out of one of
    the check_name variants appears among the (simplified) title's words.
    Returns 0 when nothing matches.

    Fixes: the inner loop used to rebind `check_name`, so for every title
    after the first the candidate list was rebuilt from a mutated value;
    the dead `result += 0` statement is removed. Candidates are now built
    once, since they never depended on the title being checked.
    """
    movie_titles = movie['info']['titles']
    result = 0

    check_names = [simplifyString(check_name)]

    # Match names between "
    try:
        check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
    except:
        pass

    # Match longest name between []
    try:
        check_names.append(max(check_name.split('['), key = len))
    except:
        pass

    candidates = list(set(check_names))

    for movietitle in movie_titles:
        for candidate in candidates:
            check_movie = getReleaseNameYear(candidate)
            try:
                check_words = [w for w in re.split(r'\W+', simplifyString(check_movie.get('name', ''))) if w]
                movie_words = [w for w in re.split(r'\W+', simplifyString(movietitle)) if w]

                # All parsed words must be covered by the title's words
                if check_words and movie_words and len(set(check_words) - set(movie_words)) == 0:
                    result += 1
            except:
                pass

    return result
def correctWords(self, rel_name, media):
    """Validate a release name against the required/ignored word lists.

    Rejects the release when a configured required word is missing, when an
    ignored word is present, or when it looks like adult content that the
    media title itself does not account for. Returns True when acceptable.
    """
    title = fireEvent('searcher.get_search_title', media, single = True)
    title_words = re.split('\W+', simplifyString(title))

    rel_name = simplifyString(rel_name)
    words = re.split('\W+', rel_name)

    # All configured required words must be present
    required_words, contains_required = self.containsWords(rel_name, words, 'required', media)
    if required_words and not contains_required:
        log.info2('Wrong: Required word missing: %s', rel_name)
        return False

    # Any hit on the ignored list disqualifies the release
    ignored_words, contains_ignored = self.containsWords(rel_name, words, 'ignored', media)
    if ignored_words and contains_ignored:
        log.info2("Wrong: '%s' contains 'ignored words'", rel_name)
        return False

    # Ignore p**n stuff (unless the tag is part of the title itself)
    pron_tags = ['xxx', 'sex', 'anal', 't**s', 'f**k', 'p**n', 'orgy', 'milf', 'boobs', 'erotica', 'erotic', 'c**k', 'dick']
    if set(words) & set(pron_tags) - set(title_words):
        log.info('Wrong: %s, probably pr0n', rel_name)
        return False

    return True
def createStringIdentifier(self, file_path, folder = '', exclude_filename = False): identifier = file_path.replace(folder, '') # root folder identifier = os.path.splitext(identifier)[0] # ext if exclude_filename: identifier = identifier[:len(identifier) - len(os.path.split(identifier)[-1])] # multipart identifier = self.removeMultipart(identifier) # remove cptag identifier = self.removeCPTag(identifier) # groups, release tags, scenename cleaner, regex isn't correct identifier = re.sub(self.clean, '::', simplifyString(identifier)).strip(':') # Year year = self.findYear(identifier) if year: identifier = '%s %s' % (identifier.split(year)[0].strip(), year) else: identifier = identifier.split('::')[0] # Remove duplicates out = [] for word in identifier.split(): if not word in out: out.append(word) identifier = ' '.join(out) return simplifyString(identifier)
def possibleTitles(raw_title):
    """Return de-duplicated spelling variants of raw_title used for searching."""
    variants = [
        toSafeString(raw_title).lower(),
        raw_title.lower(),
        simplifyString(raw_title),
        # replace some chars: '&' spelled out as a word
        simplifyString(raw_title.replace("&", "and")),
    ]
    return removeDuplicate(variants)
def duplicateScore(nzb_name, movie_name):
    """Penalty of -4 per word repeated in the release name that is not
    part of the movie title itself."""
    release_words = re.split('\W+', simplifyString(nzb_name))
    title_words = re.split('\W+', simplifyString(movie_name))

    # Words occurring more than once in the release name
    counts = {}
    for word in release_words:
        counts[word] = counts.get(word, 0) + 1
    repeated = set(w for w, c in counts.items() if c > 1)

    return len(repeated - set(title_words)) * -4
def namePositionScore(nzb_name, movie_name):
    """Score a release name by the position of the movie name, year and quality.

    +10 when the release starts with the movie name, +10 more when the year
    follows directly, -20 when no known quality tag is found, and -10 for
    every unexpected word between the year and the quality tag.

    Fix: `after_name` used to be referenced at the bottom even when the
    branches assigning it never ran (release not starting with the movie
    name, or no year parsed), raising NameError; it is now initialised.
    """
    score = 0
    nzb_words = re.split('\W+', simplifyString(nzb_name))
    qualities = fireEvent('quality.all', single = True)

    # Prefer a quoted section of the name when present
    try:
        nzb_name = re.search(r'([\'"])[^\1]*\1', nzb_name).group(0)
    except:
        pass

    name_year = fireEvent('scanner.name_year', nzb_name, single = True)

    after_name = ''

    # Give points for movies beginning with the correct name
    split_by = simplifyString(movie_name)
    name_split = []
    if len(split_by) > 0:
        name_split = simplifyString(nzb_name).split(split_by)
        if name_split[0].strip() == '':
            score += 10

    # If year is second in line, give more points
    if len(name_split) > 1 and name_year:
        after_name = name_split[1].strip()
        if tryInt(after_name[:4]) == name_year.get('year', None):
            score += 10
            after_name = after_name[4:]

    # Give -point to crap between year and quality
    found_quality = None
    for quality in qualities:
        # Main in words
        if quality['identifier'] in nzb_words:
            found_quality = quality['identifier']

        # Alt in words
        for alt in quality['alternative']:
            if alt in nzb_words:
                found_quality = alt
                break

    if not found_quality:
        return score - 20

    allowed = []
    for value in name_scores:
        name, sc = value.split(':')
        allowed.append(name)

    inbetween = re.split('\W+', after_name.split(found_quality)[0].strip())
    score -= (10 * len(set(inbetween) - set(allowed)))

    return score
def possibleTitles(raw_title):
    """Return spelling variants of raw_title, de-duplicated.

    Fix: `list(set(titles))` returned the variants in arbitrary hash order;
    duplicates are now removed while preserving the build order, so the
    preferred spelling stays first and the result is deterministic.
    """
    titles = [
        toSafeString(raw_title).lower(),
        raw_title.lower(),
        simplifyString(raw_title),
    ]

    # replace some chars
    new_title = raw_title.replace('&', 'and')
    titles.append(simplifyString(new_title))

    # De-duplicate while keeping order
    seen = set()
    unique_titles = []
    for title in titles:
        if title not in seen:
            seen.add(title)
            unique_titles.append(title)

    return unique_titles
def duplicateScore(nzb_name, movie_name):
    """Penalty of -4 per word repeated in the release name without appearing
    in the movie title. Returns 0 on any unexpected failure."""
    try:
        release_words = re.split('\W+', simplifyString(nzb_name))
        title_words = re.split('\W+', simplifyString(movie_name))

        # Collect words occurring more than once in the release name
        repeated = set()
        for idx, word in enumerate(release_words):
            if release_words[idx:].count(word) > 1:
                repeated.add(word)

        return len(repeated - set(title_words)) * -4
    except:
        log.error('Failed doing duplicateScore: %s', traceback.format_exc())
        return 0
def getSearchParams(self, movie, quality):
    """Build url-encoded search queries for every title of the movie.

    For each title two queries are appended (the raw latin-1 title and a
    simplified version), each suffixed with pre-encoded genre/quality
    filter parameters. Titles that cannot be encoded to latin-1 are skipped.
    """
    results = []
    MovieTitles = movie["info"]["titles"]
    moviequality = simplifyString(quality["identifier"])
    moviegenre = movie["info"]["genres"]

    # Site sub-category derived from the genre
    if "Animation" in moviegenre:
        subcat = 455
    elif "Documentaire" in moviegenre or "Documentary" in moviegenre:
        subcat = 634
    else:
        subcat = 631

    # Pre-encoded site filter ("term[...]") parameters per quality identifier
    if moviequality in ["720p"]:
        qualpar = "&term%5B17%5D%5B%5D=541&term%5B17%5D%5B%5D=542&term%5B17%5D%5B%5D=719&term%5B17%5D%5B%5D=1160&term%5B17%5D%5B%5D=722&term%5B7%5D%5B%5D=15&term%5B7%5D%5B%5D=12&term%5B7%5D%5B%5D=1175"
    elif moviequality in ["1080p"]:
        qualpar = "&term%5B17%5D%5B%5D=541&term%5B17%5D%5B%5D=542&term%5B17%5D%5B%5D=719&term%5B17%5D%5B%5D=1160&term%5B17%5D%5B%5D=722&term%5B7%5D%5B%5D=16&term%5B7%5D%5B%5D=1162&term%5B7%5D%5B%5D=1174"
    elif moviequality in ["dvd-r", "dvdr"]:
        qualpar = "&term%5B17%5D%5B%5D=541&term%5B17%5D%5B%5D=542&term%5B17%5D%5B%5D=719&term%5B17%5D%5B%5D=1160&term%5B17%5D%5B%5D=722&term%5B7%5D%5B%5D=13&term%5B7%5D%5B%5D=14"
    elif moviequality in ["br-disk"]:
        qualpar = "&term%5B17%5D%5B%5D=541&term%5B17%5D%5B%5D=542&term%5B17%5D%5B%5D=719&term%5B17%5D%5B%5D=1160&term%5B17%5D%5B%5D=722&term%5B7%5D%5B%5D=1171&term%5B7%5D%5B%5D=17"
    else:
        qualpar = "&term%5B17%5D%5B%5D=541&term%5B17%5D%5B%5D=542&term%5B17%5D%5B%5D=719&term%5B17%5D%5B%5D=1160&term%5B17%5D%5B%5D=722&term%5B7%5D%5B%5D=8&term%5B7%5D%5B%5D=9&term%5B7%5D%5B%5D=10&term%5B7%5D%5B%5D=11&term%5B7%5D%5B%5D=18&term%5B7%5D%5B%5D=19"

    # Extra filter parameters for 3D releases
    if quality["custom"]["3d"] == 1:
        qualpar = qualpar + "&term%5B9%5D%5B%5D=24&term%5B9%5D%5B%5D=23"

    for MovieTitle in MovieTitles:
        # Skip titles that cannot be represented in latin-1
        try:
            TitleStringReal = str(MovieTitle.encode("latin-1").replace("-", " "))
        except:
            continue
        try:
            results.append(
                urllib.urlencode({"search": TitleStringReal, "cat": 210, "submit": "Recherche", "subcat": subcat})
                + qualpar
            )
            results.append(
                urllib.urlencode(
                    {
                        "search": simplifyString(unicode(TitleStringReal, "latin-1")),
                        "cat": 210,
                        "submit": "Recherche",
                        "subcat": subcat,
                    }
                )
                + qualpar
            )
        except:
            continue

    return results
def correctLanguage(self, rel_name, media):
    """Check whether a release name matches the wanted audio language.

    Honours the global 'dubbed_version' searcher setting, overridden by
    the media's category setting when present. Returns True when the
    release is acceptable language-wise.

    Idiom cleanup only: stray semicolons removed, `== False` comparisons
    replaced with `not`, and the language-word scan collapsed into one
    any() expression — the decision logic is unchanged.
    """
    # Base configuration, possibly overridden per category
    dubbed_version = self.conf('dubbed_version', section = 'searcher')
    try:
        dubbed_version = media['category']['dubbed_version']
    except:
        pass

    release_languages = media['info']['languages']

    rel_name = simplifyString(rel_name)
    upper_rel_words = [word.upper() for word in re.split('\W+', rel_name)]

    # True when any word of the release name is a known language name
    language_word_found = any(
        any(item for item in getAllLanguages() if item[1].upper() == word)
        for word in upper_rel_words
    )

    if dubbed_version:
        if 'FRENCH' in upper_rel_words or 'TRUEFRENCH' in upper_rel_words or 'MULTI' in upper_rel_words:
            return True
        # No explicit language tag, but the movie is known to exist in French
        if not language_word_found and 'FRENCH' in release_languages:
            return True
    else:
        if any(l for l in upper_rel_words if l.upper() in release_languages) or 'MULTI' in upper_rel_words:
            return True
        # No language tag at all: assume the original version
        if not language_word_found:
            return True

    return False
def search(self, q, limit = 12):
    ''' Find movie by name

    Queries TheMovieDB with the simplified name and returns up to `limit`
    parsed results, or False when disabled / on parse failure.
    '''

    if self.isDisabled():
        return False

    log.debug('TheMovieDB - Searching for movie: %s' % q)
    raw = tmdb.search(simplifyString(q))

    results = []
    if raw:
        try:
            nr = 0
            for movie in raw:
                results.append(self.parseMovie(movie))
                nr += 1
                if nr == limit:
                    break

            log.info('TheMovieDB - Found: %s' % [result['titles'][0] + ' (' + str(result['year']) + ')' for result in results])
            return results
        # Fix: 'except X, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.x
        except SyntaxError as e:
            log.error('Failed to parse XML response: %s' % e)
            return False
def correctName(self, check_name, movie_name):
    """Return True when some parsed variant of check_name has all of its
    words contained in movie_name's words."""
    candidates = [check_name]

    # Match names between "
    try:
        quoted = re.search(r'([\'"])[^\1]*\1', check_name).group(0)
        candidates.append(quoted)
    except:
        pass

    # Match longest name between []
    try:
        bracketed = max(re.findall(r'[^[]*\[([^]]*)\]', check_name), key = len)
        candidates.append(bracketed.strip())
    except:
        pass

    for candidate in removeDuplicate(candidates):
        parsed = fireEvent('scanner.name_year', candidate, single = True)
        try:
            name_words = removeEmpty(re.split('\W+', parsed.get('name', '')))
            title_words = removeEmpty(re.split('\W+', simplifyString(movie_name)))

            # Every parsed word must appear among the title's words
            if name_words and title_words and set(name_words).issubset(set(title_words)):
                return True
        except:
            pass

    return False
def nameRatioScore(nzb_name, movie_name):
    """10 minus the number of release-identifier words not found in the movie name."""
    release_words = set(re.split('\W+', fireEvent('scanner.create_file_identifier', nzb_name, single = True)))
    title_words = set(re.split('\W+', simplifyString(movie_name)))
    return 10 - len(release_words - title_words)
def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = {}, single_category = False):
    """Check whether the release holds quality tags other than the preferred one.

    Returns True when a non-preferred quality is detected (release should be
    rejected). For old movies with no year and no quality tag, the size is
    used to assume dvdr (>3000, presumably MB — TODO confirm unit) or dvdrip.
    NOTE(review): the mutable {} default is shared across calls; it is only
    read here, but worth confirming.
    """
    name = nzb['name']
    size = nzb.get('size', 0)
    nzb_words = re.split('\W+', simplifyString(name))

    qualities = fireEvent('quality.all', single = True)

    found = {}
    for quality in qualities:
        # Main in words
        if quality['identifier'] in nzb_words:
            found[quality['identifier']] = True

        # Alt in words
        if list(set(nzb_words) & set(quality['alternative'])):
            found[quality['identifier']] = True

    # Hack for older movies that don't contain quality tag
    year_name = fireEvent('scanner.name_year', name, single = True)
    if movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
        if size > 3000:  # Assume dvdr
            return 'dvdr' == preferred_quality['identifier']
        else:  # Assume dvdrip
            return 'dvdrip' == preferred_quality['identifier']

    # Allow other qualities
    for allowed in preferred_quality.get('allow'):
        if found.get(allowed):
            del found[allowed]

    # Nothing detected in single-category mode: accept the release
    if (len(found) == 0 and single_category):
        return False

    return not (found.get(preferred_quality['identifier']) and len(found) == 1)
def correctName(self, check_name, movie_name):
    """Return True when a parsed variant of check_name consists entirely
    of words taken from movie_name."""
    variants = [check_name]

    # Match names between "
    try:
        variants.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
    except:
        pass

    # Match longest name between []
    try:
        variants.append(max(check_name.split('['), key = len))
    except:
        pass

    for variant in list(set(variants)):
        parsed = fireEvent('scanner.name_year', variant, single = True)
        try:
            name_words = [w for w in re.split('\W+', parsed.get('name', '')) if w]
            title_words = [w for w in re.split('\W+', simplifyString(movie_name)) if w]

            if name_words and title_words and set(name_words).issubset(set(title_words)):
                return True
        except:
            pass

    return False
def nameScore(name, year):
    ''' Calculate score for words in the NZB name

    Points come from the configured name_scores entries, a correct year
    (+5) and each configured preferred word present (+100 each).

    Idiom cleanup: `score = score + add` replaced with `+=`, the preferred
    word check rewritten as a set intersection with the stripping/lowering
    hoisted out of the membership loop.
    '''
    score = 0
    name = name.lower()

    # give points for the cool stuff
    for value in name_scores:
        v = value.split(':')
        add = int(v.pop())
        if v.pop() in name:
            score += add

    # points if the year is correct
    if str(year) in name:
        score += 5

    # Contains preferred word
    nzb_words = set(re.split('\W+', simplifyString(name)))
    preferred_words = set(
        w.strip().lower()
        for w in Env.setting('preferred_words', section = 'searcher').split(',')
        if w.strip()
    )
    score += 100 * len(nzb_words & preferred_words)

    return score
def byHash(self, file):
    ''' Find movie by hash

    Returns the cached result when available, otherwise queries TMDB by
    file hash, caches and returns the parsed movie. False when disabled
    or when parsing fails.
    '''

    if self.isDisabled():
        return False

    cache_key = 'tmdb.cache.%s' % simplifyString(file)
    results = self.getCache(cache_key)

    if not results:
        log.debug('Searching for movie by hash: %s', file)
        try:
            raw = tmdb.searchByHashingFile(file)

            results = []
            if raw:
                try:
                    results = self.parseMovie(raw)
                    log.info('Found: %s', results['titles'][0] + ' (' + str(results['year']) + ')')

                    self.setCache(cache_key, results)
                    return results
                # Fix: Python-2-only 'except X, e' replaced by 'as' (valid 2.6+/3.x)
                except SyntaxError as e:
                    log.error('Failed to parse XML response: %s', e)
                    return False
        except:
            # Best effort: an unknown hash is not an error
            log.debug('No movies known by hash for: %s', file)
            pass

    return results
def getReleaseNameYear(self, release_name, file_name=None):
    """Extract {'name': ..., 'year': ...} from a release/file name.

    Tries guessit on file_name first, then falls back to simple cleaning
    of release_name. Returns {} when nothing can be extracted.

    Fix: the no-year branch called int(year) with year=None, which always
    raised inside the bare except and made the function return {} even
    though a usable name had been extracted; it now returns the name with
    year set to None.
    """

    # Use guessit first
    if file_name:
        try:
            guess = guess_movie_info(file_name)
            if guess.get("title") and guess.get("year"):
                return {"name": guess.get("title"), "year": guess.get("year")}
        except:
            log.debug('Could not detect via guessit "%s": %s' % (file_name, traceback.format_exc()))

    # Backup to simple
    cleaned = " ".join(re.split("\W+", simplifyString(release_name)))
    cleaned = re.sub(self.clean, " ", cleaned)
    year = self.findYear(cleaned)

    if year:
        # Split name on year
        try:
            movie_name = cleaned.split(year).pop(0).strip()
            return {"name": movie_name, "year": int(year)}
        except:
            pass
    else:
        # No year found: keep the first chunk of the cleaned name
        try:
            movie_name = cleaned.split(" ").pop(0).strip()
            return {"name": movie_name, "year": None}
        except:
            pass

    return {}
def getSearchParams(self, movie, quality):
    """Build one url-encoded search query per movie title, with the site
    category filter (chosen from quality / 3D flag) appended."""
    results = []
    movie_titles = movie['info']['titles']
    movie_quality = simplifyString(quality['identifier'])

    # Category filter: 3D wins, otherwise keyed on the quality identifier
    if quality['custom']['3d'] == 1:
        category = '&adv_cat%5Bs%5D%5B7%5D=189'
    elif movie_quality in ['720p']:
        category = '&adv_cat%5Bm%5D%5B4%5D=136'
    elif movie_quality in ['1080p']:
        category = '&adv_cat%5Bm%5D%5B5%5D=150'
    elif movie_quality in ['dvd-r']:
        category = '&adv_cat%5Bm%5D%5B3%5D=82'
    elif movie_quality in ['br-disk']:
        category = '&adv_cat%5Bm%5D%5B6%5D=187'
    else:
        category = '&adv_cat%5Bm%5D%5B1%5D=71'

    for title in movie_titles:
        # Titles that cannot be encoded as latin-1 are skipped
        try:
            query_title = str(title.encode("latin-1").replace('-', ' '))
        except:
            continue
        try:
            results.append(urllib.urlencode({'name': query_title, 'exact': 1, 'group': 'films'}) + category)
        except:
            continue

    return results
def containsOtherQuality(self, name, preferred_quality={}, single_category=False):
    """Return True when the name contains quality tags other than the
    preferred one (i.e. the release should be rejected)."""
    word_set = set(re.split("\W+", simplifyString(name)))

    detected = {}
    for quality in fireEvent("quality.all", single=True):
        # Main identifier present as a word
        if quality["identifier"] in word_set:
            detected[quality["identifier"]] = True
        # Any alternative spelling present as a word
        if word_set & set(quality["alternative"]):
            detected[quality["identifier"]] = True

    # Qualities explicitly allowed do not count as "other"
    for allowed in preferred_quality.get("allow"):
        if detected.get(allowed):
            del detected[allowed]

    if not detected and single_category:
        return False

    return not (detected.get(preferred_quality["identifier"]) and len(detected) == 1)
def correctName(self, check_name, movie_name):
    """Return True when some variant of check_name (the raw name, or a
    quoted part of it) is fully covered by movie_name's words."""
    variants = [check_name]

    # Also consider the part between quotes, when present
    try:
        variants.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
    except:
        pass

    for variant in variants:
        parsed = fireEvent("scanner.name_year", variant, single=True)
        try:
            name_words = [w for w in re.split("\W+", parsed.get("name", "")) if w]
            title_words = [w for w in re.split("\W+", simplifyString(movie_name)) if w]

            if name_words and title_words and set(name_words).issubset(set(title_words)):
                return True
        except:
            pass

    return False
def containsOtherQuality(self, nzb, movie_year=None, preferred_quality={}, single_category=False):
    """Check whether the release holds quality tags other than the preferred one.

    Returns True when a non-preferred quality is detected (release should be
    rejected). For old movies with no year and no quality tag the size is
    used to assume dvdr (>3000, presumably MB — TODO confirm unit) or dvdrip.
    NOTE(review): the mutable {} default is shared across calls; it is only
    read here, but worth confirming.
    """
    name = nzb["name"]
    size = nzb.get("size", 0)
    nzb_words = re.split("\W+", simplifyString(name))

    qualities = fireEvent("quality.all", single=True)

    found = {}
    for quality in qualities:
        # Main in words
        if quality["identifier"] in nzb_words:
            found[quality["identifier"]] = True

        # Alt in words
        if list(set(nzb_words) & set(quality["alternative"])):
            found[quality["identifier"]] = True

    # Hack for older movies that don't contain quality tag
    year_name = fireEvent("scanner.name_year", name, single=True)
    if movie_year < datetime.datetime.now().year - 3 and not year_name.get("year", None):
        if size > 3000:  # Assume dvdr
            return "dvdr" == preferred_quality["identifier"]
        else:  # Assume dvdrip
            return "dvdrip" == preferred_quality["identifier"]

    # Allow other qualities
    for allowed in preferred_quality.get("allow"):
        if found.get(allowed):
            del found[allowed]

    # Nothing detected in single-category mode: accept the release
    if len(found) == 0 and single_category:
        return False

    return not (found.get(preferred_quality["identifier"]) and len(found) == 1)
def byHash(self, file):
    """ Find movie by hash

    Returns the cached result when available, otherwise queries TMDB by
    file hash, caches and returns the parsed movie. False when disabled
    or when parsing fails.
    """

    if self.isDisabled():
        return False

    cache_key = "tmdb.cache.%s" % simplifyString(file)
    results = self.getCache(cache_key)

    if not results:
        log.debug("Searching for movie by hash: %s", file)
        try:
            raw = tmdb.searchByHashingFile(file)

            results = []
            if raw:
                try:
                    results = self.parseMovie(raw)
                    log.info("Found: %s", results["titles"][0] + " (" + str(results["year"]) + ")")

                    self.setCache(cache_key, results)
                    return results
                # Fix: Python-2-only 'except X, e' replaced by 'as' (valid 2.6+/3.x)
                except SyntaxError as e:
                    log.error("Failed to parse XML response: %s", e)
                    return False
        except:
            # Best effort: an unknown hash is not an error
            log.debug("No movies known by hash for: %s", file)
            pass

    return results
def nameScore(name, year, preferred_words):
    """ Calculate score for words in the NZB name

    Scores configured name_scores hits, a correct year (+5) and every
    preferred word present (+100 each); returns 0 on unexpected failure.
    """
    try:
        score = 0
        name = name.lower()

        # give points for the cool stuff
        for entry in name_scores:
            parts = entry.split(':')
            bonus = int(parts.pop())
            if parts.pop() in name:
                score += bonus

        # points if the year is correct
        if str(year) in name:
            score += 5

        # Contains preferred word
        words = set(re.split('\W+', simplifyString(name)))
        score += 100 * len(words & set(preferred_words))

        return score
    except:
        log.error('Failed doing nameScore: %s', traceback.format_exc())
        return 0
def search(self, q, limit = 12):
    ''' Find movie by name

    Returns the cached result when available, otherwise queries TMDB,
    caches and returns up to `limit` parsed results. False when disabled
    or when parsing fails.
    '''

    if self.isDisabled():
        return False

    search_string = simplifyString(q)
    cache_key = 'tmdb.cache.%s.%s' % (search_string, limit)
    results = self.getCache(cache_key)

    if not results:
        log.debug('Searching for movie: %s' % q)
        raw = tmdb.search(search_string)

        results = []
        if raw:
            try:
                nr = 0
                for movie in raw:
                    results.append(self.parseMovie(movie))
                    nr += 1
                    if nr == limit:
                        break

                log.info('Found: %s' % [result['titles'][0] + ' (' + str(result['year']) + ')' for result in results])
                self.setCache(cache_key, results)
                return results
            # Fix: Python-2-only 'except X, e' replaced by 'as' (valid 2.6+/3.x)
            except SyntaxError as e:
                log.error('Failed to parse XML response: %s' % e)
                return False

    # Fix: a cache hit previously fell off the end and returned None,
    # defeating the cache; return the cached results instead.
    return results
def search(self, q, limit=12):
    """ Find movie by name

    Returns the cached result when available, otherwise queries TMDB,
    caches and returns up to `limit` parsed results. False when disabled
    or when parsing fails.
    """

    if self.isDisabled():
        return False

    search_string = simplifyString(q)
    cache_key = "tmdb.cache.%s.%s" % (search_string, limit)
    results = self.getCache(cache_key)

    if not results:
        log.debug("Searching for movie: %s", q)
        raw = tmdb.search(search_string)

        results = []
        if raw:
            try:
                nr = 0
                for movie in raw:
                    results.append(self.parseMovie(movie))
                    nr += 1
                    if nr == limit:
                        break

                log.info(
                    "Found: %s",
                    [result["titles"][0] + " (" + str(result["year"]) + ")" for result in results]
                )
                self.setCache(cache_key, results)
                return results
            # Fix: Python-2-only 'except X, e' replaced by 'as' (valid 2.6+/3.x)
            except SyntaxError as e:
                log.error("Failed to parse XML response: %s", e)
                return False

    # Fix: a cache hit previously fell off the end and returned None,
    # defeating the cache; return the cached results instead.
    return results
def nameScore(name, year, preferred_words):
    """ Calculate score for words in the NZB name

    Scores language hits, configured name_scores entries, a correct year
    (+5, or +3 for year+1) and preferred words (+100 each).
    """
    score = 0
    name = name.lower()

    # points for the correct language
    # Fix: the original aliased the module-level lang_neg_scores and
    # update()d it in place, so every call permanently merged the current
    # language's scores into the shared table — work on a copy. It also
    # iterated the mapping directly, which yields keys only and breaks the
    # (lang, add) unpacking; iterate items(). (Assumes lang_neg_scores is a
    # dict — TODO confirm against its definition.)
    lang_scores = dict(lang_neg_scores)
    lang_scores.update(lang_cur_scores[Env.setting('dl_language')])
    for lang, add in lang_scores.items():
        if lang in name:
            score += add

    # give points for the cool stuff
    for value in name_scores:
        v = value.split(':')
        add = int(v.pop())
        if v.pop() in name:
            score += add

    # points if the year is correct
    if str(year) in name:
        score += 5
    # points if the year +1 is correct (sometimes english and german years differs +1)
    elif str(int(year) + 1) in name:
        score += 3

    # Contains preferred word
    nzb_words = re.split('\W+', simplifyString(name))
    score += 100 * len(list(set(nzb_words) & set(preferred_words)))

    return score
def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = {}):
    """Check whether the release holds quality tags other than the preferred one.

    Returns True when a non-preferred quality is detected (release should be
    rejected). When nothing was detected for an old movie with no year tag,
    the size decides between dvdr and dvdrip (>3000, presumably MB — TODO
    confirm unit). NOTE(review): the mutable {} default is shared across
    calls; it is only read here, but worth confirming.
    """
    name = nzb['name']
    size = nzb.get('size', 0)
    nzb_words = re.split('\W+', simplifyString(name))

    qualities = fireEvent('quality.all', single = True)

    found = {}
    for quality in qualities:
        # Main in words
        if quality['identifier'] in nzb_words:
            found[quality['identifier']] = True

        # Alt in words
        if list(set(nzb_words) & set(quality['alternative'])):
            found[quality['identifier']] = True

    # Hack for older movies that don't contain quality tag
    year_name = fireEvent('scanner.name_year', name, single = True)
    if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
        if size > 3000:  # Assume dvdr
            log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', (size))
            found['dvdr'] = True
        else:  # Assume dvdrip
            log.info('Quality was missing in name, assuming it\'s a DVD-Rip based on the size: %s', (size))
            found['dvdrip'] = True

    # Allow other qualities
    for allowed in preferred_quality.get('allow'):
        if found.get(allowed):
            del found[allowed]

    return not (found.get(preferred_quality['identifier']) and len(found) == 1)
def _searchOnTitle(self, title, movie, quality, results):
    """Search ilCorsaroNero for the title/quality and append parsed torrent
    rows (both 'odd' and 'odd2' table rows) to results.

    Fix: the "no results" marker was tested against `data` before checking
    that any data was actually retrieved, raising TypeError on a falsy
    (None) response; the emptiness guard now runs first.
    """
    log.debug("Searching for %s (%s) on %s" % (title, quality['label'], self.urls['base_url']))

    # remove accents
    simpletitle = simplifyString(title)

    cat = self.getCatId(quality)
    log.debug("Searching in CorSaRoNero category: %s" % cat)

    data = self.getHTMLData(self.urls['search'] % (cat, tryUrlencode(simpletitle)))

    if not data:
        return

    if 'Nessus torrent trovato!!!!' in data:
        log.info("No torrents found for %s on ilCorsaroNero.info.", title)
        return

    try:
        html = BeautifulSoup(data)
        entries_1 = html.findAll('tr', attrs={'class': 'odd'})
        entries_2 = html.findAll('tr', attrs={'class': 'odd2'})
        try:
            self.parseResults(results, entries_1, movie, title)
            self.parseResults(results, entries_2, movie, title)
        except:
            log.error('Failed parsing ilCorsaroNero: %s', traceback.format_exc())
    except AttributeError:
        log.debug('No search results found.')
def getReleaseNameYear(self, release_name):
    """Parse {'name': ..., 'year': ...} out of a release name; {} on failure."""
    cleaned = ' '.join(re.split('\W+', simplifyString(release_name)))
    cleaned = re.sub(self.clean, ' ', cleaned)
    year = self.findYear(cleaned)

    if year:
        # The name is whatever precedes the year
        try:
            return {
                'name': cleaned.split(year)[0].strip(),
                'year': year,
            }
        except:
            pass
    else:
        # No year found: fall back to the first chunk of the cleaned name
        try:
            return {
                'name': cleaned.split(' ')[0].strip(),
                'year': year,
            }
        except:
            pass

    return {}
def _search(self, movie, quality, results):
    """Search cPASbien for the movie (title + quality) and append torrent
    entries, verified against the movie title, to results.
    """

    # Cookie login
    if not self.login_opener and not self.login():
        return

    # Query string: title + quality, dashes/extra spaces collapsed, utf-8 bytes
    TitleStringReal = (getTitle(movie['library']) + ' ' + simplifyString(quality['identifier'])).replace('-', ' ').replace(' ', ' ').replace(' ', ' ').replace(' ', ' ').encode("utf8")

    # Normalise the search URL to plain ascii and percent-encode it
    URL = (self.urls['search']).encode('UTF8')
    URL = unicodedata.normalize('NFD', unicode(URL, "utf8", "replace"))
    URL = URL.encode('ascii', 'ignore')
    URL = urllib2.quote(URL.encode('utf8'), ":/?=")

    values = {'champ_recherche': TitleStringReal}
    data_tmp = urllib.urlencode(values)
    req = urllib2.Request(URL, data_tmp)
    data = urllib2.urlopen(req)

    # Synthetic result ids, counted up from 1000
    id = 1000

    if data:
        cat_ids = self.getCatId(quality['identifier'])
        table_order = ['name', 'size', None, 'age', 'seeds', 'leechers']
        try:
            html = BeautifulSoup(data)
            resultdiv = html.find('div', attrs = {'id': 'recherche'}).find('table').find('tbody')
            for result in resultdiv.find_all('tr', recursive = False):
                try:
                    new = {}
                    #id = result.find_all('td')[2].find_all('a')[0]['href'][1:].replace('torrents/nfo/?id=','')
                    name = result.find_all('td')[0].find_all('a')[0].text

                    # Skip rows whose name does not match the movie
                    testname = namer_check.correctName(name, movie)
                    if testname == 0:
                        continue

                    detail_url = result.find_all('td')[0].find_all('a')[0]['href']

                    # Scrape the detail page (normalised/encoded like the search URL)
                    urldetail = detail_url.encode('UTF8')
                    urldetail = unicodedata.normalize('NFD', unicode(urldetail, "utf8", "replace"))
                    urldetail = urldetail.encode('ascii', 'ignore')
                    urldetail = urllib2.quote(urldetail.encode('utf8'), ":/?=")
                    req = urllib2.Request(urldetail)  # POST request doesn't not work
                    data_detail = urllib2.urlopen(req)

                    url_download = ""
                    if data_detail:
                        # Take the download link from the detail page
                        html_detail = BeautifulSoup(data_detail)
                        url_download = html_detail.find_all('div', attrs = {'class': 'download-torrent'})[0].find_all('a')[0]['href']
                    else:
                        # Fall back to a guessed direct torrent URL
                        tmp = result.find_all('td')[0].find_all('a')[0]['href']
                        tmp = tmp.split('/')[6].replace('.html', '.torrent')
                        url_download = ('http://www.cpasbien.me/_torrents/%s' % tmp)

                    size = result.find_all('td')[1].text
                    seeder = result.find_all('td')[2].find_all('span')[0].text
                    leecher = result.find_all('td')[3].text
                    age = '0'

                    # Every word of the movie title must appear in the result name
                    verify = getTitle(movie['library']).split(' ')
                    add = 1
                    for verify_unit in verify:
                        if (name.find(verify_unit) == -1):
                            add = 0

                    def extra_check(item):
                        return True

                    if add == 1:
                        new['id'] = id
                        new['name'] = name.strip()
                        new['url'] = url_download
                        new['detail_url'] = detail_url
                        new['size'] = self.parseSize(size)
                        new['age'] = self.ageToDays(age)
                        new['seeders'] = tryInt(seeder)
                        new['leechers'] = tryInt(leecher)
                        new['extra_check'] = extra_check
                        new['download'] = self.loginDownload
                        #new['score'] = fireEvent('score.calculate', new, movie, single = True)
                        #log.error('score')
                        #log.error(new['score'])
                        results.append(new)
                        id = id + 1
                except:
                    log.error('Failed parsing cPASbien: %s', traceback.format_exc())
        except AttributeError:
            log.debug('No search results found.')
    else:
        log.debug('No search results found.')
def _search(self, movie, quality, results):
    """Search the site for the movie (title + quality, sorted by seeds) and
    append parsed torrent rows that match the movie title to results.
    """
    # Query string: title + quality, dashes/extra spaces collapsed, utf-8 bytes
    TitleStringReal = (getTitle(movie['info']) + ' ' + simplifyString(quality['identifier'])).replace(
        '-', ' ').replace(' ', ' ').replace(
        ' ', ' ').replace(' ', ' ').encode("utf-8")
    log.info('Title %s', TitleStringReal)

    # Search URL: dots/spaces become dashes, seed-sorted listing page
    URL = ((self.urls['search']) + TitleStringReal.replace('.', '-').replace(' ', '-') +
           '.html,trie-seeds-d').encode('utf-8')
    req = urllib2.Request(URL, headers={'User-Agent': "Mozilla/5.0"})
    log.info('opening url %s', URL)
    data = urllib2.urlopen(req, timeout=500)
    log.info('data retrieved')

    # Synthetic result ids, counted up from 1000
    id = 1000

    if data:
        try:
            html = BeautifulSoup(data)
            torrent_rows = html.findAll('tr')
            for result in torrent_rows:
                try:
                    # Rows without a link are not results
                    if not result.find('a'):
                        continue

                    title = result.find('a').get_text(strip=False)
                    log.info('found title %s', title)

                    # Skip rows whose name does not match the movie
                    testname = namer_check.correctName(title.lower(), movie)
                    if testname == 0:
                        log.info('%s not match %s', (title.lower(), movie['info']['titles']))
                        continue
                    log.info('title %s match', title)

                    # Derive download / detail URLs from the row link
                    tmp = result.find("a")['href'].split('/')[-1].replace(
                        '.html', '.torrent').strip()
                    download_url = (self.urls['site'] + 'get_torrent/{0}'.format(tmp) + ".torrent")
                    detail_url = (self.urls['site'] + 'torrent/{0}'.format(tmp))
                    log.debug('download_url %s', download_url)

                    if not all([title, download_url]):
                        continue

                    seeders = int(result.find(class_="seed_ok").get_text(strip=True))
                    leechers = int(result.find_all('td')[3].get_text(strip=True))
                    size = result.find_all('td')[1].get_text(strip=True)

                    def extra_check(item):
                        return True

                    # Normalise French size units (Go/Mo/Ko) to GB/MB/KB before parsing
                    size = size.lower()
                    size = size.replace("go", "gb")
                    size = size.replace("mo", "mb")
                    size = size.replace("ko", "kb")
                    size = size.replace(' ', '')
                    size = self.parseSize(str(size))

                    new = {}
                    new['id'] = id
                    new['name'] = title.strip()
                    new['url'] = download_url
                    new['detail_url'] = detail_url
                    new['size'] = size
                    new['seeders'] = seeders
                    new['leechers'] = leechers
                    new['extra_check'] = extra_check
                    new['download'] = self.loginDownload

                    results.append(new)
                    log.info(results)
                    id = id + 1
                except StandardError, e:
                    # Best-effort per row: log and continue with the next one
                    log.info('boum %s', e)
                    continue
        except AttributeError:
            log.debug('No search results found.')
    else:
        log.debug('No search results found.')
def search(self, movie, quality):
    """Search NZBClub's RSS feed for the movie and return scored result dicts.

    Builds a quoted "<title> <year>" query (minus configured ignored words),
    fetches/caches the RSS, and keeps only entries that pass the
    'searcher.correct_movie' check.
    """
    results = []
    if self.isDisabled():
        return results

    # Quoted title+year, with each ignored word excluded via '-word'
    q = '"%s %s" %s' % (simplifyString(getTitle(
        movie['library'])), movie['library']['year'], quality.get('identifier'))
    for ignored in Env.setting('ignored_words', 'searcher').split(','):
        q = '%s -%s' % (q, ignored.strip())

    params = {
        'q': q,
        'ig': '1',
        'rpp': 200,
        'st': 1,
        'sp': 1,
        'ns': 1,
    }

    cache_key = 'nzbclub.%s.%s.%s' % (movie['library']['identifier'], quality.get('identifier'), q)
    data = self.getCache(cache_key, self.urls['search'] % tryUrlencode(params))
    if data:
        try:
            try:
                data = XMLTree.fromstring(data)
                nzbs = self.getElements(data, 'channel/item')
            except Exception, e:
                log.debug('%s, %s', (self.getName(), e))
                return results

            for nzb in nzbs:
                nzbclub_id = tryInt(
                    self.getTextElement(
                        nzb, "link").split('/nzb_view/')[1].split('/')[0])
                enclosure = self.getElement(nzb, "enclosure").attrib
                size = enclosure['length']
                date = self.getTextElement(nzb, "pubDate")

                # Deferred check: fetch the full description and reject
                # passworded/corrupted posts
                def extra_check(item):
                    full_description = self.getCache(
                        'nzbclub.%s' % nzbclub_id,
                        item['detail_url'],
                        cache_timeout=25920000)

                    for ignored in [
                        'ARCHIVE inside ARCHIVE', 'Incomplete',
                        'repair impossible'
                    ]:
                        if ignored in full_description:
                            log.info(
                                'Wrong: Seems to be passworded or corrupted files: %s',
                                new['name'])
                            return False

                    return True

                new = {
                    'id': nzbclub_id,
                    'type': 'nzb',
                    'provider': self.getName(),
                    'name': toUnicode(self.getTextElement(nzb, "title")),
                    'age': self.calculateAge(
                        int(time.mktime(parse(date).timetuple()))),
                    'size': tryInt(size) / 1024 / 1024,
                    'url': enclosure['url'].replace(' ', '_'),
                    'download': self.download,
                    'detail_url': self.getTextElement(nzb, "link"),
                    'description': '',
                    'get_more_info': self.getMoreInfo,
                    'extra_check': extra_check
                }

                is_correct_movie = fireEvent('searcher.correct_movie',
                                             nzb=new,
                                             movie=movie,
                                             quality=quality,
                                             imdb_results=False,
                                             single=True)

                if is_correct_movie:
                    new['score'] = fireEvent('score.calculate', new, movie, single=True)
                    results.append(new)
                    self.found(new)

            return results
        except SyntaxError:
            log.error('Failed to parse XML response from NZBClub')
def getSearchParams(self, movie, quality):
    """Build two url-encoded queries (raw and simplified title) per movie title.

    The site category is chosen from the quality identifier. The five
    near-identical per-quality branches of the original are collapsed into
    one category mapping; the emitted query strings are unchanged.
    """
    results = []
    MovieTitles = movie['library']['info']['titles']
    moviequality = simplifyString(quality['identifier'])

    # Site category per quality identifier; 5 is the generic movies category
    categories = {
        '720p': 15,
        '1080p': 16,
        'dvd-r': 19,
        'br-disk': 17,
    }
    category = categories.get(moviequality, 5)

    suffix = "&order=desc&sort=normal&exact"

    for MovieTitle in MovieTitles:
        # Prefer latin-1, fall back to utf-8 when the title cannot be encoded
        try:
            TitleStringReal = str(
                MovieTitle.encode("latin-1").replace('-', ' '))
        except:
            TitleStringReal = str(
                MovieTitle.encode("utf-8").replace('-', ' '))

        # One query with the raw title, one with the simplified title
        for q in (TitleStringReal, simplifyString(TitleStringReal)):
            results.append(
                urllib.urlencode({
                    'q': q,
                    'category': category,
                    'ak': self.conf('userkey')
                }) + suffix)

    return results
def _search(self, movie, quality, results): nzbDownloaders = [BinSearch(), NZBIndex()] MovieTitles = movie['info']['titles'] moviequality = simplifyString(quality['identifier']) movieyear = movie['info']['year'] if quality['custom']['3d'] == 1: threeD = True else: threeD = False if moviequality in ("720p", "1080p", "bd50", "2160p"): cat1 = '39' cat2 = '49' minSize = 2000 elif moviequality in ("dvdr"): cat1 = '23' cat2 = '48' minSize = 3000 else: cat1 = '6' cat2 = '27' minSize = 500 for MovieTitle in MovieTitles: try: TitleStringReal = str( MovieTitle.encode("latin-1").replace('-', ' ')) except: continue if threeD: TitleStringReal = TitleStringReal + ' 3d' data = 'chkInit=1&edTitre=' + simplifyString( unicode(TitleStringReal, "latin-1") ) + '&chkTitre=on&chkFichier=on&chkCat=on&cats%5B%5D=' + cat1 + '&cats%5B%5D=' + cat2 + '&edAge=&edYear=' try: soup = BeautifulSoup(urllib2.urlopen(self.urls['search'], data)) except Exception, e: log.error(u"Error trying to load BinNewz response: " + e) return [] tables = soup.findAll("table", id="tabliste") for table in tables: rows = table.findAll("tr") for row in rows: cells = row.select("> td") if (len(cells) < 11): continue name = cells[2].text.strip() testname = namer_check.correctName(name, movie) if testname == 0: continue language = cells[3].find("img").get("src") if not "_fr" in language and not "_frq" in language: continue detectedlang = '' if "_fr" in language: detectedlang = ' truefrench ' else: detectedlang = ' french ' # blacklist_groups = [ "alt.binaries.multimedia" ] blacklist_groups = [] newgroupLink = cells[4].find("a") newsgroup = None if newgroupLink.contents: newsgroup = newgroupLink.contents[0] if newsgroup == "abmulti": newsgroup = "alt.binaries.multimedia" elif newsgroup == "ab.moovee": newsgroup = "alt.binaries.moovee" elif newsgroup == "abtvseries": newsgroup = "alt.binaries.tvseries" elif newsgroup == "abtv": newsgroup = "alt.binaries.tv" elif newsgroup == "a.b.teevee": newsgroup = "alt.binaries.teevee" 
elif newsgroup == "abstvdivxf": newsgroup = "alt.binaries.series.tv.divx.french" elif newsgroup == "abhdtvx264fr": newsgroup = "alt.binaries.hdtv.x264.french" elif newsgroup == "abmom": newsgroup = "alt.binaries.mom" elif newsgroup == "abhdtv": newsgroup = "alt.binaries.hdtv" elif newsgroup == "abboneless": newsgroup = "alt.binaries.boneless" elif newsgroup == "abhdtvf": newsgroup = "alt.binaries.hdtv.french" elif newsgroup == "abhdtvx264": newsgroup = "alt.binaries.hdtv.x264" elif newsgroup == "absuperman": newsgroup = "alt.binaries.superman" elif newsgroup == "abechangeweb": newsgroup = "alt.binaries.echange-web" elif newsgroup == "abmdfvost": newsgroup = "alt.binaries.movies.divx.french.vost" elif newsgroup == "abdvdr": newsgroup = "alt.binaries.dvdr" elif newsgroup == "abmzeromov": newsgroup = "alt.binaries.movies.zeromovies" elif newsgroup == "abcfaf": newsgroup = "alt.binaries.cartoons.french.animes-fansub" elif newsgroup == "abcfrench": newsgroup = "alt.binaries.cartoons.french" elif newsgroup == "abgougouland": newsgroup = "alt.binaries.gougouland" elif newsgroup == "abroger": newsgroup = "alt.binaries.roger" elif newsgroup == "abtatu": newsgroup = "alt.binaries.tatu" elif newsgroup == "abstvf": newsgroup = "alt.binaries.series.tv.french" elif newsgroup == "abmdfreposts": newsgroup = "alt.binaries.movies.divx.french.reposts" elif newsgroup == "abmdf": newsgroup = "alt.binaries.movies.french" elif newsgroup == "abhdtvfrepost": newsgroup = "alt.binaries.hdtv.french.repost" elif newsgroup == "abmmkv": newsgroup = "alt.binaries.movies.mkv" elif newsgroup == "abf-tv": newsgroup = "alt.binaries.french-tv" elif newsgroup == "abmdfo": newsgroup = "alt.binaries.movies.divx.french.old" elif newsgroup == "abmf": newsgroup = "alt.binaries.movies.french" elif newsgroup == "ab.movies": newsgroup = "alt.binaries.movies" elif newsgroup == "a.b.french": newsgroup = "alt.binaries.french" elif newsgroup == "a.b.3d": newsgroup = "alt.binaries.3d" elif newsgroup == "ab.dvdrip": 
newsgroup = "alt.binaries.dvdrip" elif newsgroup == "ab.welovelori": newsgroup = "alt.binaries.welovelori" elif newsgroup == "abblu-ray": newsgroup = "alt.binaries.blu-ray" elif newsgroup == "ab.bloaf": newsgroup = "alt.binaries.bloaf" elif newsgroup == "ab.hdtv.german": newsgroup = "alt.binaries.hdtv.german" elif newsgroup == "abmd": newsgroup = "alt.binaries.movies.divx" elif newsgroup == "ab.ath": newsgroup = "alt.binaries.ath" elif newsgroup == "a.b.town": newsgroup = "alt.binaries.town" elif newsgroup == "a.b.u-4all": newsgroup = "alt.binaries.u-4all" elif newsgroup == "ab.amazing": newsgroup = "alt.binaries.amazing" elif newsgroup == "ab.astronomy": newsgroup = "alt.binaries.astronomy" elif newsgroup == "ab.nospam.cheer": newsgroup = "alt.binaries.nospam.cheerleaders" elif newsgroup == "ab.worms": newsgroup = "alt.binaries.worms" elif newsgroup == "abcores": newsgroup = "alt.binaries.cores" elif newsgroup == "abdvdclassics": newsgroup = "alt.binaries.dvd.classics" elif newsgroup == "abdvdf": newsgroup = "alt.binaries.dvd.french" elif newsgroup == "abdvds": newsgroup = "alt.binaries.dvds" elif newsgroup == "abmdfrance": newsgroup = "alt.binaries.movies.divx.france" elif newsgroup == "abmisc": newsgroup = "alt.binaries.misc" elif newsgroup == "abnl": newsgroup = "alt.binaries.nl" elif newsgroup == "abx": newsgroup = "alt.binaries.x" elif newsgroup == "ab.new-movies": newsgroup = "alt.binaries.new-movies" elif newsgroup == "ab.triballs": newsgroup = "alt.binaries.triballs" elif newsgroup == "abdivxf": newsgroup = "alt.binaries.divx.french" elif newsgroup == "ab.solar-xl": newsgroup = "alt.binaries.solar-xl" elif newsgroup == "abbig": newsgroup = "alt.binaries.big" elif newsgroup == "ab.insiderz": newsgroup = "alt.binaries.insiderz" elif newsgroup == "abwarez": newsgroup = "alt.binaries.warez" elif newsgroup == "abdvd": newsgroup = "alt.binaries.dvd" elif newsgroup == "abdvd9": newsgroup = "alt.binaries.dvd9" elif newsgroup == "absvcdf": newsgroup = 
"alt.binaries.svcd.french" elif newsgroup == "ab.ftd": newsgroup = "alt.binaries.ftd" elif newsgroup == "ab.u-4all": newsgroup = "alt.binaries.u-4all" elif newsgroup == "a.b.u4all": newsgroup = "alt.binaries.u-4all" else: log.error(u"Unknown binnewz newsgroup: " + newsgroup) continue if newsgroup in blacklist_groups: log.error( u"Ignoring result, newsgroup is blacklisted: " + newsgroup) continue filename = cells[5].contents[0] m = re.search("^(.+)\s+{(.*)}$", name) qualityStr = "" if m: name = m.group(1) qualityStr = m.group(2) m = re.search("^(.+)\s+\[(.*)\]$", name) source = None if m: name = m.group(1) source = m.group(2) m = re.search("(.+)\(([0-9]{4})\)", name) year = "" if m: name = m.group(1) year = m.group(2) if int(year) > movieyear + 1 or int( year) < movieyear - 1: continue m = re.search("(.+)\((\d{2}/\d{2}/\d{4})\)", name) dateStr = "" if m: name = m.group(1) dateStr = m.group(2) year = dateStr[-5:].strip(")").strip("/") m = re.search("(.+)\s+S(\d{2})\s+E(\d{2})(.*)", name) if m: name = m.group(1) + " S" + m.group(2) + "E" + m.group( 3) + m.group(4) m = re.search("(.+)\s+S(\d{2})\s+Ep(\d{2})(.*)", name) if m: name = m.group(1) + " S" + m.group(2) + "E" + m.group( 3) + m.group(4) filenameLower = filename.lower() searchItems = [] if qualityStr == "": if source in ("Blu Ray-Rip", "HD DVD-Rip"): qualityStr = "brrip" elif source == "Blu Ray-Rip 4K": qualityStr = "2160p" elif source == "DVDRip": qualityStr = "dvdrip" elif source == "TS": qualityStr = "ts" elif source == "DVDSCR": qualityStr = "scr" elif source == "CAM": qualityStr = "cam" elif moviequality == "dvdr": qualityStr = "dvdr" if year == '': year = '1900' if len(searchItems) == 0 and qualityStr == str( moviequality): searchItems.append(filename) for searchItem in searchItems: resultno = 1 for downloader in nzbDownloaders: log.info("Searching for download : " + name + ", search string = " + searchItem + " on " + downloader.__class__.__name__) try: binsearch_result = downloader.search( searchItem, 
minSize, newsgroup) if binsearch_result: new = {} def extra_check(item): return True qualitytag = '' if qualityStr.lower() in [ '720p', '1080p', '2160p' ]: qualitytag = ' hd x264 h264 ' elif qualityStr.lower() in ['dvdrip']: qualitytag = ' dvd xvid ' elif qualityStr.lower() in ['brrip']: qualitytag = ' hdrip ' elif qualityStr.lower() in ['ts']: qualitytag = ' webrip ' elif qualityStr.lower() in ['scr']: qualitytag = '' elif qualityStr.lower() in ['dvdr']: qualitytag = ' pal video_ts ' new['id'] = binsearch_result.nzbid new['name'] = name + detectedlang + qualityStr + qualitytag + downloader.__class__.__name__ new['url'] = binsearch_result.nzburl new['detail_url'] = binsearch_result.refererURL new['size'] = binsearch_result.sizeInMegs new['age'] = binsearch_result.age new['extra_check'] = extra_check results.append(new) resultno = resultno + 1 log.info("Found : " + searchItem + " on " + downloader.__class__.__name__) if resultno == 3: break except Exception, e: log.error("Searching from " + downloader.__class__.__name__ + " failed : " + str(e) + traceback.format_exc())
def _search(self, movie, quality, results):
    """Search cPASbien for *movie* torrents and append matches to *results*.

    Scrapes rows whose CSS class is 'ligne0', 'ligne1', ... until a class
    with no rows is found, filters them through namer_check, and verifies
    every word of the movie title appears in the release name.
    """
    # Cookie login
    if not self.last_login_check and not self.login():
        return
    # NOTE(review): the chained .replace(' ', ' ') calls presumably collapsed
    # different whitespace characters originally — confirm against upstream.
    TitleStringReal = (getTitle(movie['info']) + ' ' + simplifyString(quality['identifier'])).replace('-', ' ').replace(' ', ' ').replace(' ', ' ').replace(' ', ' ').encode("utf8")
    # Normalise the search URL to plain ASCII before quoting it.
    URL = (self.urls['search']).encode('UTF8')
    URL = unicodedata.normalize('NFD', unicode(URL, "utf8", "replace"))
    URL = URL.encode('ascii', 'ignore')
    URL = urllib2.quote(URL.encode('utf8'), ":/?=")
    values = {'champ_recherche': TitleStringReal}
    data_tmp = urllib.urlencode(values)
    req = urllib2.Request(URL, data_tmp, headers={'User-Agent': "Mozilla/5.0"})
    data = urllib2.urlopen(req)
    # Synthetic result ids start at 1000 (the site provides none).
    id = 1000
    if data:
        try:
            html = BeautifulSoup(data)
            # Collect result rows: classes are 'ligne0', 'ligne1', ... until
            # the first class that matches nothing.
            lin = 0
            erlin = 0
            resultdiv = []
            while erlin == 0:
                try:
                    classlin = 'ligne' + str(lin)
                    resultlin = html.findAll(attrs={'class': [classlin]})
                    if resultlin:
                        for ele in resultlin:
                            resultdiv.append(ele)
                        lin += 1
                    else:
                        erlin = 1
                except:
                    erlin = 1
            for result in resultdiv:
                try:
                    new = {}
                    name = result.findAll(attrs={'class': ["titre"]})[0].text
                    testname = namer_check.correctName(name, movie)
                    if testname == 0:
                        continue
                    detail_url = result.find("a")['href']
                    # Derive the .torrent download URL from the detail page URL.
                    tmp = detail_url.split('/')[-1].replace('.html', '.torrent')
                    url_download = ('http://www.cpasbien.io/telechargement/%s' % tmp)
                    size = result.findAll(attrs={'class': ["poid"]})[0].text
                    seeder = result.findAll(attrs={'class': ["seed_ok"]})[0].text
                    leecher = result.findAll(attrs={'class': ["down"]})[0].text
                    age = '1'
                    # Require every word of the movie title in the release name.
                    verify = getTitle(movie['info']).split(' ')
                    add = 1
                    for verify_unit in verify:
                        if (name.lower().find(verify_unit.lower()) == -1):
                            add = 0

                    def extra_check(item):
                        return True
                    if add == 1:
                        new['id'] = id
                        new['name'] = name.strip()
                        new['url'] = url_download
                        new['detail_url'] = detail_url
                        new['size'] = self.parseSize(size)
                        new['age'] = self.ageToDays(age)
                        new['seeders'] = tryInt(seeder)
                        new['leechers'] = tryInt(leecher)
                        new['extra_check'] = extra_check
                        new['download'] = self.loginDownload
                        #new['score'] = fireEvent('score.calculate', new, movie, single = True)
                        #log.error('score')
                        #log.error(new['score'])
                        results.append(new)
                        id = id + 1
                except:
                    log.error('Failed parsing cPASbien: %s', traceback.format_exc())
        except AttributeError:
            log.debug('No search results found.')
    else:
        log.debug('No search results found.')
def _searchOnTitle(self, title, media, quality, results, offset=0):
    """
    Do a search based on possible titles.

    This function doesn't check the quality because CouchPotato does that
    job when parsing results. Furthermore the URL must stay generic to
    use the native CouchPotato caching feature.

    .. seealso:: YarrProvider.search
    """
    try:
        params = {
            'category': 2145,  # Film/Vidéo
            'description': '',
            'do': 'search',
            'file': '',
            'name': simplifyString(title),
            'sub_category': 'all',
            'uploader': ''
        }
        if offset > 0:
            # Offset is translated to the site's page parameter.
            params['page'] = offset * YGG.limit
        url = self.urls['search'].format(tryUrlencode(params))
        data = self.getHTMLData(url)
        soup = BeautifulSoup(data, 'html.parser')
        # Only anchors pointing at torrent detail pages.
        filter_ = '^{0}'.format(self.urls['torrent'])
        for link in soup.find_all(href=re.compile(filter_)):
            detail_url = link['href']
            # Keep only movie-like sub-categories.
            if re.search(u'/filmvidéo/(film|animation|documentaire)/',
                         detail_url):
                name = self.parseText(link)
                # Numeric id embedded in the detail URL.
                id_ = tryInt(re.search('/(\d+)-[^/\s]+$', link['href']).
                             group(1))
                columns = link.parent.parent.find_all('td')
                size = self.parseSize(self.parseText(columns[5]))
                seeders = tryInt(self.parseText(columns[7]))
                leechers = tryInt(self.parseText(columns[8]))
                result = {
                    'id': id_,
                    'name': name,
                    'seeders': seeders,
                    'leechers': leechers,
                    'size': size,
                    'url': self.urls['url'].format(id_),
                    'detail_url': detail_url,
                    'verified': True,
                    'get_more_info': self.getMoreInfo,
                    'extra_check': self.extraCheck
                }
                results.append(result)
                YGG.log.debug(result)
        # Get next page if we don't have all results
        pagination = soup.find('ul', class_='pagination')
        if pagination:
            for page in pagination.find_all('li'):
                next_ = tryInt(self.parseText(page.find('a')))
                if next_ > offset + 1:
                    # Recurse into the next page (one page per call).
                    self._searchOnTitle(title, media, quality, results,
                                        offset + 1)
                    break
    except:
        YGG.log.error('Failed searching release from {0}: {1}'.
                      format(self.getName(), traceback.format_exc()))
def _search(self, movie, quality, results): nzbDownloaders = [NZBClub(), BinSearch(), NZBIndex()] MovieTitles = movie['library']['info']['titles'] moviequality = simplifyString(quality['identifier']) movieyear = movie['library']['year'] if moviequality in ("720p", "1080p", "bd50"): cat1 = '39' cat2 = '49' minSize = 2000 elif moviequality in ("dvdr"): cat1 = '23' cat2 = '48' minSize = 3000 else: cat1 = '6' cat2 = '27' minSize = 500 for MovieTitle in MovieTitles: TitleStringReal = str(MovieTitle.encode("utf-8").replace('-', ' ')) data = 'chkInit=1&edTitre=' + TitleStringReal + '&chkTitre=on&chkFichier=on&chkCat=on&cats%5B%5D=' + cat1 + '&cats%5B%5D=' + cat2 + '&edAge=&edYear=' try: soup = BeautifulSoup( urllib2.urlopen("http://www.binnews.in/_bin/search2.php", data)) except Exception, e: log.error(u"Error trying to load BinNewz response: " + e) return [] #results = [] tables = soup.findAll("table", id="tabliste") for table in tables: rows = table.findAll("tr") for row in rows: cells = row.select("> td") if (len(cells) < 11): continue name = cells[2].text.strip() testname = namer_check.correctName(name, movie) if testname == 0: continue language = cells[3].find("img").get("src") if not "_fr" in language and not "_frq" in language: continue # blacklist_groups = [ "alt.binaries.multimedia" ] blacklist_groups = [] newgroupLink = cells[4].find("a") newsgroup = None if newgroupLink.contents: newsgroup = newgroupLink.contents[0] if newsgroup == "abmulti": newsgroup = "alt.binaries.multimedia" elif newsgroup == "abtvseries": newsgroup = "alt.binaries.tvseries" elif newsgroup == "abtv": newsgroup = "alt.binaries.tv" elif newsgroup == "a.b.teevee": newsgroup = "alt.binaries.teevee" elif newsgroup == "abstvdivxf": newsgroup = "alt.binaries.series.tv.divx.french" elif newsgroup == "abhdtvx264fr": newsgroup = "alt.binaries.hdtv.x264.french" elif newsgroup == "abmom": newsgroup = "alt.binaries.mom" elif newsgroup == "abhdtv": newsgroup = "alt.binaries.hdtv" elif newsgroup == 
"abboneless": newsgroup = "alt.binaries.boneless" elif newsgroup == "abhdtvf": newsgroup = "alt.binaries.hdtv.french" elif newsgroup == "abhdtvx264": newsgroup = "alt.binaries.hdtv.x264" elif newsgroup == "absuperman": newsgroup = "alt.binaries.superman" elif newsgroup == "abechangeweb": newsgroup = "alt.binaries.echange-web" elif newsgroup == "abmdfvost": newsgroup = "alt.binaries.movies.divx.french.vost" elif newsgroup == "abdvdr": newsgroup = "alt.binaries.dvdr" elif newsgroup == "abmzeromov": newsgroup = "alt.binaries.movies.zeromovies" elif newsgroup == "abcfaf": newsgroup = "alt.binaries.cartoons.french.animes-fansub" elif newsgroup == "abcfrench": newsgroup = "alt.binaries.cartoons.french" elif newsgroup == "abgougouland": newsgroup = "alt.binaries.gougouland" elif newsgroup == "abroger": newsgroup = "alt.binaries.roger" elif newsgroup == "abtatu": newsgroup = "alt.binaries.tatu" elif newsgroup == "abstvf": newsgroup = "alt.binaries.series.tv.french" elif newsgroup == "abmdfreposts": newsgroup = "alt.binaries.movies.divx.french.reposts" elif newsgroup == "abmdf": newsgroup = "alt.binaries.movies.french" elif newsgroup == "abhdtvfrepost": newsgroup = "alt.binaries.hdtv.french.repost" elif newsgroup == "abmmkv": newsgroup = "alt.binaries.movies.mkv" elif newsgroup == "abf-tv": newsgroup = "alt.binaries.french-tv" elif newsgroup == "abmdfo": newsgroup = "alt.binaries.movies.divx.french.old" elif newsgroup == "abmf": newsgroup = "alt.binaries.movies.french" elif newsgroup == "ab.movies": newsgroup = "alt.binaries.movies" elif newsgroup == "a.b.french": newsgroup = "alt.binaries.french" elif newsgroup == "a.b.3d": newsgroup = "alt.binaries.3d" elif newsgroup == "ab.dvdrip": newsgroup = "alt.binaries.dvdrip" else: log.error(u"Unknown binnewz newsgroup: " + newsgroup) continue if newsgroup in blacklist_groups: log.error( u"Ignoring result, newsgroup is blacklisted: " + newsgroup) continue filename = cells[5].contents[0] m = re.search("^(.+)\s+{(.*)}$", name) 
qualityStr = "" if m: name = m.group(1) qualityStr = m.group(2) m = re.search("^(.+)\s+\[(.*)\]$", name) source = None if m: name = m.group(1) source = m.group(2) m = re.search("(.+)\(([0-9]{4})\)", name) year = "" if m: name = m.group(1) year = m.group(2) m = re.search("(.+)\((\d{2}/\d{2}/\d{4})\)", name) dateStr = "" if m: name = m.group(1) dateStr = m.group(2) year = dateStr[-5:].strip(")").strip("/") m = re.search("(.+)\s+S(\d{2})\s+E(\d{2})(.*)", name) if m: name = m.group(1) + " S" + m.group(2) + "E" + m.group( 3) + m.group(4) m = re.search("(.+)\s+S(\d{2})\s+Ep(\d{2})(.*)", name) if m: name = m.group(1) + " S" + m.group(2) + "E" + m.group( 3) + m.group(4) filenameLower = filename.lower() searchItems = [] if qualityStr == "": if source in ("Blu Ray-Rip", "HD DVD-Rip"): qualityStr = "brrip" elif source == "DVDRip": qualityStr = "dvdrip" elif source == "TS": qualityStr = "ts" elif source == "DVDSCR": qualityStr = "scr" elif source == "CAM": qualityStr = "cam" elif moviequality == "dvdr": qualityStr = "dvdr" if year == '': year = '1900' if len(searchItems) == 0 and qualityStr == str( moviequality): searchItems.append(filename) for searchItem in searchItems: resultno = 1 for downloader in nzbDownloaders: log.info2("Searching for download : " + name + ", search string = " + searchItem + " on " + downloader.__class__.__name__) try: binsearch_result = downloader.search( searchItem, minSize, newsgroup) if binsearch_result: new = {} def extra_check(item): return True new['id'] = binsearch_result.nzbid new['name'] = name + ' french ' + qualityStr + ' ' + searchItem + ' ' + name + ' ' + downloader.__class__.__name__ new['url'] = binsearch_result.nzburl new['detail_url'] = binsearch_result.refererURL new['size'] = binsearch_result.sizeInMegs new['age'] = binsearch_result.age new['extra_check'] = extra_check results.append(new) resultno = resultno + 1 log.info2("Found : " + searchItem + " on " + downloader.__class__.__name__) if resultno == 3: break except Exception, e: 
log.error("Searching from " + downloader.__class__.__name__ + " failed : " + str(e) + traceback.format_exc())
def correctMovie(self, nzb=None, movie=None, quality=None, **kwargs):
    """Decide whether *nzb* is an acceptable release for *movie*/*quality*.

    Applies, in order: retention, required words, ignored words, adult-tag
    filter, quality mismatch, size bounds, provider-specific checks, and
    finally IMDB/title/year matching.  Returns True/False.
    """
    imdb_results = kwargs.get('imdb_results', False)
    retention = Env.setting('retention', section='nzb')
    # Retention only applies to usenet results (no 'seeders' key).
    if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
        log.info2(
            'Wrong: Outside retention, age is %s, needs %s or lower: %s',
            (nzb['age'], retention, nzb['name']))
        return False
    movie_name = getTitle(movie['library'])
    movie_words = re.split('\W+', simplifyString(movie_name))
    nzb_name = simplifyString(nzb['name'])
    nzb_words = re.split('\W+', nzb_name)
    # Make sure it has required words; '&' inside a set means AND.
    required_words = splitString(self.conf('required_words').lower())
    req_match = 0
    for req_set in required_words:
        req = splitString(req_set, '&')
        req_match += len(list(set(nzb_words) & set(req))) == len(req)
    if self.conf('required_words') and req_match == 0:
        log.info2('Wrong: Required word missing: %s', nzb['name'])
        return False
    # Ignore releases; '&' inside a set means all words must be present.
    ignored_words = splitString(self.conf('ignored_words').lower())
    ignored_match = 0
    for ignored_set in ignored_words:
        ignored = splitString(ignored_set, '&')
        ignored_match += len(list(set(nzb_words) & set(ignored))) == len(ignored)
    if self.conf('ignored_words') and ignored_match:
        log.info2("Wrong: '%s' contains 'ignored words'", (nzb['name']))
        return False
    # Ignore p**n stuff (unless the tag is part of the movie title itself).
    pron_tags = [
        'xxx', 'sex', 'anal', 't**s', 'f**k', 'p**n', 'orgy', 'milf',
        'boobs', 'erotica', 'erotic'
    ]
    pron_words = list(set(nzb_words) & set(pron_tags) - set(movie_words))
    if pron_words:
        log.info('Wrong: %s, probably pr0n', (nzb['name']))
        return False
    preferred_quality = fireEvent('quality.single',
                                  identifier=quality['identifier'],
                                  single=True)
    # Contains lower quality string
    if self.containsOtherQuality(nzb,
                                 movie_year=movie['library']['year'],
                                 preferred_quality=preferred_quality):
        log.info2('Wrong: %s, looking for %s',
                  (nzb['name'], quality['label']))
        return False
    # File to small
    if nzb['size'] and preferred_quality['size_min'] > nzb['size']:
        log.info2(
            'Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.',
            (nzb['name'], preferred_quality['label'], nzb['size'],
             preferred_quality['size_min']))
        return False
    # File to large
    if nzb['size'] and preferred_quality.get('size_max') < nzb['size']:
        log.info2(
            'Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.',
            (nzb['name'], preferred_quality['label'], nzb['size'],
             preferred_quality['size_max']))
        return False
    # Provider specific functions
    get_more = nzb.get('get_more_info')
    if get_more:
        get_more(nzb)
    extra_check = nzb.get('extra_check')
    if extra_check and not extra_check(nzb):
        return False
    if imdb_results:
        return True
    # Check if nzb contains imdb link
    if self.checkIMDB([nzb.get('description', '')],
                      movie['library']['identifier']):
        return True
    for raw_title in movie['library']['titles']:
        for movie_title in possibleTitles(raw_title['title']):
            movie_words = re.split('\W+', simplifyString(movie_title))
            if self.correctName(nzb['name'], movie_title):
                # if no IMDB link, at least check year range 1
                if len(movie_words) > 2 and self.correctYear(
                        [nzb['name']], movie['library']['year'], 1):
                    return True
                # if no IMDB link, at least check year
                if len(movie_words) <= 2 and self.correctYear(
                        [nzb['name']], movie['library']['year'], 0):
                    return True
    log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'",
             (nzb['name'], movie_name, movie['library']['year']))
    return False
def search(self, movie, quality):
    """Search KickAssTorrents for *movie* at *quality*.

    Parses the cached search page, walks the category tabs matching the
    wanted quality, extracts one result dict per table row, scores it and
    keeps it when the searcher confirms the movie match.

    Fix: the original compared strings with ``is`` (identity) instead of
    ``==`` (equality) — e.g. ``temp['class'] is 'firstr'`` and
    ``column_name is 'name'`` — which only worked by accident of CPython
    string interning.

    :return: list of result dicts (may be empty)
    """
    results = []
    if self.isDisabled():
        return results
    title = simplifyString(getTitle(movie['library'])).replace(' ', '-')
    cache_key = 'kickasstorrents.%s.%s' % (movie['library']['identifier'],
                                           quality.get('identifier'))
    data = self.getCache(
        cache_key,
        self.urls['search'] % (title,
                               movie['library']['identifier'].replace('tt', '')))
    if data:
        cat_ids = self.getCatId(quality['identifier'])
        # Column index -> meaning; None columns are skipped.
        table_order = ['name', 'size', None, 'age', 'seeds', 'leechers']
        try:
            html = BeautifulSoup(data)
            resultdiv = html.find('div', attrs={'class': 'tabs'})
            for result in resultdiv.find_all('div', recursive=False):
                # Only tabs for the wanted quality categories.
                if result.get('id').lower() not in cat_ids:
                    continue
                try:
                    try:
                        for temp in result.find_all('tr'):
                            # Skip the header row and rows without an id.
                            if temp['class'] == 'firstr' or not temp.get('id'):
                                continue
                            new = {
                                'type': 'torrent_magnet',
                                'check_nzb': False,
                                'description': '',
                                'provider': self.getName(),
                                'score': 0,
                            }
                            nr = 0
                            for td in temp.find_all('td'):
                                column_name = table_order[nr]
                                if column_name:
                                    if column_name == 'name':
                                        link = td.find('div', {
                                            'class': 'torrentname'
                                        }).find_all('a')[1]
                                        new['id'] = temp.get('id')[-8:]
                                        new['name'] = link.text
                                        new['url'] = td.find('a', 'imagnet')['href']
                                        new['detail_url'] = self.urls['detail'] % link['href'][1:]
                                        # Verified torrents get a score bonus.
                                        new['score'] = 20 if td.find('a', 'iverif') else 0
                                    elif column_name == 'size':
                                        new['size'] = self.parseSize(td.text)
                                    elif column_name == 'age':
                                        new['age'] = self.ageToDays(td.text)
                                    elif column_name == 'seeds':
                                        new['seeds'] = tryInt(td.text)
                                    elif column_name == 'leechers':
                                        new['leechers'] = tryInt(td.text)
                                nr += 1
                            new['score'] += fireEvent('score.calculate', new,
                                                      movie, single=True)
                            is_correct_movie = fireEvent('searcher.correct_movie',
                                                         nzb=new,
                                                         movie=movie,
                                                         quality=quality,
                                                         imdb_results=True,
                                                         single=True)
                            if is_correct_movie:
                                results.append(new)
                                self.found(new)
                    except:
                        log.error('Failed parsing KickAssTorrents: %s',
                                  traceback.format_exc())
                except:
                    pass
            return results
        except AttributeError:
            log.debug('No search results found.')
    return results
def getReleaseNameYear(self, release_name, file_name=None): release_name = release_name.strip(' .-_') # Use guessit first guess = {} if file_name: try: guessit = guess_movie_info(toUnicode(file_name)) if guessit.get('title') and guessit.get('year'): guess = { 'name': guessit.get('title'), 'year': guessit.get('year'), } except: log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc())) # Backup to simple release_name = os.path.basename(release_name.replace('\\', '/')) cleaned = ' '.join(re.split('\W+', simplifyString(release_name))) cleaned = re.sub(self.clean, ' ', cleaned) year = None for year_str in [file_name, release_name, cleaned]: if not year_str: continue year = self.findYear(year_str) if year: break cp_guess = {} if year: # Split name on year try: movie_name = cleaned.rsplit(year, 1).pop(0).strip() if movie_name: cp_guess = { 'name': movie_name, 'year': int(year), } except: pass if not cp_guess: # Split name on multiple spaces try: movie_name = cleaned.split(' ').pop(0).strip() cp_guess = { 'name': movie_name, 'year': int(year) if movie_name[:4] != year else 0, } except: pass if cp_guess.get('year') == guess.get('year') and len( cp_guess.get('name', '')) > len(guess.get('name', '')): cp_guess['other'] = guess return cp_guess elif guess == {}: cp_guess['other'] = guess return cp_guess guess['other'] = cp_guess return guess
def search(self, movie, quality):
    """Query the NZBs API for *movie* at *quality* and return result dicts.

    Fetches (or reuses a cached copy of) the API XML, builds one result
    dict per item and keeps those the searcher confirms as the right
    movie.
    """
    results = []
    if self.isDisabled() or not self.isAvailable(self.urls['api'] +
                                                 '?test' +
                                                 self.getApiExt()):
        return results
    cat_id = self.getCatId(quality.get('identifier'))
    arguments = urlencode({
        'action': 'search',
        'q': simplifyString(movie['library']['titles'][0]['title']),
        'catid': cat_id[0],
        'i': self.conf('id'),
        'h': self.conf('api_key'),
    })
    url = "%s?%s" % (self.urls['api'], arguments)
    cache_key = 'nzbs.%s.%s' % (movie['library'].get('identifier'),
                                str(cat_id))
    # Fetch only on cache miss.
    data = self.getCache(cache_key)
    if not data:
        data = self.urlopen(url)
        self.setCache(cache_key, data)
    if not data:
        log.error('Failed to get data from %s.' % url)
        return results
    if data:
        try:
            try:
                data = XMLTree.fromstring(data)
                nzbs = self.getElements(data, 'channel/item')
            except Exception, e:
                log.debug('%s, %s' % (self.getName(), e))
                return results
            for nzb in nzbs:
                # NOTE: `id` shadows the builtin; numeric nzb id from the link.
                id = int(
                    self.getTextElement(nzb, "link").partition('nzbid=')[2])
                new = {
                    'id': id,
                    'type': 'nzb',
                    'provider': self.getName(),
                    'name': self.getTextElement(nzb, "title"),
                    # Age in days derived from the pubDate timestamp.
                    'age': self.calculateAge(
                        int(
                            time.mktime(
                                parse(self.getTextElement(
                                    nzb, "pubDate")).timetuple()))),
                    # Size is scraped out of the HTML inside <description>.
                    'size': self.parseSize(
                        self.getTextElement(nzb, "description").split(
                            '</a><br />')[1].split('">')[1]),
                    'url': self.urls['download'] % (id, self.getApiExt()),
                    'download': self.download,
                    'detail_url': self.urls['detail'] % id,
                    'description': self.getTextElement(nzb, "description"),
                    'check_nzb': True,
                }
                new['score'] = fireEvent('score.calculate', new, movie,
                                         single=True)
                is_correct_movie = fireEvent('searcher.correct_movie',
                                             nzb=new,
                                             movie=movie,
                                             quality=quality,
                                             imdb_results=False,
                                             single_category=False,
                                             single=True)
                if is_correct_movie:
                    results.append(new)
                    self.found(new)
            return results
        except SyntaxError:
            # NOTE(review): message mentions NZBMatrix though this provider
            # queries NZBs — likely copy-paste; falls through returning None.
            log.error('Failed to parse XML response from NZBMatrix.com')
def search(self, movie, quality): results = [] if self.isDisabled(): return results q = '"%s %s" %s' % (simplifyString(getTitle(movie['library'])), movie['library']['year'], quality.get('identifier')) arguments = tryUrlencode({ 'q': q, 'age': Env.setting('retention', 'nzb'), 'sort': 'agedesc', 'minsize': quality.get('size_min'), 'maxsize': quality.get('size_max'), 'rating': 1, 'max': 250, 'more': 1, 'complete': 1, }) url = "%s?%s" % (self.urls['api'], arguments) cache_key = 'nzbindex.%s.%s' % (movie['library']['identifier'], quality.get('identifier')) data = self.getCache(cache_key, url) if data: try: try: data = XMLTree.fromstring(data) nzbs = self.getElements(data, 'channel/item') except Exception, e: log.debug('%s, %s', (self.getName(), e)) return results for nzb in nzbs: enclosure = self.getElement(nzb, 'enclosure').attrib nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4]) try: description = self.getTextElement(nzb, "description") except: description = '' def extra_check(new): if '#c20000' in new['description'].lower(): log.info('Wrong: Seems to be passworded: %s', new['name']) return False return True new = { 'id': nzbindex_id, 'type': 'nzb', 'provider': self.getName(), 'download': self.download, 'name': self.getTextElement(nzb, "title"), 'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))), 'size': tryInt(enclosure['length']) / 1024 / 1024, 'url': enclosure['url'], 'detail_url': enclosure['url'].replace('/download/', '/release/'), 'description': description, 'get_more_info': self.getMoreInfo, 'extra_check': extra_check, 'check_nzb': True, } is_correct_movie = fireEvent('searcher.correct_movie', nzb = new, movie = movie, quality = quality, imdb_results = False, single = True) if is_correct_movie: new['score'] = fireEvent('score.calculate', new, movie, single = True) results.append(new) self.found(new) return results except:
def correctMovie(self, nzb = {}, movie = {}, quality = {}, **kwargs):
    """Decide whether *nzb* is an acceptable release for *movie*/*quality*.

    Older variant: applies retention, required/ignored word lists (flat,
    comma-separated — no '&' AND-sets), adult-tag filter, quality mismatch,
    size bounds, provider checks, then IMDB/title/year matching.

    NOTE(review): mutable default arguments ({}) are shared across calls;
    harmless while callers always pass values, but fragile.
    """
    imdb_results = kwargs.get('imdb_results', False)
    retention = Env.setting('retention', section = 'nzb')
    # Retention only applies to usenet results (no 'seeds' key).
    if nzb.get('seeds') is None and retention < nzb.get('age', 0):
        log.info('Wrong: Outside retention, age is %s, needs %s or lower: %s',
                 (nzb['age'], retention, nzb['name']))
        return False
    movie_name = getTitle(movie['library'])
    movie_words = re.split('\W+', simplifyString(movie_name))
    nzb_name = simplifyString(nzb['name'])
    nzb_words = re.split('\W+', nzb_name)
    # At least one required word must appear in the release name.
    required_words = [x.strip().lower()
                      for x in self.conf('required_words').lower().split(',')]
    if self.conf('required_words') and not list(set(nzb_words) &
                                                set(required_words)):
        log.info("Wrong: Required word missing: %s" % nzb['name'])
        return False
    # Any ignored word present blacklists the release.
    ignored_words = [x.strip().lower()
                     for x in self.conf('ignored_words').split(',')]
    blacklisted = list(set(nzb_words) & set(ignored_words))
    if self.conf('ignored_words') and blacklisted:
        log.info("Wrong: '%s' blacklisted words: %s" %
                 (nzb['name'], ", ".join(blacklisted)))
        return False
    # Adult-content tags count only when not part of the movie title itself.
    pron_tags = ['xxx', 'sex', 'anal', 't**s', 'f**k', 'p**n', 'orgy',
                 'milf', 'boobs', 'erotica', 'erotic']
    for p_tag in pron_tags:
        if p_tag in nzb_words and p_tag not in movie_words:
            log.info('Wrong: %s, probably pr0n', (nzb['name']))
            return False
    #qualities = fireEvent('quality.all', single = True)
    preferred_quality = fireEvent('quality.single',
                                  identifier = quality['identifier'],
                                  single = True)
    # Contains lower quality string
    if self.containsOtherQuality(nzb,
                                 movie_year = movie['library']['year'],
                                 preferred_quality = preferred_quality):
        log.info('Wrong: %s, looking for %s', (nzb['name'], quality['label']))
        return False
    # File to small
    if nzb['size'] and preferred_quality['size_min'] > nzb['size']:
        log.info('"%s" is too small to be %s. %sMB instead of the minimal of %sMB.',
                 (nzb['name'], preferred_quality['label'], nzb['size'],
                  preferred_quality['size_min']))
        return False
    # File to large
    if nzb['size'] and preferred_quality.get('size_max') < nzb['size']:
        log.info('"%s" is too large to be %s. %sMB instead of the maximum of %sMB.',
                 (nzb['name'], preferred_quality['label'], nzb['size'],
                  preferred_quality['size_max']))
        return False
    # Provider specific functions
    get_more = nzb.get('get_more_info')
    if get_more:
        get_more(nzb)
    extra_check = nzb.get('extra_check')
    if extra_check and not extra_check(nzb):
        return False
    if imdb_results:
        return True
    # Check if nzb contains imdb link
    if self.checkIMDB([nzb['description']], movie['library']['identifier']):
        return True
    for movie_title in movie['library']['titles']:
        movie_words = re.split('\W+', simplifyString(movie_title['title']))
        if self.correctName(nzb['name'], movie_title['title']):
            # if no IMDB link, at least check year range 1
            if len(movie_words) > 2 and self.correctYear(
                    [nzb['name']], movie['library']['year'], 1):
                return True
            # if no IMDB link, at least check year
            if len(movie_words) <= 2 and self.correctYear(
                    [nzb['name']], movie['library']['year'], 0):
                return True
    log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'" %
             (nzb['name'], movie_name, movie['library']['year']))
    return False
def _search(self, movie, quality, results):
    """Search BinNewz for French releases of *movie* at *quality*.

    Appends accepted result dicts to *results* (id/name/url/detail_url/
    size/age/extra_check); returns [] early on a network/parse failure.
    """
    MovieTitles = movie['info']['titles']
    moviequality = simplifyString(quality['identifier'])
    movieyear = movie['info']['year']
    threeD = quality['custom']['3d'] == 1

    # Map the wanted quality to BinNewz category ids and a minimum size (MB).
    if moviequality in ("720p", "1080p", "bd50"):
        cat1 = '39'
        cat2 = '49'
        minSize = 2000
    elif moviequality == "dvdr":
        # was `in ("dvdr")`: a string, so a substring test -- fixed to equality
        cat1 = '23'
        cat2 = '48'
        minSize = 3000
    else:
        cat1 = '6'
        cat2 = '27'
        minSize = 500

    for MovieTitle in MovieTitles:
        # Titles that cannot be encoded latin-1 cannot be posted to the form.
        try:
            TitleStringReal = str(MovieTitle.encode("latin-1").replace('-', ' '))
        except:
            continue

        if threeD:
            TitleStringReal = TitleStringReal + ' 3d'

        data = 'chkInit=1&edTitre=' + TitleStringReal + '&chkTitre=on&chkFichier=on&chkCat=on&cats%5B%5D=' + cat1 + '&cats%5B%5D=' + cat2 + '&edAge=&edYear='
        try:
            soup = BeautifulSoup(urllib2.urlopen(self.urls['search'], data), "html5lib")
        except Exception as e:
            # str(e): concatenating the exception object itself raised TypeError
            log.error(u"Error trying to load BinNewz response: " + str(e))
            return []

        tables = soup.findAll("table", id="tabliste")
        for table in tables:
            rows = table.findAll("tr")
            for row in rows:
                cells = row.select("> td")
                if len(cells) < 11:
                    continue

                name = cells[2].text.strip()
                #filename = cells[5].contents[0]
                testname = namer_check.correctName(name, movie)
                #testfilename=namer_check.correctName(filename,movie)
                if testname == 0:  # and testfilename==0:
                    continue

                # Keep French releases only (language flag icon in the row).
                language = cells[3].find("img").get("src")
                if not "_fr" in language and not "_frq" in language:
                    continue
                if "_fr" in language:
                    detectedlang = ' truefrench '
                else:
                    detectedlang = ' french '

                # blacklist_groups = [ "alt.binaries.multimedia" ]
                blacklist_groups = []

                newgroupLink = cells[4].find("a")
                newsgroup = None
                if newgroupLink.contents:
                    newsgroup = newgroupLink.contents[0]
                    if newsgroup in self.allowedGroups:
                        newsgroup = self.allowedGroups[newsgroup]
                    else:
                        log.error(u"Unknown binnewz newsgroup: " + newsgroup)
                        continue
                    if newsgroup in blacklist_groups:
                        log.error(u"Ignoring result, newsgroup is blacklisted: " + newsgroup)
                        continue

                filename = cells[5].contents[0]

                # Peel decorations off the display name, innermost last:
                # "{quality}", "[source]", "(year)", "(dd/mm/yyyy)", SxxEyy.
                m = re.search(r"^(.+)\s+{(.*)}$", name)
                qualityStr = ""
                if m:
                    name = m.group(1)
                    qualityStr = m.group(2)

                m = re.search(r"^(.+)\s+\[(.*)\]$", name)
                source = None
                if m:
                    name = m.group(1)
                    source = m.group(2)

                m = re.search(r"(.+)\(([0-9]{4})\)", name)
                year = ""
                if m:
                    name = m.group(1)
                    year = m.group(2)
                    # Allow one year of slack around the wanted release year.
                    if int(year) > movieyear + 1 or int(year) < movieyear - 1:
                        continue

                m = re.search(r"(.+)\((\d{2}/\d{2}/\d{4})\)", name)
                dateStr = ""
                if m:
                    name = m.group(1)
                    dateStr = m.group(2)
                    year = dateStr[-5:].strip(")").strip("/")

                m = re.search(r"(.+)\s+S(\d{2})\s+E(\d{2})(.*)", name)
                if m:
                    name = m.group(1) + " S" + m.group(2) + "E" + m.group(3) + m.group(4)

                m = re.search(r"(.+)\s+S(\d{2})\s+Ep(\d{2})(.*)", name)
                if m:
                    name = m.group(1) + " S" + m.group(2) + "E" + m.group(3) + m.group(4)

                filenameLower = filename.lower()
                searchItems = []

                # Derive a quality tag from the [source] when none was given.
                if qualityStr == "":
                    if source in ("Blu Ray-Rip", "HD DVD-Rip"):
                        qualityStr = "brrip"
                    elif source == "DVDRip":
                        qualityStr = "dvdrip"
                    elif source == "TS":
                        qualityStr = "ts"
                    elif source == "DVDSCR":
                        qualityStr = "scr"
                    elif source == "CAM":
                        qualityStr = "cam"
                    elif moviequality == "dvdr":
                        qualityStr = "dvdr"

                if year == '':
                    year = '1900'

                if len(searchItems) == 0 and qualityStr == str(moviequality):
                    searchItems.append(filename)

                for searchItem in searchItems:
                    resultno = 1
                    for downloader in self.nzbDownloaders:
                        log.info("Searching for download : " + name + ", search string = " + searchItem + " on " + downloader.__class__.__name__)
                        try:
                            binsearch_result = downloader.search(searchItem, minSize, newsgroup)
                            if binsearch_result:
                                new = {}

                                def extra_check(item):
                                    return True

                                # Extra keywords appended so the quality
                                # guesser downstream picks the right profile.
                                qualitytag = ''
                                if qualityStr.lower() in ['720p', '1080p']:
                                    qualitytag = ' hd x264 h264 '
                                elif qualityStr.lower() in ['dvdrip']:
                                    qualitytag = ' dvd xvid '
                                elif qualityStr.lower() in ['brrip']:
                                    qualitytag = ' hdrip '
                                elif qualityStr.lower() in ['ts']:
                                    qualitytag = ' webrip '
                                elif qualityStr.lower() in ['scr']:
                                    qualitytag = ''
                                elif qualityStr.lower() in ['dvdr']:
                                    qualitytag = ' pal video_ts '

                                new['id'] = binsearch_result.nzbid
                                new['name'] = name + detectedlang + qualityStr + qualitytag + downloader.__class__.__name__
                                new['url'] = binsearch_result.nzburl
                                new['detail_url'] = binsearch_result.refererURL
                                new['size'] = binsearch_result.sizeInMegs
                                new['age'] = binsearch_result.age
                                new['extra_check'] = extra_check

                                results.append(new)
                                resultno = resultno + 1
                                log.info("Found : " + searchItem + " on " + downloader.__class__.__name__)
                                # Cap at two hits per search item.
                                if resultno == 3:
                                    break
                        except Exception as e:
                            log.error("Searching from " + downloader.__class__.__name__ + " failed : " + str(e) + traceback.format_exc())
def getUrl(self, url): return self.getCache(md5(simplifyString(url)), url=url)
def correctRelease(self, nzb = None, media = None, quality = None, **kwargs):
    """Validate a release dict against the wanted movie and quality profile.

    Returns True when the release is accepted, False when a filter rejects
    it, and None (implicit) when the media is not a movie.
    """

    # Only movie media is handled by this searcher.
    if media.get('type') != 'movie':
        return

    search_title = fireEvent('searcher.get_search_title', media, single = True)
    from_imdb = kwargs.get('imdb_results', False)

    # Usenet retention; torrent results carry 'seeders' and are exempt.
    retention = Env.setting('retention', section = 'nzb')
    if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
        log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s',
                  (nzb['age'], retention, nzb['name']))
        return False

    # Required / ignored word filtering is delegated to the words checker.
    if not fireEvent('searcher.correct_words', nzb['name'], media, single = True):
        return False

    # NOTE(review): if quality were ever falsy, the fallback dereferences
    # quality['identifier'] and would raise; callers appear to always pass a
    # quality dict -- confirm before relying on the else branch.
    preferred_quality = quality if quality else fireEvent('quality.single', identifier = quality['identifier'], single = True)

    # Reject names that advertise a different (usually lower) quality.
    contains_other = fireEvent('searcher.contains_other_quality', nzb,
                               movie_year = media['info']['year'],
                               preferred_quality = preferred_quality, single = True)
    if contains_other != False:
        log.info2('Wrong: %s, looking for %s, found %s',
                  (nzb['name'], quality['label'],
                   list(contains_other) if contains_other else 'no quality'))
        return False

    # 3D wanted but not offered, or offered but not wanted.
    if not fireEvent('searcher.correct_3d', nzb, preferred_quality = preferred_quality, single = True):
        log.info2('Wrong: %s, %slooking for %s in 3D',
                  (nzb['name'],
                   ('' if preferred_quality['custom'].get('3d') else 'NOT '),
                   quality['label']))
        return False

    # Size sanity checks against the quality profile bounds.
    release_size = nzb['size']
    if release_size and tryInt(preferred_quality['size_min']) > tryInt(release_size):
        log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.',
                  (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min']))
        return False

    if release_size and tryInt(preferred_quality['size_max']) < tryInt(release_size):
        log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.',
                  (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max']))
        return False

    # Provider specific hooks.
    get_more = nzb.get('get_more_info')
    if get_more:
        get_more(nzb)

    extra_check = nzb.get('extra_check')
    if extra_check and not extra_check(nzb):
        return False

    if from_imdb:
        return True

    # A matching IMDB id in the description settles it.
    if getImdb(nzb.get('description', '')) == getIdentifier(media):
        return True

    # Otherwise fall back to title-word matching plus a year check whose
    # allowed range loosens for longer (more distinctive) titles.
    for raw_title in media['info']['titles']:
        for alt_title in possibleTitles(raw_title):
            title_words = re.split(r'\W+', simplifyString(alt_title))
            if not fireEvent('searcher.correct_name', nzb['name'], alt_title, single = True):
                continue
            year_range = 1 if len(title_words) > 2 else 0
            if fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], year_range, single = True):
                return True

    log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'",
             (nzb['name'], search_title, media['info']['year']))
    return False
def getSearchParams(self, movie, quality):
    """Build the list of encoded query strings for every movie title.

    For each title two queries are produced (raw spelling, then simplified
    spelling), each with the tracker's term filters for the wanted quality.
    Replaces five copy-pasted branches that differed only in the term[7][]
    suffix with a single lookup table.
    """
    # Term filters common to every quality (cat 210 / subcat 631 search).
    base_terms = "&term%5B17%5D%5B%5D=541&term%5B17%5D%5B%5D=542&term%5B17%5D%5B%5D=719"
    # Quality-specific term[7][] filters; the default covers SD rips.
    quality_terms = {
        '720p': "&term%5B7%5D%5B%5D=15",
        '1080p': "&term%5B7%5D%5B%5D=16",
        'dvd-r': "&term%5B7%5D%5B%5D=13&term%5B7%5D%5B%5D=14",
        'br-disk': "&term%5B7%5D%5B%5D=17",
    }
    default_terms = "&term%5B7%5D%5B%5D=8&term%5B7%5D%5B%5D=9&term%5B7%5D%5B%5D=10"

    results = []
    moviequality = simplifyString(quality['identifier'])
    suffix = base_terms + quality_terms.get(moviequality, default_terms)

    for MovieTitle in movie['library']['info']['titles']:
        # Prefer the latin-1 spelling the tracker expects; fall back to utf-8.
        try:
            TitleStringReal = str(MovieTitle.encode("latin-1").replace('-', ' '))
        except:
            TitleStringReal = str(MovieTitle.encode("utf-8").replace('-', ' '))

        # Query both the raw and the simplified title spelling.
        for search_term in (TitleStringReal, simplifyString(TitleStringReal)):
            results.append(
                urllib.urlencode({
                    'search': search_term,
                    'cat': 210,
                    'submit': 'Recherche',
                    'subcat': 631
                }) + suffix)

    return results