def search_manual(searchstr, languages, year=None):
    search_string = urllib.unquote(searchstr)
    if year:
        search_string = search_string + ' ' + str(year)
    log(__name__, "manual_search='%s', addon_version=%s" % (search_string, __version__))
    url = self_host + "/search.php?search=" + search_string + '&Submit=Search'
    content = get_url(url, 'headers')
    #with open('/root/.kodi/temp/files.py', 'wb') as f: f.write(repr(content))
    #content = test()
    # each result row links to a show/movie page: collect (href, display name) pairs
    regexp = '''</td><td><a href="(.+?)".+?>(.+?)<'''
    match = re.compile(regexp, re.IGNORECASE | re.MULTILINE | re.DOTALL).findall(content)
    if match:
        if len(match) > 1:
            # more than one hit: let the user pick the right show/movie
            dialog = xbmcgui.Dialog()
            sel = dialog.select("Select item", [name for link, name in match])
            if sel >= 0:
                show_url = self_host + "/" + match[int(sel)][0]
                for s in get_subs(show_url, languages, ''):
                    append_subtitle(s)
            else:
                return
        else:
            show_url = self_host + "/" + match[0][0]
            for s in get_subs(show_url, languages, ''):
                append_subtitle(s)
    else:
        # no result links matched: hand the already-fetched page straight to get_subs()
        for s in get_subs('', languages, '', content):
            append_subtitle(s)
    if content is not None:
        return False
def query(searchurl, langs, file_original_path, filename_string):
    sublinks = []
    socket.setdefaulttimeout(3)
    request = urllib2.Request(searchurl)
    request.add_header("Pragma", "no-cache")
    page = urllib2.build_opener().open(request)
    content = page.read()
    content = content.replace("The safer, easier way", 'The safer, easier way " />')
    soup = BeautifulSoup(content)
    file_name = str(os.path.basename(file_original_path)).split("-")[-1].lower()
    for langs_html in soup("td", {"class": "language"}):
        try:
            subs = langs_html.findPrevious("td", {"class": "NewsTitle", "colspan": "3"})
            fullLanguage = str(langs_html).split('class="language">')[1].split("<a")[0].replace("\n", "")
            subteams = self_release_pattern.match(str(subs.contents[1])).groups()[0]
            if str(subteams.replace("WEB-DL-", "").lower()).find(str(file_name)) > -1:
                hashed = True
            else:
                hashed = False
            try:
                lang = get_language_info(fullLanguage)
            except:
                lang = ""
            statusTD = langs_html.findNext("td")
            status = statusTD.find("b").string.strip()
            linkTD = statusTD.findNext("td")
            link = "%s%s" % (self_host, linkTD.find("a")["href"])
            if subs.findNext("td", {"class": "newsDate", "colspan": "2"}).findAll("img", {"title": "Hearing Impaired"}):
                HI = True
            else:
                HI = False
            if status == "Completed" and (lang["3let"] in langs):
                sublinks.append({
                    "rating": "0",
                    "filename": "%s-%s" % (filename_string, subteams),
                    "sync": hashed,
                    "link": link,
                    "lang": lang,
                    "hearing_imp": HI,
                })
        except:
            log(__name__, "ERROR IN BS")
            pass
    sublinks.sort(key=lambda x: [not x["sync"]])
    log(__name__, "sub='%s'" % (sublinks))
    for s in sublinks:
        append_subtitle(s)
def query(searchurl, langs, file_original_path, filename_string):
    log(__name__, "query: 'searchurl=%s, langs=%s, file_original_path=%s, filename_string=%s'"
        % (searchurl, langs, file_original_path, filename_string))
    sublinks = get_subs(searchurl, langs, filename_string)
    sublinks.sort(key=lambda x: [not x['sync']])
    #log(__name__, "sub='%s'" % (sublinks))
    for s in sublinks:
        append_subtitle(s)
def query(search_url, languages, file_original_path=None):
    sub_links = []
    socket.setdefaulttimeout(20)
    request = urllib2.Request(search_url, headers=req_headers)
    request.add_header('Pragma', 'no-cache')
    page = urllib2.build_opener().open(request)
    content = page.read()
    content = content.replace("The safer, easier way", "The safer, easier way \" />")
    soup = BeautifulSoup(content)
    if file_original_path is not None:
        file_original_path_clean = normalize_string(file_original_path.encode('utf-8'))
        file_name = str(os.path.basename(file_original_path_clean)).split("-")[-1].lower()
    else:
        file_name = None
    for language_html in soup("td", {"class": "language"}):
        box = language_html.findPrevious("td", {"class": "NewsTitle", "colspan": "3"})
        full_language = str(language_html).split('class="language">')[1].split('<a')[0].replace("\n", "")
        sub_teams = self_release_pattern.match(str(box.contents[1])).groups()[0]
        if file_name is not None and str(sub_teams.replace("WEB-DL-", "").lower()).find(str(file_name)) > -1:
            hashed = True
        else:
            hashed = False
        sub_language = get_language_info(full_language)
        if sub_language is None:
            sub_language = {}
        status_td = language_html.findNext("td")
        status = status_td.find("b").string.strip()
        link_td = status_td.findNext("td")
        link = "%s%s" % (self_host, link_td.find("a")["href"])
        if box.findNext("td", {"class": "newsDate", "colspan": "2"}).findAll('img', {'title': 'Hearing Impaired'}):
            hearing_imp = True
        else:
            hearing_imp = False
        # .get() avoids a KeyError when the language could not be resolved above
        if status == "Completed" and (sub_language.get('3let') in languages):
            title = soup.find('span', {'class': 'titulo'}).contents[0].strip(' \t\n\r')
            sub_links.append({
                'rating': '0',
                'filename': "%s - %s" % (title, sub_teams),
                'sync': hashed,
                'link': link,
                'lang': sub_language,
                'hearing_imp': hearing_imp,
            })
    sub_links.sort(key=lambda x: [not x['sync']])
    log(__name__, "sub='%s'" % sub_links)
    for sub_link in sub_links:
        append_subtitle(sub_link)
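# The query() variants in this section depend on a get_language_info() helper that is
# not included here. A minimal sketch of what it might look like, assuming all the
# callers need is the display name plus the 2- and 3-letter ISO codes (the '3let' key
# used in the language filters above); the add-on's real implementation may differ.
def get_language_info(full_language):
    # xbmc.convertLanguage() maps an English language name to its ISO 639 codes
    clean = full_language.strip()
    return {'name': clean,
            '2let': xbmc.convertLanguage(clean, xbmc.ISO_639_1),
            '3let': xbmc.convertLanguage(clean, xbmc.ISO_639_2)}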
def query_TvShow(name, season, episode, langs, file_original_path):
    sublinks = []
    # needed for "$#*! My Dad Says" and "That 70s Show"
    name = name.lower().replace(" ", "_").replace("$#*!", "shit").replace("'", "")
    searchurl = "%s/serie/%s/%s/%s/addic7ed" % (self_host, name, season, episode)
    socket.setdefaulttimeout(3)
    request = urllib2.Request(searchurl)
    request.add_header('Pragma', 'no-cache')
    page = urllib2.build_opener().open(request)
    content = page.read()
    content = content.replace("The safer, easier way", "The safer, easier way \" />")
    soup = BeautifulSoup(content)
    file_name = str(os.path.basename(file_original_path)).split("-")[-1].lower()
    for subs in soup("td", {"class": "NewsTitle", "colspan": "3"}):
        try:
            langs_html = subs.findNext("td", {"class": "language"})
            fullLanguage = str(langs_html).split('class="language">')[1].split('<a')[0].replace("\n", "")
            subteams = self_release_pattern.match(str(subs.contents[1])).groups()[0]
            if str(subteams.replace("WEB-DL-", "").lower()).find(str(file_name)) > -1:
                hashed = True
            else:
                hashed = False
            try:
                lang = get_language_info(fullLanguage)
            except:
                lang = ""
            statusTD = langs_html.findNext("td")
            status = statusTD.find("b").string.strip()
            linkTD = statusTD.findNext("td")
            link = "%s%s" % (self_host, linkTD.find("a")["href"])
            if subs.findNext("td", {"class": "newsDate", "colspan": "2"}).findAll('img', {'title': 'Hearing Impaired'}):
                HI = True
            else:
                HI = False
            if status == "Completed" and (lang['3let'] in langs):
                sublinks.append({'rating': '0',
                                 'filename': "%s.S%.2dE%.2d-%s" % (name.replace("_", ".").title(), int(season), int(episode), subteams),
                                 'sync': hashed,
                                 'link': link,
                                 'lang': lang,
                                 'hearing_imp': HI})
        except:
            log(__name__, "ERROR IN BS")
            pass
    sublinks.sort(key=lambda x: [not x['sync']])
    log(__name__, "sub='%s'" % (sublinks))
    for s in sublinks:
        append_subtitle(s)
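# append_subtitle() is called throughout this section but never defined. In Kodi
# subtitle services it usually wraps the result dict in a ListItem and registers it
# with xbmcplugin; a rough sketch under that assumption. The plugin URL layout and
# the __scriptid__ name are illustrative, and the sketch assumes the add-on's usual
# module-level imports (sys, urllib, xbmcgui, xbmcplugin).
def append_subtitle(item):
    listitem = xbmcgui.ListItem(label=item['lang']['name'],           # language column
                                label2=item['filename'],              # release / file name
                                iconImage=item['rating'],             # rating stars
                                thumbnailImage=item['lang']['2let'])  # language flag
    listitem.setProperty("sync", 'true' if item['sync'] else 'false')
    listitem.setProperty("hearing_imp", 'true' if item['hearing_imp'] else 'false')
    # the plugin URL carries the download link back to the add-on's action handler
    url = "plugin://%s/?action=download&link=%s&filename=%s" % (
        __scriptid__, urllib.quote_plus(item['link']), urllib.quote_plus(item['filename']))
    xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url, listitem=listitem, isFolder=False)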
def search(data):
    filename = os.path.splitext(os.path.basename(data['file_original_path']))[0]
    log(__name__, "Search_addic7ed='%s', filename='%s', addon_version=%s" % (data, filename, __version__))
    if data['mansearch']:
        search_manual(data['mansearchstr'], data['3let_language'])
    elif data['tvshow']:
        query_tvshow(data['tvshow'], data['season'], data['episode'], data['3let_language'], filename)
    elif data['title'] and data['year']:
        query_film(data['title'], data['year'], data['3let_language'], filename)
    else:
        search_filename(filename)
def Search(item):
    filename = os.path.splitext(os.path.basename(item['file_original_path']))[0]
    log(__name__, "Search_addic7ed='%s', filename='%s', addon_version=%s" % (item, filename, __version__))
    if item['mansearch']:
        search_manual(item['mansearchstr'], item['3let_language'], filename)
    elif item['tvshow']:
        query_TvShow(item['tvshow'], item['season'], item['episode'], item['3let_language'], filename)
    elif item['title'] and item['year']:
        query_Film(item['title'], item['year'], item['3let_language'], filename)
    else:
        search_filename(filename, item['3let_language'])
def Search(item):
    filename = os.path.splitext(os.path.basename(item["file_original_path"]))[0]
    log(__name__, "Search_addic7ed='%s', filename='%s', addon_version=%s" % (item, filename, __version__))
    if item["mansearch"]:
        search_manual(item["mansearchstr"], item["3let_language"], filename)
    elif item["tvshow"]:
        query_TvShow(item["tvshow"], item["season"], item["episode"], item["3let_language"], filename)
    elif item["title"] and item["year"]:
        query_Film(item["title"], item["year"], item["3let_language"], filename)
    else:
        search_filename(filename, item["3let_language"])
def Search(item):
    filename = os.path.splitext(os.path.basename(item['file_original_path']))[0]
    log(__name__, "Search_addic7ed='%s', filename='%s', addon_version=%s" % (item, filename, __version__))
    if item['mansearch']:
        search_manual(item['mansearchstr'], item['3let_language'], filename)
    elif item['tvshow']:
        query_TvShow(item['tvshow'], item['season'], item['episode'], item['3let_language'], filename)
    elif item['title'] and item['year']:
        query_Film(item['title'], item['year'], item['3let_language'], filename)
    else:
        search_filename(filename, item['3let_language'])
def query(searchurl, langs, file_original_path, filename_string):
    sublinks = []
    socket.setdefaulttimeout(20)
    request = urllib2.Request(searchurl, headers=req_headers)
    request.add_header('Pragma', 'no-cache')
    page = urllib2.build_opener().open(request)
    content = page.read()
    content = content.replace("The safer, easier way", "The safer, easier way \" />")
    soup = BeautifulSoup(content)
    file_original_path_clean = normalizeString(file_original_path.encode('utf-8'))
    file_name = str(os.path.basename(file_original_path_clean)).split("-")[-1].lower()
    for langs_html in soup("td", {"class": "language"}):
        try:
            subs = langs_html.findPrevious("td", {"class": "NewsTitle", "colspan": "3"})
            fullLanguage = str(langs_html).split('class="language">')[1].split('<a')[0].replace("\n", "")
            subteams = self_release_pattern.match(str(subs.contents[1])).groups()[0]
            if str(subteams.replace("WEB-DL-", "").lower()).find(str(file_name)) > -1:
                hashed = True
            else:
                hashed = False
            try:
                lang = get_language_info(fullLanguage)
            except:
                lang = ""
            statusTD = langs_html.findNext("td")
            status = statusTD.find("b").string.strip()
            linkTD = statusTD.findNext("td")
            link = "%s%s" % (self_host, linkTD.find("a")["href"])
            if subs.findNext("td", {"class": "newsDate", "colspan": "2"}).findAll('img', {'title': 'Hearing Impaired'}):
                HI = True
            else:
                HI = False
            if status == "Completed" and (lang['3let'] in langs):
                sublinks.append({'rating': '0',
                                 'filename': "%s-%s" % (filename_string, subteams),
                                 'sync': hashed,
                                 'link': link,
                                 'lang': lang,
                                 'hearing_imp': HI})
        except:
            log(__name__, "ERROR IN BS")
            pass
    sublinks.sort(key=lambda x: [not x['sync']])
    log(__name__, "sub='%s'" % (sublinks))
    for s in sublinks:
        append_subtitle(s)
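# normalizeString() above (normalize_string() in the earlier variant) is another
# helper missing from this section. A plausible sketch, assuming its only job is to
# strip accents so the byte-level release-name comparison stays ASCII-safe:
import unicodedata

def normalizeString(obj):
    if isinstance(obj, str):
        # Python 2 byte string (e.g. file_original_path.encode('utf-8')) -> unicode
        obj = obj.decode('utf-8', 'ignore')
    return unicodedata.normalize('NFKD', obj).encode('ascii', 'ignore')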
def query(searchurl, langs, file_original_path, filename_string):
    sublinks = []
    socket.setdefaulttimeout(3)
    request = urllib2.Request(searchurl)
    request.add_header('Pragma', 'no-cache')
    page = urllib2.build_opener().open(request)
    content = page.read()
    content = content.replace("The safer, easier way", "The safer, easier way \" />")
    soup = BeautifulSoup(content)
    file_name = str(os.path.basename(file_original_path)).split("-")[-1].lower()
    for langs_html in soup("td", {"class": "language"}):
        try:
            subs = langs_html.findPrevious("td", {"class": "NewsTitle", "colspan": "3"})
            fullLanguage = str(langs_html).split('class="language">')[1].split('<a')[0].replace("\n", "")
            subteams = self_release_pattern.match(str(subs.contents[1])).groups()[0]
            if str(subteams.replace("WEB-DL-", "").lower()).find(str(file_name)) > -1:
                hashed = True
            else:
                hashed = False
            try:
                lang = get_language_info(fullLanguage)
            except:
                lang = ""
            statusTD = langs_html.findNext("td")
            status = statusTD.find("b").string.strip()
            linkTD = statusTD.findNext("td")
            link = "%s%s" % (self_host, linkTD.find("a")["href"])
            if subs.findNext("td", {"class": "newsDate", "colspan": "2"}).findAll('img', {'title': 'Hearing Impaired'}):
                HI = True
            else:
                HI = False
            if status == "Completed" and (lang['3let'] in langs):
                sublinks.append({'rating': '0',
                                 'filename': "%s-%s" % (filename_string, subteams),
                                 'sync': hashed,
                                 'link': link,
                                 'lang': lang,
                                 'hearing_imp': HI})
        except:
            log(__name__, "ERROR IN BS")
            pass
    sublinks.sort(key=lambda x: [not x['sync']])
    log(__name__, "sub='%s'" % (sublinks))
    for s in sublinks:
        append_subtitle(s)
def search_filename(filename, languages):
    title, year = xbmc.getCleanMovieTitle(filename)
    log(__name__, 'clean title: "%s" (%s)' % (title, year))
    try:
        yearval = int(year)
    except ValueError:
        yearval = 0
    if title and yearval > 1900:
        query_Film(title, year, languages, filename)
    else:
        match = re.search(r"\WS(?P<season>\d\d)E(?P<episode>\d\d)", title, flags=re.IGNORECASE)
        if match is not None:
            tvshow = string.strip(title[: match.start("season") - 1])
            season = string.lstrip(match.group("season"), "0")
            episode = string.lstrip(match.group("episode"), "0")
            query_TvShow(tvshow, season, episode, languages, filename)
        else:
            search_manual(filename, languages, filename)
def search_filename(filename, languages):
    title, year = xbmc.getCleanMovieTitle(filename)
    log(__name__, "clean title: \"%s\" (%s)" % (title, year))
    try:
        yearval = int(year)
    except ValueError:
        yearval = 0
    if title and yearval > 1900:
        query_Film(title, year, languages, filename)
    else:
        match = re.search(r'\WS(?P<season>\d\d)E(?P<episode>\d\d)', title, flags=re.IGNORECASE)
        if match is not None:
            tvshow = string.strip(title[:match.start('season') - 1])
            season = string.lstrip(match.group('season'), '0')
            episode = string.lstrip(match.group('episode'), '0')
            query_TvShow(tvshow, season, episode, languages, filename)
        else:
            search_manual(filename, languages, filename)
def Search(item):
    filename = os.path.splitext(os.path.basename(item['file_original_path']))[0]
    #log(__name__, "Search_filename='%s', addon_version=%s" % (filename, __version__))
    if item['mansearch']:
        search_manual(item['mansearchstr'], item['3let_language'])
    if len(item['tvshow']) > 0:
        query_TvShow(item['tvshow'], item['season'], item['episode'], item['3let_language'], filename)
    else:
        if str(item['year']) == "":
            titlu = item['title']
            item['title'], item['year'] = xbmc.getCleanMovieTitle(titlu)
            #log(__name__, "first item from filename='%s'" % (titlu))
            episodes = re.compile(r'S(\d{1,2})E(\d{1,2})', re.IGNORECASE).findall(item['title'])
            if episodes:
                item['season'] = episodes[0][0]
                item['episode'] = episodes[0][1]
            else:
                episodes = re.compile(r'(\d)(\d{1,2})', re.IGNORECASE).findall(item['title'])
                if episodes:
                    item['season'] = episodes[0][0]
                    item['episode'] = episodes[0][1]
            item['title'] = addic7ize((re.sub(r'(\d)(\d{1,2})', '', re.sub(r'S(\d{1,2})E(\d{1,2})', '', item['title']))).strip())
            try:
                item['title'] = item['title'].split(' ', 1)[0]
            except:
                pass
            log(__name__, "item from filename='%s'" % (item))
            if len(item['season']) > 0 and len(item['episode']) > 0:
                query_TvShow(item['title'], item['season'], item['episode'], item['3let_language'], filename)
            else:
                if item['year']:
                    query_Film(item['title'], item['year'], item['3let_language'], filename)
                else:
                    search_manual(item['title'], item['3let_language'], item['year'])
        else:
            query_Film(item['title'], item['year'], item['3let_language'], filename)
def query_TvShow(name, season, episode, langs, file_original_path):
    log(__name__, "query show: 'name=%s, season=%s, episode=%s, langs=%s, file_original_path=%s'"
        % (name, season, episode, langs, file_original_path))
    name = addic7ize(name).lower().replace(" ", "_")
    searchurl = "%s/serie/%s/%s/%s/addic7ed" % (self_host, name, season, episode)
    filename_string = "%s.S%.2dE%.2d" % (name.replace("_", ".").title(), int(season), int(episode))
    query(searchurl, langs, file_original_path, filename_string)
def query_Film(name, year, langs, file_original_path):
    log(__name__, "query film: 'name=%s, year=%s, langs=%s, file_original_path=%s'"
        % (name, year, langs, file_original_path))
    name = urllib.quote(name.replace(" ", "_"))
    searchurl = "%s/film/%s_(%s)-Download" % (self_host, name, str(year))
    filename_string = "%s" % (name.replace("_", ".").title())
    query(searchurl, langs, file_original_path, filename_string)
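# For context, Kodi invokes a subtitle service with an action string in sys.argv[2];
# a condensed sketch of the glue that would build `item` and call Search(). The key
# names follow the dicts used above; everything else (parameter handling, info labels)
# is the usual subtitle-service boilerplate and assumes the standard imports
# (sys, urllib, urlparse, xbmc, xbmcplugin) are present at module level.
import sys, urllib, urlparse

params = dict(urlparse.parse_qsl(sys.argv[2].lstrip('?')))
if params.get('action') in ('search', 'manualsearch'):
    item = {'file_original_path': urllib.unquote(xbmc.Player().getPlayingFile().decode('utf-8')),
            'tvshow': xbmc.getInfoLabel("VideoPlayer.TVshowtitle"),
            'season': xbmc.getInfoLabel("VideoPlayer.Season"),
            'episode': xbmc.getInfoLabel("VideoPlayer.Episode"),
            'title': xbmc.getInfoLabel("VideoPlayer.OriginalTitle"),
            'year': xbmc.getInfoLabel("VideoPlayer.Year"),
            'mansearch': params.get('action') == 'manualsearch',
            'mansearchstr': params.get('searchstring', ''),
            '3let_language': [xbmc.convertLanguage(l, xbmc.ISO_639_2)
                              for l in urllib.unquote(params.get('languages', '')).split(',')]}
    Search(item)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))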