def wait_for_nzf(folder, sab_nzo_id, nzf):
    """Block until *nzf*'s rar file exists (and is fully written) in *folder*.

    Shows a progress dialog while waiting; the user may cancel and choose to
    delete the SABnzbd job or let it download in the background.

    Args:
        folder: directory where SABnzbd writes the incomplete download.
        sab_nzo_id: SABnzbd job (nzo) id.
        nzf: file object within the job; .filename and .bytes are read here.

    Returns:
        True if the user canceled the wait, False otherwise.
    """
    log("wait_for_nzf: folder: %s sab_nzo_id: %s nzf.filename: %s" %
        (folder, sab_nzo_id, nzf.filename))
    iscanceled = False
    is_rar_found = False
    # If rar exist we skip dialogs
    some_rar = os.path.join(folder, nzf.filename)
    if utils.exists(some_rar):
        is_rar_found = True
    if not is_rar_found:
        progressDialog = xbmcgui.DialogProgress()
        progressDialog.create('Pneumatic',
                              'Request to SABnzbd succeeded, waiting for ',
                              utils.short_string(nzf.filename))
        time_now = time.time()
        while not is_rar_found:
            time.sleep(1)
            if utils.exists(some_rar):
                # TODO Look for optimization
                # Wait until the file is written to disk before proceeding:
                # poll until two consecutive size reads agree and are nonzero.
                size_now = float(nzf.bytes)
                size_later = 0
                while (size_now != size_later) or (size_now == 0) or (size_later == 0):
                    size_now = utils.size(some_rar)
                    if size_now != size_later:
                        time.sleep(0.5)
                        size_later = utils.size(some_rar)
                is_rar_found = True
                break
            nzo = Nzo(sab_nzo_id)
            m_nzf = nzo.get_nzf_id(nzf.nzf_id)
            percent, label = utils.wait_for_rar_label(nzo, m_nzf, time_now)
            progressDialog.update(percent,
                                  'Request to SABnzbd succeeded, waiting for',
                                  utils.short_string(nzf.filename), label)
            if progressDialog.iscanceled():
                progressDialog.close()
                dialog = xbmcgui.Dialog()
                ret = dialog.select('What do you want to do?',
                                    ['Delete job', 'Just download'])
                # Fix for hang when playing .strm
                xbmc.Player().stop()
                xbmc.executebuiltin('Dialog.Close(all, true)')
                if ret == 0:
                    sabnzbd.nzo_pause(sab_nzo_id)
                    # give SABnzbd time to release file handles before delete
                    time.sleep(3)
                    delete_ = sabnzbd.nzo_delete_files(sab_nzo_id)
                    if "ok" not in delete_:
                        xbmc.log(delete_)
                        utils.notification("Deleting failed")
                    else:
                        utils.notification("Deleting succeeded")
                elif ret == 1:
                    # allow the previous select dialog to close
                    time.sleep(1)
                    just_download({'nzoid': sab_nzo_id})
                return True
        progressDialog.close()
    return iscanceled
def wait_for_rar(folder, sab_nzo_id, some_rar):
    """Block until *some_rar* appears (fully written) in *folder*.

    Shows a progress dialog with an elapsed-seconds label; the user may
    cancel and choose to delete the SABnzbd job or keep downloading.

    Args:
        folder: directory where SABnzbd writes the incomplete download.
        sab_nzo_id: SABnzbd job (nzo) id.
        some_rar: file name to wait for.

    Returns:
        True if the user canceled the wait, False otherwise.
    """
    # Single canceled flag (the original mixed isCanceled/iscanceled).
    is_canceled = False
    is_rar_found = False
    # If some_rar exist we skip dialogs.  Avoid shadowing the builtins
    # `file`/`bytes` with fname/fbytes.
    for fname, fbytes in utils.sorted_rar_file_list(utils.list_dir(folder)):
        if fname == some_rar:
            is_rar_found = True
            break
    if not is_rar_found:
        seconds = 0
        progressDialog = xbmcgui.DialogProgress()
        progressDialog.create('NZBS',
                              'Request to SABnzbd succeeded, waiting for ',
                              utils.short_string(some_rar))
        while not is_rar_found:
            seconds += 1
            time.sleep(1)
            dirList = utils.sorted_rar_file_list(utils.list_dir(folder))
            for fname, fbytes in dirList:
                if fname == some_rar:
                    path = os.path.join(folder, fname)
                    # Wait until the file is written to disk before
                    # proceeding: poll until two consecutive size reads
                    # agree and are nonzero.
                    size_now = int(fbytes)
                    size_later = 0
                    while (size_now != size_later) or (size_now == 0) or (size_later == 0):
                        size_now = os.stat(path).st_size
                        if size_now != size_later:
                            time.sleep(0.5)
                            size_later = os.stat(path).st_size
                    is_rar_found = True
                    break
            label = str(seconds) + " seconds"
            progressDialog.update(0,
                                  'Request to SABnzbd succeeded, waiting for',
                                  utils.short_string(some_rar), label)
            if progressDialog.iscanceled():
                progressDialog.close()
                dialog = xbmcgui.Dialog()
                ret = dialog.select('What do you want to do?',
                                    ['Delete job', 'Just download'])
                if ret == 0:
                    SABNZBD.pause('', sab_nzo_id)
                    # give SABnzbd time to pause before deleting the job
                    time.sleep(3)
                    delete_ = SABNZBD.delete_queue('', sab_nzo_id)
                    if "ok" not in delete_:
                        xbmc.log(delete_)
                        xbmc.executebuiltin('Notification("NZBS","Deleting failed")')
                    else:
                        xbmc.executebuiltin('Notification("NZBS","Deleting succeeded")')
                    is_canceled = True
                    return is_canceled
                if ret == 1:
                    is_canceled = True
                    xbmc.executebuiltin('Notification("NZBS","Downloading")')
                    return is_canceled
        progressDialog.close()
    return is_canceled
def read_rss(url, name, pre_msg, find_xml):
    """Check an RSS/Atom feed for a new entry and tweet it.

    The id of the last-seen entry is persisted in a file named *name* under
    settings['ignore_loc']; if the newest entry matches it, nothing happens.

    Args:
        url: feed URL.
        name: feed name, also used as the state-file name.
        pre_msg: text prepended to the tweet.
        find_xml: dict describing where to find entries/ids/links/messages
            in the parsed XML tree.

    Returns:
        False when the feed is unreadable or there is no new entry;
        otherwise falls through after posting (returns None).
    """
    # Use a context manager so the state-file handle is not leaked.
    with open(os.path.join(settings['ignore_loc'], name), 'r') as f:
        recent_id = f.read()
    try:
        rss = urllib.request.urlopen(url).read().decode("utf-8")
        xml = etree.fromstring(rss)
    except Exception:
        # Don't need anymore than this for something like this
        print("Failed to read/parse {0} ({1}) RSS".format(name, url))
        return False
    if bool(find_xml['sub_listing']):
        entry = xml[0][find_xml['entries_in']]
    else:
        entry = xml[find_xml['entries_in']]
    current_id = entry.findtext(find_xml['entry_id'])
    if current_id == recent_id:
        return False
    # Remember this entry so it is not tweeted again.
    with open(os.path.join(settings['ignore_loc'], name), "w") as f:
        f.write(current_id)
    if bool(find_xml['get_href']):
        msg_url = entry.find(find_xml['link_id']).get('href')
    else:
        msg_url = entry.findtext(find_xml['link_id'])
    # Strip HTML tags, drop blank lines and collapse runs of spaces.
    msg_msg = re.sub('<[^<]+?>', '', entry.findtext(find_xml['msg_id']))
    msg_msg = re.sub(
        ' +', ' ',
        os.linesep.join([s for s in msg_msg.splitlines() if s])).lstrip()
    msg = "{0}{1}\n{2}".format(pre_msg, utils.short_string(msg_msg, 90), msg_url)
    post_tweet(status_api, msg)
def read_rss(url, name, pre_msg, find_xml):
    """Check an RSS/Atom feed for a new entry and post it as a status.

    The id of the last-seen entry is persisted in a file named *name* under
    settings['ignore_loc']; if the newest entry matches it, nothing happens.

    Args:
        url: feed URL.
        name: feed name, also used as the state-file name.
        pre_msg: text prepended to the status message.
        find_xml: dict describing where to find entries/ids/links/messages
            in the parsed XML tree.

    Returns:
        False when the feed is unreadable or there is no new entry;
        otherwise falls through after posting (returns None).
    """
    # Use a context manager so the state-file handle is not leaked.
    with open(os.path.join(settings['ignore_loc'], name), 'r') as f:
        recent_id = f.read()
    try:
        rss = urllib.request.urlopen(url).read().decode("utf-8")
        xml = etree.fromstring(rss)
    except Exception:
        # Don't need anymore than this for something like this
        print("Failed to read/parse {0} ({1}) RSS".format(name, url))
        return False
    if bool(find_xml['sub_listing']):
        entry = xml[0][find_xml['entries_in']]
    else:
        entry = xml[find_xml['entries_in']]
    current_id = entry.findtext(find_xml['entry_id'])
    if current_id == recent_id:
        return False
    # Remember this entry so it is not posted again.
    with open(os.path.join(settings['ignore_loc'], name), "w") as f:
        f.write(current_id)
    if bool(find_xml['get_href']):
        msg_url = entry.find(find_xml['link_id']).get('href')
    else:
        msg_url = entry.findtext(find_xml['link_id'])
    # Strip HTML tags, drop blank lines and collapse runs of spaces.
    msg_msg = re.sub('<[^<]+?>', '', entry.findtext(find_xml['msg_id']))
    msg_msg = re.sub(' +', ' ', os.linesep.join(
        [s for s in msg_msg.splitlines() if s])).lstrip()
    msg = "{0}{1}\n{2}".format(pre_msg, utils.short_string(msg_msg, 90), msg_url)
    # NOTE(review): the mangled original reads "if not DEBUG: print(msg)
    # status_api.update_status(...)" on one line; both statements are kept
    # under the DEBUG guard here — confirm against upstream history.
    if not DEBUG:
        print(msg)
        status_api.update_status(status=msg)
def wait_for_nzf(folder, sab_nzo_id, nzf):
    """Block until *nzf*'s rar file exists (and is fully written) in *folder*.

    Shows a progress dialog while waiting; the user may cancel and choose to
    delete the SABnzbd job or let it download in the background.

    Args:
        folder: directory where SABnzbd writes the incomplete download.
        sab_nzo_id: SABnzbd job (nzo) id.
        nzf: file object within the job; .filename and .bytes are read here.

    Returns:
        True if the user canceled the wait, False otherwise.
    """
    log("wait_for_nzf: folder: %s sab_nzo_id: %s nzf.filename: %s" %
        (folder, sab_nzo_id, nzf.filename))
    iscanceled = False
    is_rar_found = False
    # If rar exist we skip dialogs
    some_rar = os.path.join(folder, nzf.filename)
    if utils.exists(some_rar):
        is_rar_found = True
    if not is_rar_found:
        progressDialog = xbmcgui.DialogProgress()
        progressDialog.create('Pneumatic',
                              'Request to SABnzbd succeeded, waiting for ',
                              utils.short_string(nzf.filename))
        time_now = time.time()
        while not is_rar_found:
            time.sleep(1)
            if utils.exists(some_rar):
                # TODO Look for optimization
                # Wait until the file is written to disk before proceeding:
                # poll until two consecutive size reads agree and are nonzero.
                size_now = int(nzf.bytes)
                size_later = 0
                while (size_now != size_later) or (size_now == 0) or (size_later == 0):
                    size_now = utils.size(some_rar)
                    if size_now != size_later:
                        time.sleep(0.5)
                        size_later = utils.size(some_rar)
                is_rar_found = True
                break
            nzo = sabnzbd.Nzo(SABNZBD, sab_nzo_id)
            m_nzf = nzo.get_nzf_id(nzf.nzf_id)
            percent, label = utils.wait_for_rar_label(nzo, m_nzf, time_now)
            progressDialog.update(percent,
                                  'Request to SABnzbd succeeded, waiting for',
                                  utils.short_string(nzf.filename), label)
            if progressDialog.iscanceled():
                progressDialog.close()
                dialog = xbmcgui.Dialog()
                ret = dialog.select('What do you want to do?',
                                    ['Delete job', 'Just download'])
                # Fix for hang when playing .strm
                xbmc.Player().stop()
                xbmc.executebuiltin('Dialog.Close(all, true)')
                if ret == 0:
                    SABNZBD.pause_queue(id=sab_nzo_id)
                    # give SABnzbd time to pause before deleting the job
                    time.sleep(3)
                    delete_ = SABNZBD.delete_queue('', sab_nzo_id)
                    if "ok" not in delete_:
                        xbmc.log(delete_)
                        utils.notification("Deleting failed")
                    else:
                        utils.notification("Deleting succeeded")
                elif ret == 1:
                    # allow the previous select dialog to close
                    time.sleep(1)
                    just_download({'nzoid': sab_nzo_id})
                return True
        progressDialog.close()
    return iscanceled
def source(api, status):
    """Reply to a "source?" mention with artist/series/character info.

    Looks up the image the mention replies to on iqdb, then scrapes the
    matching Sankaku/Danbooru post for artist, series and character names.

    Args:
        api: Twitter API client (attribute-style tweepy objects).
        status: the mention status; .in_reply_to_status_id and .text are read.

    Returns:
        The reply text (may be an error/apology message).
    """

    def info(image):
        """Return (artist, series, names) for *image*.

        All three are "OFFLINE" when scraping fails, all False when iqdb has
        no relevant match, otherwise strings (possibly empty).
        """
        url = "http://iqdb.org/?url=%s" % (str(image))
        soup = utils.scrape_site(url)
        if not soup:
            return "OFFLINE", "OFFLINE", "OFFLINE"
        if soup.find('th', text="No relevant matches"):
            return False, False, False
        site = None
        links = soup.find_all('a')
        for link in links:
            try:
                link['href']
            except KeyError:
                # anchor without href — skip it
                continue
            if link.string == "(hide)":
                # Haven't broke yet, onto low results
                return False, False, False
            if "chan.sankakucomplex.com/post/show/" in link['href']:
                if "http" not in link['href']:
                    url = "http:" + link['href']
                else:
                    url = link['href']
                site = 0
                break
            elif "http://danbooru.donmai.us/posts/" in link['href']:
                url = link['href']
                site = 1
                break
        if site is None:
            # No link found!
            return False, False, False
        soup = utils.scrape_site(url)
        if not soup:
            return "OFFLINE", "OFFLINE", "OFFLINE"
        try:
            if site == 0:  # Sankaku
                artist = soup.find(
                    'li', class_="tag-type-artist").find_next('a').text.title()
            elif site == 1:  # Danbooru
                artist = soup.find(
                    'h2', text="Artist").find_next(
                    'a', class_="search-tag").text.title()
        except Exception:
            artist = ""
        try:
            if site == 0:  # Sankaku
                series = soup.find(
                    'li', class_="tag-type-copyright").find_next(
                    'a').text.title()
            elif site == 1:  # Danbooru
                series = soup.find(
                    'h2', text="Copyrights").find_next(
                    'a', class_="search-tag").text.title()
        except Exception:
            series = ""
        try:
            if site == 0:  # Sankaku
                names = soup.find_all('li', class_="tag-type-character")
            elif site == 1:  # Danbooru
                names = soup.find_all('li', class_="category-4")
            name = []
            for a in names:
                if site == 0:
                    a = a.find_next('a').text.title()
                elif site == 1:
                    a = a.find_next('a').find_next('a').text.title()
                # drop trailing "(series)" qualifiers from character names
                name.append(re.sub(r' \([^)]*\)', '', a))
            name = list(set(name))
            if len(name) >= 2:
                # too many names for a tweet — paste them instead
                names = utils.make_paste(
                    text='\n'.join(name), title="Source Names")
            else:
                names = ''.join(name)
        except Exception:
            names = ""
        return artist, series, names

    def tweeted_image(api, status):
        """Return the image url from the tweet."""
        try:
            tweet = api.get_status(status.in_reply_to_status_id)
            tweet = tweet.entities['media'][0]['media_url_https']
            if "tweet_video_thumb" in str(tweet):
                is_gif = True
            else:
                is_gif = False
            if ".mp4" in tweet:
                return "Sorry, source is a video and not an image!", False
            return tweet, is_gif
        except Exception:
            return "Are you sure you're asking for source on an image?", False

    tweeted_image, is_gif = tweeted_image(api, status)
    if ("Are you sure" in tweeted_image) or ("source is a" in tweeted_image):
        count_trigger("source")
        return tweeted_image
    artist, series, names = info(tweeted_image)
    saucenao = u"http://saucenao.com/search.php?urlify=1&url={0}".format(
        str(tweeted_image))
    if artist == "OFFLINE":
        return "Unable to search for source! Try using SauceNAO: " + saucenao
    if not artist and not series and not names:
        return "No relevant source information found!\n" + saucenao
    else:
        if artist:
            artist = "By: {0}\n".format(artist)
        if names:
            names = "Character(s): {0}\n".format(names)
        if series:
            series = "From: {0}\n".format(utils.short_string(series, 25))
        # Collect unique @handles from the mention, minus the bot's own.
        handles = status.text.lower()
        handles = [word for word in handles.split() if word.startswith('@')]
        handles = list(set(handles))
        handles = ' '.join(handles).replace(
            "@" + settings["twitter_track"][0].lower(), "")
        m = "{0}{1}{2}".format(artist, names, series)
        if is_gif:
            m += "*Source is a gif so this could be inaccurate.\n"
        if (len(m) + 24) >= 120:
            # too long for a tweet — paste the details and link to them
            m = utils.make_paste(m)
            m = "Source information is too long to Tweet:\n" + m + "\n"
        m = "{0}\n{1}{2}".format(handles, m, saucenao)
        return m.replace("&Amp;", "&")
def short_string(self, s, limit):
    """Shorten *s* to *limit* via the module-level short_string helper."""
    return short_string(s, limit)
def source(api, status):
    """Reply to a "source?" mention with artist/series/character info.

    Looks up the image the mention replies to on iqdb, then scrapes the
    matching Sankaku/Danbooru post for artist, series and character names.

    Args:
        api: Twitter API client (dict-style statuses via lookup_status).
        status: the mention status dict; 'in_reply_to_status_id' and 'text'
            keys are read.

    Returns:
        The reply text (may be an error/apology message).
    """

    def info(image):
        """Return (artist, series, names) for *image*.

        All three are "OFFLINE" when scraping fails, all False when iqdb has
        no relevant match, otherwise strings (possibly empty).
        """
        url = "http://iqdb.org/?url=%s" % (str(image))
        soup = utils.scrape_site(url)
        if not soup:
            return "OFFLINE", "OFFLINE", "OFFLINE"
        if soup.find('th', text="No relevant matches"):
            return False, False, False
        site = None
        links = soup.find_all('a')
        for link in links:
            try:
                link['href']
            except KeyError:
                # anchor without href — skip it
                continue
            if link.string == "(hide)":
                # Haven't broke yet, onto low results
                return False, False, False
            if "chan.sankakucomplex.com/post/show/" in link['href']:
                if "http" not in link['href']:
                    url = "http:" + link['href']
                else:
                    url = link['href']
                site = 0
                break
            elif "http://danbooru.donmai.us/posts/" in link['href']:
                url = link['href']
                site = 1
                break
        if site is None:
            # No link found!
            return False, False, False
        soup = utils.scrape_site(url)
        if not soup:
            return "OFFLINE", "OFFLINE", "OFFLINE"
        try:
            if site == 0:  # Sankaku
                artist = soup.find(
                    'li', class_="tag-type-artist").find_next('a').text.title()
            elif site == 1:  # Danbooru
                artist = soup.find(
                    'h2', text="Artist").find_next(
                    'a', class_="search-tag").text.title()
        except Exception:
            artist = ""
        try:
            if site == 0:  # Sankaku
                series = soup.find(
                    'li', class_="tag-type-copyright").find_next(
                    'a').text.title()
            elif site == 1:  # Danbooru
                series = soup.find(
                    'h2', text="Copyrights").find_next(
                    'a', class_="search-tag").text.title()
        except Exception:
            series = ""
        try:
            if site == 0:  # Sankaku
                names = soup.find_all('li', class_="tag-type-character")
            elif site == 1:  # Danbooru
                names = soup.find_all('li', class_="category-4")
            name = []
            for a in names:
                if site == 0:
                    a = a.find_next('a').text.title()
                elif site == 1:
                    a = a.find_next('a').find_next('a').text.title()
                # drop trailing "(series)" qualifiers from character names
                name.append(re.sub(r' \([^)]*\)', '', a))
            name = list(set(name))
            if len(name) >= 2:
                # too many names for a tweet — paste them instead
                names = utils.make_paste(
                    text='\n'.join(name), title="Source Names")
            else:
                names = ''.join(name)
        except Exception:
            names = ""
        return artist, series, names

    def tweeted_image(api, status):
        """Return the image url from the tweet."""
        try:
            tweet = api.lookup_status(id=status['in_reply_to_status_id'])
            tweet = tweet[0]['entities']['media'][0]['media_url_https']
            if "tweet_video_thumb" in str(tweet):
                is_gif = True
            else:
                is_gif = False
            if ".mp4" in tweet:
                return "Sorry, source is a video and not an image!", False
            return tweet, is_gif
        except Exception:
            return "Are you sure you're asking for source on an image?", False

    tweeted_image, is_gif = tweeted_image(api, status)
    if ("Are you sure" in tweeted_image) or ("source is a" in tweeted_image):
        return tweeted_image
    artist, series, names = info(tweeted_image)
    saucenao = u"http://saucenao.com/search.php?urlify=1&url={0}".format(
        str(tweeted_image))
    if artist == "OFFLINE":
        return "Unable to search for source! Try using SauceNAO: " + saucenao
    if not artist and not series and not names:
        return "No relevant source information found!\n" + saucenao
    else:
        if artist:
            artist = "By: {0}\n".format(artist)
        if names:
            names = "Character(s): {0}\n".format(names)
        if series:
            series = "From: {0}\n".format(utils.short_string(series, 25))
        # Collect unique @handles from the mention, minus the bot's own.
        handles = status['text'].lower()
        handles = [word for word in handles.split() if word.startswith('@')]
        handles = list(set(handles))
        handles = ' '.join(handles).replace(
            "@" + settings["twitter_track"][0].lower(), "")
        m = "{0}{1}{2}".format(artist, names, series)
        if is_gif:
            m += "*Source is a gif so this could be inaccurate.\n"
        if (len(m) + 24) >= 120:
            # too long for a tweet — paste the details and link to them
            m = utils.make_paste(m)
            m = "Source information is too long to Tweet:\n" + m + "\n"
        m = "{0}\n{1}{2}".format(handles, m, saucenao)
        return m.replace("&Amp;", "&")
def wait_for_rar(folder, sab_nzo_id, some_rar):
    """Block until *some_rar* appears (fully written) in *folder*.

    Shows a progress dialog with an elapsed-seconds label; the user may
    cancel and choose to delete the SABnzbd job or keep downloading.

    Args:
        folder: directory where SABnzbd writes the incomplete download.
        sab_nzo_id: SABnzbd job (nzo) id.
        some_rar: file name to wait for.

    Returns:
        True if the user canceled the wait, False otherwise.
    """
    # Single canceled flag (the original mixed isCanceled/iscanceled).
    is_canceled = False
    is_rar_found = False
    # If some_rar exist we skip dialogs.  Avoid shadowing the builtins
    # `file`/`bytes` with fname/fbytes.
    for fname, fbytes in utils.sorted_rar_file_list(utils.list_dir(folder)):
        if fname == some_rar:
            is_rar_found = True
            break
    if not is_rar_found:
        seconds = 0
        progressDialog = xbmcgui.DialogProgress()
        progressDialog.create('NZBS',
                              'Request to SABnzbd succeeded, waiting for ',
                              utils.short_string(some_rar))
        while not is_rar_found:
            seconds += 1
            time.sleep(1)
            dirList = utils.sorted_rar_file_list(utils.list_dir(folder))
            for fname, fbytes in dirList:
                if fname == some_rar:
                    path = os.path.join(folder, fname)
                    # Wait until the file is written to disk before
                    # proceeding: poll until two consecutive size reads
                    # agree and are nonzero.
                    size_now = int(fbytes)
                    size_later = 0
                    while (size_now != size_later) or (size_now == 0) or (size_later == 0):
                        size_now = os.stat(path).st_size
                        if size_now != size_later:
                            time.sleep(0.5)
                            size_later = os.stat(path).st_size
                    is_rar_found = True
                    break
            label = str(seconds) + " seconds"
            progressDialog.update(0,
                                  'Request to SABnzbd succeeded, waiting for',
                                  utils.short_string(some_rar), label)
            if progressDialog.iscanceled():
                progressDialog.close()
                dialog = xbmcgui.Dialog()
                ret = dialog.select('What do you want to do?',
                                    ['Delete job', 'Just download'])
                if ret == 0:
                    SABNZBD.pause('', sab_nzo_id)
                    # give SABnzbd time to pause before deleting the job
                    time.sleep(3)
                    delete_ = SABNZBD.delete_queue('', sab_nzo_id)
                    if "ok" not in delete_:
                        xbmc.log(delete_)
                        xbmc.executebuiltin(
                            'Notification("NZBS","Deleting failed")')
                    else:
                        xbmc.executebuiltin(
                            'Notification("NZBS","Deleting succeeded")')
                    is_canceled = True
                    return is_canceled
                if ret == 1:
                    is_canceled = True
                    xbmc.executebuiltin('Notification("NZBS","Downloading")')
                    return is_canceled
        progressDialog.close()
    return is_canceled