def paste(message):
    """Turn the message this command replies to into a paste.

    Creates a paste from the replied-to message's text (or caption),
    answers the original message with the paste link, and removes the
    command message itself.
    """
    replied = message.reply_to_message
    content = replied.text or replied.caption
    new_paste = make_paste(content, replied.from_user.first_name)
    if not new_paste:
        # Paste creation failed -- silently do nothing.
        return
    bot.reply_to(replied, text=new_paste, disable_web_page_preview=True)
    # Clean up the triggering command message.
    bot.delete_message(chat_id=message.chat.id, message_id=message.message_id)
    logger.info(
        "User {0} has requested a paste version of a message {1}".format(
            get_user(message.from_user), message.reply_to_message.message_id))
def check_possible_names(self):
    """Scrape character-name tags from the page and match against self.name.

    Returns False when no candidate names were scraped, True when
    self.name matches one of the single-word candidates, otherwise a
    help message containing a paste link that lists every candidate.
    """
    candidates = []
    if self.site in (0, 2):
        character_tags = self.soup.find_all(
            'li', attrs={'class': 'tag-type-character'})
        for tag in character_tags:
            # The two supported sites lay out the character name
            # differently inside the tag.
            if self.site == 0:
                raw = tag.next.text
            else:
                raw = tag.findNext('a').findNext('a').findNext('a').text
            candidate = raw.title()
            # Single-word names are marked with a trailing " $".
            if " " not in candidate:
                candidate += " $"
            candidates.append(candidate)
    if not candidates:
        return False
    if self.name.lower() + " $" in list(map(str.lower, candidates)):
        return True
    # Build a multi-language help text listing the candidate names.
    help_eng = "%sRegister one of these names:" % self.gender.title()
    help_frn = "%sRegister un de ces noms:" % self.gender.title()
    help_spn = ""
    name_list_string = '\n'.join(candidates)
    end_eng = """Don't see the name you are looking for here? Read the help: http://ace3df.github.io/AcePictureBot/commands/"""
    end_frn = "(Needs Translation)"
    end_spn = "(Needs Translation)"
    text = """English: {0} French: {1} Spanish: {2} {3} {4} {5} {6} """.format(
        help_eng, help_frn, help_spn, name_list_string,
        end_eng, end_frn, end_spn)
    return u"More than one name was found! Help: {0}".format(
        make_paste(text=text, title=self.name))
def document_to_paste(message):
    """Create a paste from an uploaded document and reply with its link.

    Downloads the attached document, decodes it as text, and posts the
    resulting paste URL as a reply. Binary (undecodable) files are
    skipped with a log entry.
    """
    doc = message.document
    file_info = bot.get_file(doc.file_id)
    try:
        contents = bot.download_file(file_info.file_path).decode()
    except UnicodeDecodeError:
        # Not a text file -- nothing to paste.
        logger.info("Can't decode file content")
        return
    new_paste = make_paste(contents, message.from_user.first_name,
                           doc.file_name)
    if not new_paste:
        return
    bot.reply_to(message, text=new_paste)
    logger.info(
        "Successfully created a paste of a document from message {}".format(
            message.message_id))
def info(image):
    """Reverse-search an image and return (artist, series, names).

    Runs the image URL through iqdb.org, follows the best Sankaku or
    Danbooru match, and scrapes artist, series (copyright) and character
    names from the booru page.

    Returns:
        ("OFFLINE", "OFFLINE", "OFFLINE") when a site can't be scraped,
        (False, False, False) when no relevant match is found, otherwise
        (artist, series, names) strings -- any of which may be "" when
        that piece of information is missing.  When two or more character
        names are found, `names` is a paste URL listing them instead.
    """
    url = "http://iqdb.org/?url=%s" % (str(image))
    soup = utils.scrape_site(url)
    if not soup:
        return "OFFLINE", "OFFLINE", "OFFLINE"
    if soup.find('th', text="No relevant matches"):
        return False, False, False
    site = None
    links = soup.find_all('a')
    for link in links:
        try:
            # Tags without an href raise KeyError on item access.
            link['href']
        except KeyError:
            continue
        if link.string == "(hide)":
            # Haven't broke yet, onto low results
            return False, False, False
        if "chan.sankakucomplex.com/post/show/" in link['href']:
            # Sankaku links can be protocol-relative ("//chan...").
            if "http" not in link['href']:
                url = "http:" + link['href']
            else:
                url = link['href']
            site = 0
            break
        elif "http://danbooru.donmai.us/posts/" in link['href']:
            url = link['href']
            site = 1
            break
    if site is None:
        # No link found!
        return False, False, False
    soup = utils.scrape_site(url)
    if not soup:
        return "OFFLINE", "OFFLINE", "OFFLINE"
    # Each scrape below degrades to "" when the expected tag is missing;
    # a failed find() returns None, so the chained access raises
    # AttributeError (previously hidden by a bare `except:`).
    try:
        if site == 0:  # Sankaku
            artist = soup.find(
                'li', class_="tag-type-artist").find_next('a').text.title()
        elif site == 1:  # Danbooru
            artist = soup.find(
                'h2', text="Artist").find_next(
                    'a', class_="search-tag").text.title()
    except AttributeError:
        artist = ""
    try:
        if site == 0:  # Sankaku
            series = soup.find(
                'li', class_="tag-type-copyright").find_next(
                    'a').text.title()
        elif site == 1:  # Danbooru
            series = soup.find(
                'h2', text="Copyrights").find_next(
                    'a', class_="search-tag").text.title()
    except AttributeError:
        series = ""
    try:
        if site == 0:  # Sankaku
            names = soup.find_all('li', class_="tag-type-character")
        elif site == 1:  # Danbooru
            names = soup.find_all('li', class_="category-4")
        name = []
        for a in names:
            if site == 0:
                a = a.find_next('a').text.title()
            elif site == 1:
                a = a.find_next('a').find_next('a').text.title()
            # Strip trailing " (Series Name)" qualifiers.
            name.append(re.sub(r' \([^)]*\)', '', a))
        name = list(set(name))
        if len(name) >= 2:
            # Too many characters to inline -- publish a paste instead.
            names = utils.make_paste(
                text='\n'.join(name), title="Source Names")
        else:
            names = ''.join(name)
    except AttributeError:
        names = ""
    return artist, series, names
def source(api, status):
    """Reply to a "source" request: identify the artwork a tweet replied to.

    Builds a reply string with artist / character / series info scraped
    via iqdb, plus a SauceNAO fallback link and the @handles to address.
    NOTE(review): `api` and `status` look like tweepy objects (attribute
    access such as `status.text`, `status.in_reply_to_status_id`) --
    confirm against the caller.
    """
    # NOTE(review): `lines` and `tag_re` are never used in this function.
    lines = []
    tag_re = re.compile(r'<[^>]+>')

    def info(image):
        # Reverse-search `image` on iqdb, follow the best Sankaku/Danbooru
        # match and scrape (artist, series, names).  Sentinel returns:
        # "OFFLINE" triple when scraping fails, False triple on no match.
        url = "http://iqdb.org/?url=%s" % (str(image))
        soup = utils.scrape_site(url)
        if not soup:
            return "OFFLINE", "OFFLINE", "OFFLINE"
        if soup.find('th', text="No relevant matches"):
            return False, False, False
        site = None  # 0 = Sankaku, 1 = Danbooru
        links = soup.find_all('a')
        for link in links:
            try:
                # Skip anchors with no href attribute.
                link['href']
            except:
                continue
            if link.string == "(hide)":
                # Haven't broke yet, onto low results
                return False, False, False
            if "chan.sankakucomplex.com/post/show/" in link['href']:
                # Sankaku links may be protocol-relative.
                if "http" not in link['href']:
                    url = "http:" + link['href']
                else:
                    url = link['href']
                site = 0
                break
            elif "http://danbooru.donmai.us/posts/" in link['href']:
                url = link['href']
                site = 1
                break
        if site is None:
            # No link found!
            return False, False, False
        soup = utils.scrape_site(url)
        if not soup:
            return "OFFLINE", "OFFLINE", "OFFLINE"
        # Each field degrades to "" if the expected tag is missing.
        try:
            if site == 0:
                # Sankaku
                artist = soup.find(
                    'li', class_="tag-type-artist").find_next('a').text.title()
            elif site == 1:
                # Danbooru
                artist = soup.find(
                    'h2', text="Artist").find_next(
                        'a', class_="search-tag").text.title()
        except:
            artist = ""
        try:
            if site == 0:
                # Sankaku
                series = soup.find(
                    'li', class_="tag-type-copyright").find_next(
                        'a').text.title()
            elif site == 1:
                # Danbooru
                series = soup.find(
                    'h2', text="Copyrights").find_next(
                        'a', class_="search-tag").text.title()
        except:
            series = ""
        try:
            if site == 0:
                # Sankaku
                names = soup.find_all('li', class_="tag-type-character")
            elif site == 1:
                # Danbooru
                names = soup.find_all('li', class_="category-4")
            name = []
            for a in names:
                if site == 0:
                    a = a.find_next('a').text.title()
                elif site == 1:
                    a = a.find_next('a').find_next('a').text.title()
                # Strip trailing " (Series)" qualifiers from names.
                name.append(re.sub(r' \([^)]*\)', '', a))
            name = list(set(name))
            if len(name) >= 2:
                # Multiple characters: publish the list as a paste.
                names = utils.make_paste(
                    text='\n'.join(name), title="Source Names")
            else:
                names = ''.join(name)
        except:
            names = ""
        return artist, series, names

    def tweeted_image(api, status):
        """Return the image url from the tweet."""
        try:
            tweet = api.get_status(status.in_reply_to_status_id)
            tweet = tweet.entities['media'][0]['media_url_https']
            # GIFs are served as video thumbnails; flag them as less
            # reliable for reverse search.
            if "tweet_video_thumb" in str(tweet):
                is_gif = True
            else:
                is_gif = False
            if ".mp4" in tweet:
                return "Sorry, source is a video and not an image!", False
            return tweet, is_gif
        except:
            return "Are you sure you're asking for source on an image?", False

    # NOTE: this rebinds the name `tweeted_image` from the helper
    # function to the image URL (or an error message) it returned.
    tweeted_image, is_gif = tweeted_image(api, status)
    if ("Are you sure" in tweeted_image) or ("source is a" in tweeted_image):
        # Error path: pass the helper's message straight back.
        count_trigger("source")
        return tweeted_image
    artist, series, names = info(tweeted_image)
    saucenao = u"http://saucenao.com/search.php?urlify=1&url={0}".format(
        str(tweeted_image))
    if artist == "OFFLINE":
        return "Unable to search for source! Try using SauceNAO: " + saucenao
    if not artist and not series and not names:
        return "No relevant source information found!\n" + saucenao
    else:
        # Turn the raw fields into labelled lines (empty fields stay "").
        if artist:
            artist = "By: {0}\n".format(artist)
        if names:
            names = "Character(s): {0}\n".format(names)
        if series:
            series = "From: {0}\n".format(utils.short_string(series, 25))
        # Collect the unique @handles from the request tweet, minus the
        # bot's own tracked handle.
        handles = status.text.lower()
        handles = [word for word in handles.split() if word.startswith('@')]
        handles = list(set(handles))
        handles = ' '.join(handles).replace(
            "@" + settings["twitter_track"][0].lower(), "")
        m = "{0}{1}{2}".format(artist, names, series)
        if is_gif:
            m += "*Source is a gif so this could be inaccurate.\n"
        # Too long to tweet (24 chars reserved for the SauceNAO link):
        # publish as a paste and link it instead.
        if (len(m) + 24) >= 120:
            m = utils.make_paste(m)
            m = "Source information is too long to Tweet:\n" + m + "\n"
        m = "{0}\n{1}{2}".format(handles, m, saucenao)
        return m.replace("&Amp;", "&")
def source(api, status):
    """Reply to a "source" request: identify the artwork a tweet replied to.

    Variant of `source` that treats `status` as a dict (``status['text']``)
    and fetches the parent tweet via ``api.lookup_status`` -- presumably a
    different Twitter client than the tweepy-style variant; confirm.
    """
    # NOTE(review): `lines` and `tag_re` are never used in this function.
    lines = []
    tag_re = re.compile(r'<[^>]+>')

    def info(image):
        # Reverse-search `image` on iqdb, follow the best Sankaku/Danbooru
        # match and scrape (artist, series, names).  Sentinel returns:
        # "OFFLINE" triple when scraping fails, False triple on no match.
        url = "http://iqdb.org/?url=%s" % (str(image))
        soup = utils.scrape_site(url)
        if not soup:
            return "OFFLINE", "OFFLINE", "OFFLINE"
        if soup.find('th', text="No relevant matches"):
            return False, False, False
        site = None  # 0 = Sankaku, 1 = Danbooru
        links = soup.find_all('a')
        for link in links:
            try:
                # Skip anchors with no href attribute.
                link['href']
            except:
                continue
            if link.string == "(hide)":
                # Haven't broke yet, onto low results
                return False, False, False
            if "chan.sankakucomplex.com/post/show/" in link['href']:
                # Sankaku links may be protocol-relative.
                if "http" not in link['href']:
                    url = "http:" + link['href']
                else:
                    url = link['href']
                site = 0
                break
            elif "http://danbooru.donmai.us/posts/" in link['href']:
                url = link['href']
                site = 1
                break
        if site is None:
            # No link found!
            return False, False, False
        soup = utils.scrape_site(url)
        if not soup:
            return "OFFLINE", "OFFLINE", "OFFLINE"
        # Each field degrades to "" if the expected tag is missing.
        try:
            if site == 0:
                # Sankaku
                artist = soup.find(
                    'li', class_="tag-type-artist").find_next('a').text.title()
            elif site == 1:
                # Danbooru
                artist = soup.find(
                    'h2', text="Artist").find_next(
                        'a', class_="search-tag").text.title()
        except:
            artist = ""
        try:
            if site == 0:
                # Sankaku
                series = soup.find(
                    'li', class_="tag-type-copyright").find_next(
                        'a').text.title()
            elif site == 1:
                # Danbooru
                series = soup.find(
                    'h2', text="Copyrights").find_next(
                        'a', class_="search-tag").text.title()
        except:
            series = ""
        try:
            if site == 0:
                # Sankaku
                names = soup.find_all('li', class_="tag-type-character")
            elif site == 1:
                # Danbooru
                names = soup.find_all('li', class_="category-4")
            name = []
            for a in names:
                if site == 0:
                    a = a.find_next('a').text.title()
                elif site == 1:
                    a = a.find_next('a').find_next('a').text.title()
                # Strip trailing " (Series)" qualifiers from names.
                name.append(re.sub(r' \([^)]*\)', '', a))
            name = list(set(name))
            if len(name) >= 2:
                # Multiple characters: publish the list as a paste.
                names = utils.make_paste(
                    text='\n'.join(name), title="Source Names")
            else:
                names = ''.join(name)
        except:
            names = ""
        return artist, series, names

    def tweeted_image(api, status):
        """Return the image url from the tweet."""
        try:
            tweet = api.lookup_status(id=status['in_reply_to_status_id'])
            tweet = tweet[0]['entities']['media'][0]['media_url_https']
            # GIFs are served as video thumbnails; flag them as less
            # reliable for reverse search.
            if "tweet_video_thumb" in str(tweet):
                is_gif = True
            else:
                is_gif = False
            if ".mp4" in tweet:
                return "Sorry, source is a video and not an image!", False
            return tweet, is_gif
        except:
            return "Are you sure you're asking for source on an image?", False

    # NOTE: this rebinds the name `tweeted_image` from the helper
    # function to the image URL (or an error message) it returned.
    tweeted_image, is_gif = tweeted_image(api, status)
    if ("Are you sure" in tweeted_image) or ("source is a" in tweeted_image):
        # Error path: pass the helper's message straight back.
        return tweeted_image
    artist, series, names = info(tweeted_image)
    saucenao = u"http://saucenao.com/search.php?urlify=1&url={0}".format(
        str(tweeted_image))
    if artist == "OFFLINE":
        return "Unable to search for source! Try using SauceNAO: " + saucenao
    if not artist and not series and not names:
        return "No relevant source information found!\n" + saucenao
    else:
        # Turn the raw fields into labelled lines (empty fields stay "").
        if artist:
            artist = "By: {0}\n".format(artist)
        if names:
            names = "Character(s): {0}\n".format(names)
        if series:
            series = "From: {0}\n".format(utils.short_string(series, 25))
        # Collect the unique @handles from the request tweet, minus the
        # bot's own tracked handle.
        handles = status['text'].lower()
        handles = [word for word in handles.split() if word.startswith('@')]
        handles = list(set(handles))
        handles = ' '.join(handles).replace(
            "@" + settings["twitter_track"][0].lower(), "")
        m = "{0}{1}{2}".format(artist, names, series)
        if is_gif:
            m += "*Source is a gif so this could be inaccurate.\n"
        # Too long to tweet (24 chars reserved for the SauceNAO link):
        # publish as a paste and link it instead.
        if (len(m) + 24) >= 120:
            m = utils.make_paste(m)
            m = "Source information is too long to Tweet:\n" + m + "\n"
        m = "{0}\n{1}{2}".format(handles, m, saucenao)
        return m.replace("&Amp;", "&")