def submit_post(self, account, filename):
    """Build a Markdown post from a JSON file and submit it to the Steem blockchain.

    Parameters
    ----------
    self : object
        Owning instance; must expose a ``steem`` client with a ``post`` method.
    account : str
        The Steem account name used as the post author.
    filename : str
        Path to a JSON file with 'title', 'tags', 'caption' and 'image' keys.
    """
    # Load post data. A context manager closes the handle; the original
    # `json.load(open(filename))` leaked the open file object.
    with open(filename, encoding="utf-8") as post_file:
        post = json.load(post_file)
    title = post['title']
    tags = post['tags']
    caption = post['caption']
    image = post['image']
    # Upload the image and retrieve the hosted url.
    url = imgur.upload_image(image)
    # Process the image for EXIF data to construct a table.
    table = exif.process_image(image)
    # Use a template to construct a body for the post. The template is kept
    # flush-left so dedent leaves the interpolated multi-line table intact.
    body = textwrap.dedent(f"""
<center>

{caption}

---

[![image]({url})]({url})

---

{table}

</center>
""")
    # Submit the post to the Steem blockchain.
    self.steem.post(title=title, body=body, author=account, tags=tags)
def deferred_upload_photo(blobs, dive_id):
    """Mirror each blob's image to Imgur and attach the links to the dive.

    After all blobs are uploaded (and deleted), the dive entity is persisted
    and its cache entry is invalidated.  Raises a generic Exception when a
    blob yields no image data.
    """
    target_dive = Dive.get_by_id(int(dive_id))
    for photo_blob in blobs:
        raw_image = photo_blob.get_all()
        if raw_image is None:
            raise Exception("Invalid blob")
        uploaded_links = imgur.upload_image(raw_image)
        target_dive.add_photo(uploaded_links)
        # Source blob is no longer needed once mirrored.
        photo_blob.delete()
    target_dive.put()
    application.uncache('dive/%s' % dive_id)
def transcribe_tweet(tweet_url, template):
    """Generates a Markdown message by filling the values into the message template.

    Parameters
    ----------
    tweet_url : str
        The tweet url.

    template : str
        The message string template. See the template folder for details.

    Returns
    -------
    str
        The post template filled with the tweet data values.

    """
    request_token()
    # The tweet id is the path segment after /status/, minus any query string.
    tweet_id = tweet_url.split("/status/")[-1].split("?")[0]
    final_url = BASE_URL + \
        f"statuses/show.json?id={tweet_id}&tweet_mode=extended"
    # We make a GET request to the tweet url.
    with requests.get(final_url, headers=HEADERS) as tweet_response:
        # We send the HTML source of the tweet to the scrape_tweet function.
        tweet_data = scrape_tweet(tweet_response.text)
    # We start taking the values from the returned dictionary and applying transformations.
    tweet_date = datetime.fromtimestamp(tweet_data["timestamp"])
    # By default we assume we have image links and initialize the inner links template.
    image_links_text = "*****\n\n**Imágenes:**\n\n"
    if len(tweet_data["images"]) > 0:
        # For each link we have we will mirror it to Imgur and update our inner links template.
        for index, link in enumerate(tweet_data["images"], 1):
            # We upload the image to Imgur and get the new url.
            imgur_url = upload_image(link)
            # We update our inner template with both links (original and Imgur).
            image_links_text += "[Imagen {}]({}) - [Mirror]({})\n\n".format(
                index, link, imgur_url)
    else:
        # If we have no images we set the image_links_text to an empty string.
        image_links_text = ""
    # By default we assume we have video links and initialize the inner links template.
    video_links_text = "*****\n\n**Video(s):**\n\n"
    if len(tweet_data["videos"]) > 0:
        # For each link we have we will update our inner links template.
        for index, link in enumerate(tweet_data["videos"], 1):
            # We update our inner template with the links.
            video_links_text += "[Video {}]({})\n\n".format(index, link)
    else:
        # If we have no videos we set the video_links_text to an empty string.
        video_links_text = ""
    # By default we assume we have url links and initialize the inner links template.
    url_links_text = "*****\n\n**Link(s):**\n\n"
    if len(tweet_data["links"]) > 0:
        # For each link we have we will update our inner links template.
        for index, link in enumerate(tweet_data["links"], 1):
            # Escape parentheses so they don't break the Markdown link syntax.
            # "\\(" fixes the original "\(" invalid escape sequence (same
            # runtime value, no SyntaxWarning on modern Python).
            link = link.replace("(", "\\(").replace(")", "\\)")
            # We update our inner template with the links.
            url_links_text += "[Link {}]({})\n\n".format(index, link)
    else:
        # If we have no links we set the url_links_text to an empty string.
        url_links_text = ""
    text_lines = list()
    # We split the tweet text by the new line character.
    for line in tweet_data["text"].split("\n"):
        # If the list element is not empty we apply a custom formatting.
        if len(line) > 0:
            # We will add a backslash when a line starts with a hashtag to avoid making a Markdown header.
            if line[0] == "#":
                text_lines.append("\\#" + line[1:])
            else:
                text_lines.append(line)
        else:
            text_lines.append("\n")
    # We join together the tweet text to its original form but with our cleaned formatting.
    # The templates can be found in the templates folder.
    tweet_text = "\n".join(text_lines)
    # We fill in the message template with our variables.
    post_text = template.format(
        tweet_data["fullname"], tweet_data["username"], tweet_date, tweet_date,
        tweet_data["permalink"], tweet_text, image_links_text,
        video_links_text, url_links_text, tweet_data["retweets"],
        tweet_data["favorites"])
    return post_text
def main_bots_settings(message, user_id, option, bot_name=False, value=False):
    """Apply a bot-settings action for a Telegram user.

    Dispatches on *option*: greeting text ('first'/'second'), greeting
    'delay', broadcast 'dispatch', and the command-management steps 'add',
    'set_value', 'set_image', 'edit'.  When *bot_name* is truthy the change
    targets that single bot; otherwise it applies to all of the user's bots.

    NOTE(review): indentation reconstructed from a collapsed source line —
    verify branch nesting (especially the trailing epilogue) against the
    original file.
    """
    if bot_name:
        # Per-bot branch: changes apply only to the selected bot.
        if option == 'first':
            # Replace the first greeting for the bot stored in client_status.
            DB.reset_greeting(user_id=user_id,
                              bot_name=client_status[user_id]['bot_name'],
                              new_greeting=value)
            del client_status[user_id]['option']
            return set_greetings(chat_id=user_id)
        elif option == 'delay':
            if value.isdigit():
                DB.set_greeting_delay(
                    user_id=user_id,
                    value=value,
                    bot_name=client_status[user_id]['bot_name'])
                del client_status[user_id]['option']
                bot.send_message(user_id, text='Готово.')
            else:
                # Re-prompt: the delay must be an integer number of seconds.
                bot.send_message(
                    message.from_user.id,
                    text='Введите задержку в секундах, например: *18*',
                    parse_mode='Markdown')
                return
        elif option == 'second':
            DB.reset_greeting(second=True,
                              user_id=user_id,
                              new_greeting=value,
                              bot_name=bot_name)
            bot.send_message(user_id, text='Готово.')
            del client_status[user_id]['option']
            return set_greetings(chat_id=user_id)
        elif option == 'dispatch':
            # Broadcast message.text through the selected bot only.
            dispatch(_bot=bot_name, user_id=user_id, text=message.text)
            bot.send_message(message.chat.id, text='Рассылка окончена.')
    else:
        # All-bots branch.
        if option in ['first', 'delay', 'second']:
            if option == 'first':
                DB.reset_greeting(user_id=user_id, new_greeting=value)
                bot.send_message(user_id, text='Готово.')
                del client_status[user_id]['option']
                return set_greetings(chat_id=user_id, _all=True)
            elif option == 'delay':
                if value.isdigit():
                    DB.set_greeting_delay(user_id=user_id,
                                          value=value,
                                          for_all=True)
                else:
                    # Re-prompt: the delay must be an integer number of seconds.
                    bot.send_message(
                        user_id,
                        text='Введите задержку в секундах, например: *18*',
                        parse_mode='Markdown')
                    return
            elif option == 'second':
                DB.reset_greeting(second=True,
                                  user_id=user_id,
                                  new_greeting=value)
                bot.send_message(user_id, text='Готово.')
                del client_status[user_id]['option']
                return set_greetings(chat_id=user_id, _all=True)
        elif option == 'dispatch':
            # Broadcast message.text through every bot of this user.
            dispatch(for_all=True, user_id=user_id, text=message.text)
            bot.send_message(message.chat.id, text='Рассылка окончена.')
        elif option == 'add':
            # Validate the proposed command text; validate[0] is an error
            # flag, validate[1] holds the parsed command tokens.
            validate = validate_commands(message.text)
            if not validate[0]:
                keyboard = telebot.types.InlineKeyboardMarkup()
                cancel = telebot.types.InlineKeyboardButton(
                    text='Отмена', callback_data='cancel')
                keyboard.add(cancel)
                # Reject duplicates of the user's existing commands.
                if message.text not in [
                        x[0] for x in DB.get_commands(
                            _all=True, user_id=message.from_user.id)
                ]:
                    if message.text.startswith('/'):
                        # Remember the command and advance to the value step.
                        client_status[user_id]['command'] = validate[1][0]
                        client_status[user_id]['option'] = 'set_value'
                        bot.send_message(
                            chat_id=user_id,
                            text=
                            'Укажите значение для команды *(только текст или смайлы)*',
                            parse_mode='Markdown',
                            reply_markup=keyboard)
                    else:
                        bot.send_message(
                            user_id,
                            text='Ошибочный формат команды. Попробуйте снова.',
                            reply_markup=keyboard)
                        return
                    return
                # Reached only when the command name already exists.
                return bot.send_message(
                    message.chat.id,
                    text='У Вас уже есть такая команда. Укажите другое имя.',
                    reply_markup=keyboard)
            else:
                add_more_commands(validate[1], message)
        elif option == 'set_value':
            keyboard = telebot.types.InlineKeyboardMarkup()
            next_step = telebot.types.InlineKeyboardButton(
                text='Завершить', callback_data='next')
            keyboard.add(next_step)
            # Store the reply text as the command value; the photo step is
            # optional and can be skipped via the inline button.
            client_status[user_id]['value'] = message.text
            client_status[user_id]['option'] = 'set_image'
            bot.send_message(
                chat_id=user_id,
                text=
                'Отправьте фото. (нажмите завершить чтобы пропустить этот шаг)',
                reply_markup=keyboard)
            return
        elif option == 'set_image':
            command = client_status[user_id]['command']
            value = client_status[user_id]['value']
            # Mirror the highest-resolution attached photo to Imgur and
            # persist the finished command.
            _file_id = bot.get_file(message.photo[-1].file_id)
            path = _file_id.file_path
            image_url = str(imgur.upload_image(path))
            DB.add_command(user_id=user_id,
                           command=command,
                           msg=value,
                           image=image_url)
        elif option == 'edit':
            msg = message.text
            DB.update_command(user_id=user_id,
                              command=client_status[user_id]['command'],
                              value=msg)
            bot.send_message(user_id, text='Готово.')
            commands_settings(chat_id=user_id, user_id=user_id, get=True)
            del client_status[user_id]['option']
            return
        # Epilogue for branches that fall through (e.g. 'set_image'): clear
        # the pending option, confirm, and re-show the bot's settings menu.
        del client_status[user_id]['option']
        bot.send_message(user_id, text='Готово.')
        get_two_level_settings(
            send=True,
            message=message,
            bot_name=client_status[message.from_user.id]['bot_name'])
def init_bot():
    """Inits the bot.

    Builds the Markdown results table and a pie chart from the latest PREP
    data, uploads the chart to Imgur, and pushes the assembled message to
    the Reddit post.
    """
    prep_data = prep.get_prep(NOW.timestamp())
    explodes = list()
    results_table = ""
    # Hoisted out of the loop: the leading vote count is loop-invariant
    # (the original recomputed max() on every iteration).
    max_votes = max(prep_data["votes"])
    for index, candidate in enumerate(prep_data["candidates"]):
        candidate_votes = prep_data["votes"][index]
        candidate_percentage = prep_data["percentages"][index]
        # We check if the current candidate has the most votes. If so we
        # format it in bold letters and slightly explode its pie slice.
        if candidate_votes == max_votes:
            explodes.append(0.1)
            results_table += "**{}** | **{:,}** | **{}%**\n".format(
                candidate, candidate_votes, candidate_percentage)
        else:
            explodes.append(0.0)
            results_table += "{} | {:,} | {}%\n".format(
                candidate, candidate_votes, candidate_percentage)
    # We create the pie plot by using a dark style.
    plt.style.use("dark_background")
    patches, texts, autotexts = plt.pie(prep_data["percentages"],
                                        explode=explodes,
                                        colors=prep_data["colors"],
                                        autopct="%1.2f%%",
                                        startangle=90)
    plt.axis("equal")
    plt.legend(patches,
               prep_data["candidates"],
               bbox_to_anchor=(1, 0.5),
               loc="center right",
               bbox_transform=plt.gcf().transFigure)
    plt.title("Elecciones Presidenciales 2018")
    plt.figtext(.5,
                0,
                "Última actualización: {:%d-%m-%Y a las %H:%M:%S}".format(NOW),
                fontsize=12,
                va="baseline",
                ha="center")
    plt.subplots_adjust(left=0.0, bottom=0.1, right=0.45)
    plt.draw()
    # Once our plot has been drawn, we save it and prepare it for upload to Imgur.
    fig_path = "./figs/{}.png".format(int(NOW.timestamp()))
    plt.savefig(fig_path)
    image_link = imgur.upload_image(NOW, fig_path)
    # We start the Reddit Markdown message with a header.
    message = "Elecciones Presidenciales 2018.\n\nSe sincroniza cada 15 minutos.\n\n"
    # We add the results table.
    message += """Candidato | Votos | Porcentaje\n--|--|--\n"""
    message += results_table
    # We add additional metadata.
    message += "\nTotal de Votos: {:,} | Actas Computadas: {}%\n".format(
        prep_data["totalVotos"], prep_data["actasCapturadas"])
    # We add the Imgur URL.
    message += "## [Resultados en Gráfica]({})\n".format(image_link)
    # We add the footer which includes the latest formatted date.
    message += "*****\n^Última ^sincronización: ^{:%d-%m-%Y ^a ^las ^%H:%M:%S}".format(
        NOW)
    # Finally, we send it all to a Reddit post.
    reddit.update_post(message)
def results():
    """Dialogflow webhook handler: dispatches on the request's `action`
    and returns a Telegram fulfillment payload.

    NOTE(review): relies on module-level names defined elsewhere in the
    file (request, ss, im, pl, sugg, photos, perf, sec, table).
    """
    risk = ''
    # build a request object given an action
    req = request.get_json(force=True)
    # gets the action from the JSON request
    action = req.get('queryResult').get('action')
    # if statement to choose response based on action
    print(action)
    if action == 'tick-search':
        # saves the parameter tick passed in the JSON request
        tick = req.get('queryResult').get('parameters').get('tick')
        # calls draw plot to re-write image 'searchedTicker.png'
        ss.drawTickerPlots(tick)
        return {
            "fulfillmentMessages": [
                {
                    "text": {
                        "text": ["Here is {}'s performance: ".format(tick)]
                    },
                    "platform": "TELEGRAM"
                },
                {
                    "image": {
                        # uploads image using imgur and passed new URL
                        "imageUri": im.upload_image('Images/searchedTicker.png')
                    },
                    "platform": "TELEGRAM"
                },
                {
                    "text": {
                        "text": [
                            "Enter another image or enter 'Main menu' to go to menu"
                        ]
                    },
                    "platform": "TELEGRAM"
                }
            ]
        }
    elif action == 'sector-search':
        # saves the parameter sector from the JSON request
        sector = req.get('queryResult').get('parameters').get('sector')
        # returns the dataframe for top performers in the sector
        txt = ss.drawSectorPlots(sector)
        return {
            "fulfillmentMessages": [{
                "text": {
                    "text": [
                        # string version of dataframe giving top 5 performers in given sector
                        txt
                    ]
                },
                "platform": "TELEGRAM"
            }]
        }
    elif action == 'classify':
        # calls array of image URLs to return images
        return {
            "fulfillmentMessages": [
                {
                    "text": {
                        "text": ["Here is your breakdown by asset type: "]
                    },
                    "platform": "TELEGRAM"
                },
                {
                    "image": {
                        #"imageUri": "https://scontent-atl3-1.xx.fbcdn.net/v/t1.0-9/117774336_1044925022589847_8385272999349051066_n.jpg?_nc_cat=106&_nc_sid=85a577&_nc_ohc=J0wuYDR4fbYAX-U8UoN&_nc_ht=scontent-atl3-1.xx&oh=174104f237bbd56f3c35ff28b8120d92&oe=5FAF8958"
                        "imageUri": photos[0]
                    },
                    "platform": "TELEGRAM"
                },
                {
                    "text": {
                        "text": ["Here is the breakdown by sector: "]
                    },
                    "platform": "TELEGRAM"
                },
                {
                    "image": {
                        #"imageUri": "https://i.imgur.com/B0r0aDA.png"
                        "imageUri": photos[2]
                    },
                    "platform": "TELEGRAM"
                }
            ],
            # NOTE(review): this literal was line-wrapped in the extracted
            # source; reconstructed as a single line — confirm exact text.
            "fulfillmentText": "Happy to help :)"
        }
    elif action == 'high-low':
        # returns JSON response with text object full of highest and lowest performers
        return {'fulfillmentText': '{}\n\n {}'.format(perf, sec)}
    elif action == 'portfolio':
        return {
            "fulfillmentMessages": [
                {
                    "text": {
                        "text": ["Here are historical returns from your portfolio "]
                    },
                    "platform": "TELEGRAM"
                },
                {
                    "image": {
                        # "imageUri": "https://i.imgur.com/B0r0aDA.png"
                        "imageUri": photos[3]
                    },
                    "platform": "TELEGRAM"
                },
                {
                    "text": {
                        "text": [table]
                    },
                    "platform": "TELEGRAM"
                },
                {
                    "text": {
                        "text": [
                            "\nWould you like to search a particular asset, see top performers, or view your portfolio "
                            "classified? "
                        ]
                    },
                    "platform": "TELEGRAM"
                },
            ]
        }
    elif action == "marketData":
        # returns plots for DOW and S&P 500, indicative of marker strength
        return {
            "fulfillmentMessages": [
                {
                    "text": {
                        "text": ["Here is the Dow's Performance over Time: "]
                    },
                    "platform": "TELEGRAM"
                },
                {
                    "image": {
                        # "imageUri": "https://i.imgur.com/B0r0aDA.png"
                        "imageUri": photos[4]
                    },
                    "platform": "TELEGRAM"
                },
                {
                    "text": {
                        "text": ["Here is S&P's Performance: "]
                    },
                    "platform": "TELEGRAM"
                },
                {
                    "image": {
                        #"imageUri": "https://i.imgur.com/B0r0aDA.png"
                        "imageUri": photos[5]
                    },
                    "platform": "TELEGRAM"
                },
            ],
        }
    elif action == "asset":
        # saves parameter ticker from JSON request
        sym = req.get('queryResult').get('parameters').get('ticker')
        # calls plots to get data for specific asset
        return {
            'fulfillmentText':
            pl.portfolioSpecificData(sym) +
            '\nEnter another ticker or "Main Menu" to go back to menu'
        }
    elif action == "poojan":
        return {
            "fulfillmentMessages": [
                {
                    "text": {
                        "text": ["ooooo Pooojannnn"]
                    },
                    "platform": "TELEGRAM"
                },
                {
                    "image": {
                        "imageUri": "https://scontent-atl3-1.xx.fbcdn.net/v/t1.0-9/117774336_1044925022589847_8385272999349051066_n.jpg?_nc_cat=106&_nc_sid=85a577&_nc_ohc=J0wuYDR4fbYAX-U8UoN&_nc_ht=scontent-atl3-1.xx&oh=174104f237bbd56f3c35ff28b8120d92&oe=5FAF8958"
                        # "imageUri": photos[4]
                    },
                    "platform": "TELEGRAM"
                },
            ],
        }
    elif action == "suggest":
        risk = req.get('queryResult').get('parameters').get(
            'risk').strip().lower()
        sect = req.get('queryResult').get('parameters').get('sector')
        # txt = 'Given your desire to take {} risk in the given sectors, we suggest these options: \n'.format(risk)
        if len(sect) > 0:
            txt = '\n'.join(sugg.generateSuggestions(risk, sect))
        else:
            txt = '\n'.join(sugg.generateSuggestions(risk))
        return {'fulfillmentText': txt}
# NOTE(review): this is the interior of a loop whose header is outside the
# visible span (`continue` implies an enclosing for/while over links);
# indentation reconstructed — verify nesting against the original file.
# Skip links already known to fail.
if link in error_links:
    continue
# If the url is a link to craigslist examine it
if link.domain == 'craigslist.org' or link.domain.endswith('.craigslist.org'):
    # See if we have already processed it
    if link.href not in processed:
        # Get a screenshot of it
        filename = renderer.render(link.href)
        if dry_run:
            print("Created image for story\n %s\n %s\n %s" %
                  (link.title, link.comment_page(), filename))
            processed += [link.href]
            # Don't store settings in dry-run mode
        else:
            # Submit the screenshot to imgur.com
            imgur_link = imgur.upload_image(filename)
            print("Uploading image for story\n %s\n %s\n %s" %
                  (link.title, link.comment_page(), imgur_link))
            # Delete image
            os.unlink(filename)
            # Submit the link back to reddit
            reddit.submit_comment(link.comment_page(),
                                  'Imgur cache: %s' % (imgur_link))
            # Mark this link as processed
            processed += [link.href]
            # Persist progress so a restart does not re-process links.
            with open(settings_filename, 'wb') as settings:
                pickle.dump((processed, imgur_key, reddit_username), settings)
        # Skip to waiting for the next link
        uploaded = True
def evaluate_path(path, title, album_hash='1t9Mpfemtvldk1a'):
    """Upload the image at *path* to the given Imgur album and evaluate it.

    Returns a tuple of the uploaded image's Imgur link and the result of
    ``evaluate_image`` for that link and *title*.
    """
    upload_response = imgur.upload_image(album_hash, path)
    image_url = upload_response['data']['link']
    evaluation = evaluate_image(image_url, title)
    return image_url, evaluation
def transcribe_tweet(tweet_url, template):
    """Performs web scraping and fills the values from the message template.

    Parameters
    ----------
    tweet_url : string
        The tweet url.

    template : string
        The message string template. See the template folder for details.

    Returns
    -------
    string
        The post template filled with the tweet data values.

    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:68.0) Gecko/20100101 Firefox/68.0"}
    # We make a GET request to the tweet url.
    with requests.get(tweet_url, headers=headers) as tweet_response:
        # We send the HTML source of the tweet to the scrape_tweet function.
        tweet_data = scrape_tweet(tweet_response.text)
    # We start taking the values from the returned dictionary and applying transformations.
    tweet_date = datetime.fromtimestamp(tweet_data["timestamp"])
    # By default we assume we have image links and initialize the inner links template.
    links_text = "*****\n\n**Image(s):**\n\n"
    if len(tweet_data["links"]) > 0:
        # For each link we have we will mirror it to Imgur and update our
        # inner links template. enumerate(..., 1) replaces the manual
        # `index + 1` arithmetic of the original.
        for index, link in enumerate(tweet_data["links"], 1):
            # We upload the image to Imgur and get the new url.
            imgur_url = upload_image(link)
            # We update our inner template with both links (original and Imgur).
            links_text += "[Image {}]({}) - [Mirror]({})\n\n".format(
                index, link, imgur_url)
    else:
        # If we have no links we set the links_text to an empty string.
        links_text = ""
    text_lines = list()
    # We split the tweet text by the new line character.
    for line in tweet_data["text"].split("\n"):
        # If the list element is not empty we apply a custom formatting.
        if len(line) > 0:
            # We add a backslash when a line starts with a hashtag to avoid
            # making a Markdown header. "\\#" fixes the original "\#"
            # invalid escape sequence (same runtime value, no warning).
            if line[0] == "#":
                text_lines.append("\\#" + line[1:])
            else:
                text_lines.append(line)
        else:
            text_lines.append("\n")
    # We join together the tweet text to its original form but with our cleaned formatting.
    # The templates can be found in the templates folder.
    tweet_text = "\n".join(text_lines)
    # We fill in the message template with our variables.
    post_text = template.format(
        tweet_data["fullname"], tweet_data["username"], tweet_date,
        tweet_date, tweet_data["permalink"], tweet_text, links_text,
        tweet_data["retweets"], tweet_data["favorites"],
        tweet_data["replies"])
    return post_text