def update_from_cryptonator_data(self, repo):
    """
    Updates the value of conversion cryptocurrencies using cryptonator data.

    :type repo: ConvertRepo
    """
    # Get currency ConvertType
    currency_type = repo.get_type_by_name("currency")
    # Cryptocurrency codes to update, each priced against EUR
    currency_codes = ["LTC", "BTC", "BCH", "DOGE", "XMR", "ETH", "ETC", "DASH"]
    for code in currency_codes:
        # Build the URL once so the retry path requests the same resource
        url = "https://api.cryptonator.com/api/ticker/{}-eur".format(code)
        try:
            data = Commons.load_url_json(url)
        except Exception as e:
            # The Cryptonator API sometimes returns HTML instead of JSON.
            # If JSON parsing failed, wait briefly and give it one more go.
            if "Expecting value:" in str(e):
                time.sleep(5)
                data = Commons.load_url_json(url)
            else:
                # Bare raise preserves the original traceback
                raise
        # Get the ConvertUnit object for the currency reference
        currency_unit = currency_type.get_unit_by_name(code)
        if currency_unit is None:
            continue
        # Update the value
        currency_unit.update_value(data["ticker"]["price"])
def _get_api_data(self, path: str, needs_cookie: bool = False) -> Union[Dict, List]:
    """Fetch JSON from the FA export API at the given path.

    If needs_cookie is set, the stored FA login cookie is sent as a header.
    The API base URL can be overridden with the FA_API_URL environment variable.
    """
    base_url = os.getenv("FA_API_URL", "https://faexport.spangle.org.uk")
    url = "{}/{}".format(base_url, path)
    if not needs_cookie:
        return Commons.load_url_json(url)
    cookie_string = "b=" + self.b + "; a=" + self.a
    return Commons.load_url_json(url, [["FA_COOKIE", cookie_string]])
def test_load_url_json():
    """Check Commons.load_url_json parses JSON, with and without custom headers."""
    get_url = "https://httpbin.org/get"
    get_data = Commons.load_url_json(get_url)
    for key in ("args", "headers", "origin", "url"):
        assert key in get_data, "Element missing from json dict response."
    assert get_data["url"] == get_url, "JSON data incorrect."
    headers_url = "https://httpbin.org/headers"
    custom_headers = [["User-Agent", "Example data"]]
    headers_data = Commons.load_url_json(headers_url, custom_headers)
    assert "headers" in headers_data, "Element missing from json response dict."
    assert "User-Agent" in headers_data["headers"], "Header missing from request."
    assert (
        headers_data["headers"]["User-Agent"] == "Example data"
    ), "Header data missing from request."
def get_youtube_playlist(self, playlist_id, page_token=None):
    """Returns a list of video information for a youtube playlist."""
    api_key = self.hallo_obj.get_api_key("youtube")
    if api_key is None:
        raise Exception("Youtube API key missing.")
    # Restrict the API response to just the fields this method uses
    api_fields = "nextPageToken,items(snippet/title,snippet/resourceId/videoId)"
    api_url = (
        "https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId={}"
        "&fields={}&key={}".format(playlist_id, urllib.parse.quote(api_fields), api_key))
    if page_token is not None:
        api_url += "&pageToken={}".format(page_token)
    # Load API response (in json).
    api_dict = Commons.load_url_json(api_url)
    videos = [
        {
            "title": item["snippet"]["title"],
            "video_id": item["snippet"]["resourceId"]["videoId"],
        }
        for item in api_dict["items"]
    ]
    # Recurse to collect any further pages of the playlist
    if "nextPageToken" in api_dict:
        videos.extend(
            self.get_youtube_playlist(playlist_id, api_dict["nextPageToken"]))
    return videos
def site_youtube(self, url_address, page_opener, page_request):
    """Handling for youtube links"""
    # Extract the video id from either the short or the full link form
    if "youtu.be" in url_address:
        video_id = url_address.split("/")[-1].split("?")[0]
    else:
        video_id = url_address.split("/")[-1].split("=")[1].split("&")[0]
    api_key = self.hallo_obj.get_api_key("youtube")
    if api_key is None:
        return None
    api_url = ("https://www.googleapis.com/youtube/v3/videos?id={}"
               "&part=snippet,contentDetails,statistics&key={}".format(
                   video_id, api_key))
    # Load API response (in json).
    api_dict = Commons.load_url_json(api_url)
    video_item = api_dict["items"][0]
    title = video_item["snippet"]["title"]
    # Duration has a two-character prefix trimmed off and is lowercased
    # (presumably an ISO8601 "PT" prefix — TODO confirm against API docs)
    duration = video_item["contentDetails"]["duration"][2:].lower()
    views = video_item["statistics"]["viewCount"]
    return "Youtube video> Title: {} | Length {} | Views: {}.".format(
        title, duration, views)
def site_imgur(self, url_address, page_opener, page_request):
    """Handling imgur links"""
    # Albums are handled by a dedicated function
    if "/a/" in url_address:
        return self.site_imgur_album(url_address, page_opener, page_request)
    # Handle individual imgur image links
    image_id = url_address.split("/")[-1].split(".")[0]
    api_url = "https://api.imgur.com/3/image/{}".format(image_id)
    # Load API response (in json) using Client-ID.
    api_key = self.hallo_obj.get_api_key("imgur")
    if api_key is None:
        return None
    api_dict = Commons.load_url_json(api_url, [["Authorization", api_key]])
    image_data = api_dict["data"]
    # Pull title, dimensions, size, and view count from the API data
    title = str(image_data["title"])
    width = str(image_data["width"])
    height = str(image_data["height"])
    size_string = self.file_size_to_string(int(image_data["size"]))
    views = image_data["views"]
    return "Imgur> Title: {} | Size: {}x{} | Filesize: {} | Views: {:,}.".format(
        title, width, height, size_string, views)
def run(self, event):
    """Fetch a random cat gif from thecatapi and respond with its URL."""
    api_key = event.server.hallo.get_api_key("thecatapi")
    if api_key is None:
        return event.create_response("No API key loaded for cat api.")
    url = "http://thecatapi.com/api/images/get?format=json&api_key={}&type=gif".format(
        api_key)
    # API returns a list; the first entry holds the gif URL
    first_result = Commons.load_url_json(url)[0]
    return event.create_response(first_result["url"])
def get_random_link_result(self, search):
    """Gets a random link from the e621 api."""
    line_clean = search.replace(" ", "%20")
    url = "https://e621.net/posts.json?tags=order:random%20score:%3E0%20{}%20&limit=1".format(
        line_clean)
    posts = Commons.load_url_json(url)["posts"]
    # No matching posts means no result
    return posts[0] if posts else None
def run(self, event):
    """Respond with a random quote from the type.fit quote API."""
    quotes = Commons.load_url_json("https://type.fit/api/quotes")
    # get_random_choice returns a list; take its single element
    chosen = Commons.get_random_choice(quotes)[0]
    output = '"{}" - {}'.format(chosen["text"], chosen["author"])
    return event.create_response(output)
def run(self, event):
    """Generate a random person via randomuser.me and describe them.

    Passing "more", "full", "verbose" or "all" produces a longer description.
    """
    input_clean = event.command_args.strip().lower()
    api_url = "https://api.randomuser.me/0.6/?nat=gb&format=json"
    user_dict = Commons.load_url_json(api_url)["results"][0]["user"]
    # Pull out every field up front (so a malformed response fails fast)
    name = "{} {} {}".format(
        user_dict["name"]["title"],
        user_dict["name"]["first"],
        user_dict["name"]["last"],
    ).title()
    email = user_dict["email"]
    address = "{}, {}, {}".format(
        user_dict["location"]["street"].title(),
        user_dict["location"]["city"].title(),
        user_dict["location"]["postcode"],
    )
    username = user_dict["username"]
    password = user_dict["password"]
    date_of_birth = Commons.format_unix_time(int(user_dict["dob"]))
    phone_home = user_dict["phone"]
    phone_mob = user_dict["cell"]
    national_insurance = user_dict["NINO"]
    is_male = user_dict["gender"] == "male"
    pronoun = "he" if is_male else "she"
    pronoun_possessive = "his" if is_male else "her"
    if input_clean not in ["more", "full", "verbose", "all"]:
        return event.create_response(
            "I have generated this person: Say hello to {}. {} was born at {}.".format(
                name, pronoun.title(), date_of_birth))
    return event.create_response(
        "I have generated this person: Say hello to {}. "
        "{} was born at {} and lives at {}. "
        '{} uses the email {}, the username {} and usually uses the password "{}". '
        "{} home number is {} but {} mobile number is {}. "
        "{} national insurance number is {}.".format(
            name,
            pronoun.title(),
            date_of_birth,
            address,
            pronoun.title(),
            email,
            username,
            password,
            pronoun_possessive.title(),
            phone_home,
            pronoun_possessive,
            phone_mob,
            pronoun_possessive.title(),
            national_insurance,
        ))
def read_path(self, path):
    """
    Read JSON data from the given path on the dailys API,
    sending the stored key as an Authorization header when one is set.

    :type path: str
    :rtype: list | dict
    """
    auth_headers = (
        [["Authorization", self.dailys_key]] if self.dailys_key is not None else None
    )
    return Commons.load_url_json("{}/{}".format(self.dailys_url, path), auth_headers)
def passive_trigger(self, evt):
    """
    Collect FA notification counts for the spreadsheet user, save them
    against the previous day's date, and report them to the channel.

    :type evt: Event.Event
    :rtype: None
    """
    user_parser = hallo.modules.user_data.UserDataParser()
    fa_data: hallo.modules.user_data.FAKeyData = user_parser.get_data_by_user_and_type(
        self.spreadsheet.user, hallo.modules.user_data.FAKeyData
    )
    if fa_data is None:
        raise hallo.modules.dailys.dailys_field.DailysException(
            "No FA data has been set up for the FA field module to use."
        )
    cookie = "b=" + fa_data.cookie_b + "; a=" + fa_data.cookie_a
    fa_api_url = os.getenv("FA_API_URL", "https://faexport.spangle.org.uk")
    try:
        notifications_data = Commons.load_url_json(
            "{}/notifications/others.json".format(fa_api_url),
            [["FA_COOKIE", cookie]],
        )
    except Exception as e:
        # Chain the cause so the underlying failure isn't discarded
        raise hallo.modules.dailys.dailys_field.DailysException(
            "FA key in storage is not currently logged in to FA."
        ) from e
    profile_name = notifications_data["current_user"]["profile_name"]
    profile_data = Commons.load_url_json(
        "{}/user/{}.json".format(fa_api_url, profile_name))
    counts = notifications_data["notification_counts"]
    # Note the key spelling changes: favourites<-favorites, watches<-watchers
    notifications = {
        "submissions": counts["submissions"],
        "comments": counts["comments"],
        "journals": counts["journals"],
        "favourites": counts["favorites"],
        "watches": counts["watchers"],
        "notes": counts["notes"],
        "watchers_count": profile_data["watchers"]["count"],
        "watching_count": profile_data["watching"]["count"]
    }
    # Data is recorded against the previous day's date
    d = (evt.get_send_time() - timedelta(1)).date()
    self.save_data(notifications, d)
    # Send data to destination
    notif_str = json.dumps(notifications)
    self.message_channel(notif_str)
def run(self, event):
    """Report how many people are in space right now, and their names."""
    space_data = Commons.load_url_json(
        "https://www.howmanypeopleareinspacerightnow.com/space.json"
    )
    count = str(space_data["number"])
    names = ", ".join(person["name"].strip() for person in space_data["people"])
    response = "There are {} people in space right now. Their names are: {}.".format(
        count, names
    )
    return event.create_response(response)
def run(self, event):
    """Respond with the current weather for a stored or specified location.

    With no argument, the calling user's stored location is used; otherwise
    the argument may name another user in the channel, or a location.
    """
    user_data_parser = hallo.modules.user_data.UserDataParser()
    line_clean = event.command_args.strip().lower()
    if line_clean == "":
        # No argument: use the calling user's stored location
        location_entry = user_data_parser.get_data_by_user_and_type(
            event.user, hallo.modules.user_data.WeatherLocationData)
        if location_entry is None:
            return event.create_response(
                "No location stored for this user. Please specify a location or "
                + 'store one with the "setup weather location data" function.'
            )
    else:
        # Check if a user was specified
        test_user = event.user.server.get_user_by_name(line_clean)
        if event.channel is not None and event.channel.is_user_in_channel(test_user):
            location_entry = user_data_parser.get_data_by_user_and_type(
                test_user, hallo.modules.user_data.WeatherLocationData)
            if location_entry is None:
                return event.create_response(
                    "No location stored for this user. Please specify a location or "
                    + 'store one with the "setup weather location data" function.'
                )
        else:
            location_entry = hallo.modules.user_data.WeatherLocationData.create_from_input(
                event)
    api_key = event.server.hallo.get_api_key("openweathermap")
    if api_key is None:
        return event.create_response("No API key loaded for openweathermap.")
    url = "https://api.openweathermap.org/data/2.5/weather{}&APPID={}".format(
        self.build_query(location_entry), api_key)
    response = Commons.load_url_json(url)
    if str(response["cod"]) != "200":
        return event.create_response("Location not recognised.")
    weather = response["weather"][0]
    main_data = response["main"]
    output = ("Current weather in {} is {} ({}). "
              "Temp: {:.2f}C, Humidity: {}%, Wind speed: {}m/s".format(
                  response["name"],
                  weather["main"],
                  weather["description"],
                  main_data["temp"] - 273.15,  # Kelvin to Celsius
                  main_data["humidity"],
                  response["wind"]["speed"],
              ))
    return event.create_response(output)
def run(self, event):
    """Look up the top Urban Dictionary definition for the given term."""
    term = event.command_args.replace(" ", "+").lower()
    url = "https://api.urbandictionary.com/v0/define?term={}".format(term)
    urban_dict = Commons.load_url_json(url)
    if not urban_dict["list"]:
        return event.create_response(
            "Sorry, I cannot find a definition for {}.".format(
                event.command_args))
    # Flatten the definition onto one line for the response
    definition = urban_dict["list"][0]["definition"].replace("\r", "").replace("\n", "")
    return event.create_response(definition)
def run(self, event):
    """Pick a random colour, look up its name, and respond with the details."""
    rgb = Commons.get_random_int(0, 255, 3)
    # Two uppercase hex digits per channel
    hex_code = "".join("{:02X}".format(channel) for channel in rgb)
    api_url = "https://www.thecolorapi.com/id?hex={}".format(hex_code)
    human_url = "{}&format=html".format(api_url)
    colour_name = Commons.load_url_json(api_url)["name"]["value"]
    output = "Randomly chosen colour is: {} #{} or rgb({},{},{}) {}".format(
        colour_name, hex_code, rgb[0], rgb[1], rgb[2], human_url
    )
    return event.create_response(output)
def site_imdb(self, url_address, page_opener, page_request):
    """Handling for imdb links"""
    # Non-title imdb links fall back to generic URL handling
    if "imdb.com/title" not in url_address:
        return self.url_generic(url_address, page_opener, page_request)
    id_match = re.search("title/(tt[0-9]*)", url_address)
    if id_match is None:
        return self.url_generic(url_address, page_opener, page_request)
    # Download API response for the movie id
    api_dict = Commons.load_url_json(
        "https://www.omdbapi.com/?i={}".format(id_match.group(1)))
    return "IMDB> Title: {} ({}) | Rating {}/10, {} votes. | Genres: {}.".format(
        api_dict["Title"],
        api_dict["Year"],
        api_dict["imdbRating"],
        api_dict["imdbVotes"],
        api_dict["Genre"],
    )
def site_ebay(self, url_address, page_opener, page_request):
    """Handling for ebay links"""
    # Get the ebay item id
    item_id = url_address.split("/")[-1]
    api_key = self.hallo_obj.get_api_key("ebay")
    if api_key is None:
        return None
    api_url = (
        "http://open.api.ebay.com/shopping?callname=GetSingleItem&responseencoding=JSON&appid={}"
        "&siteid=0&version=515&ItemID={}&IncludeSelector=Details".format(
            api_key, item_id))
    item = Commons.load_url_json(api_url)["Item"]
    price = "{} {}".format(
        item["CurrentPrice"]["Value"], item["CurrentPrice"]["CurrencyID"])
    end_time = item["EndTime"][:19].replace("T", " ")
    output = "eBay> Title: {} | Price: {} | ".format(item["Title"], price)
    if item["ListingType"] == "Chinese":
        # "Chinese" listing type is an auction; pluralise the bid count
        bid_count = str(item["BidCount"])
        bid_word = "bid" if bid_count == "1" else "bids"
        output += "Auction, {} {}".format(bid_count, bid_word)
    elif item["ListingType"] == "FixedPriceItem":
        # Listing type: buy it now
        output += "Buy it now | "
    output += "Ends: {}".format(end_time)
    return output
def run(self, event):
    """Fetch a wikipedia article and respond with its first paragraph,
    stripped of wiki markup (templates, references, links, comments)."""
    title = event.command_args.strip().replace(" ", "_")
    url = (
        "https://en.wikipedia.org/w/api.php?format=json&action=query&titles={}"
        "&prop=revisions&rvprop=content&redirects=True".format(title)
    )
    article_dict = Commons.load_url_json(url)
    page_code = list(article_dict["query"]["pages"])[0]
    article_text = article_dict["query"]["pages"][page_code]["revisions"][0]["*"]

    def _strip_to_fixpoint(pattern, replacement, text):
        # Re-apply the substitution until the text stops changing,
        # so nested constructs are fully removed
        previous = text
        stripped = re.sub(pattern, replacement, previous)
        while stripped != previous:
            previous = stripped
            stripped = re.sub(pattern, replacement, previous)
        return stripped

    # Strip (possibly nested) templates, bold/italic quotes, and references
    plain_text = _strip_to_fixpoint("{{[^{^}]*}}", "", article_text)
    plain_text = plain_text.replace("''", "")
    plain_text = re.sub(r"<ref[^<]*</ref>", "", plain_text)
    # Strip links embedded in image descriptions, then the images themselves
    plain_text = _strip_to_fixpoint(r"(\[\[File:[^][]+)\[\[[^]]+]]", r"\1", plain_text)
    plain_text = re.sub(r"\[\[File:[^]]+]]", "", plain_text)
    # Unwrap named links, then plain links
    plain_text = re.sub(r"\[\[[^]^|]*\|([^]]*)]]", r"\1", plain_text)
    plain_text = re.sub(r"\[\[([^]]*)]]", r"\1", plain_text)
    # Drop comments and any remaining self-closing references
    plain_text = re.sub(r"<!--[^>]*-->", "", plain_text)
    plain_text = re.sub(r"<ref[^>]*/>", "", plain_text)
    first_paragraph = plain_text.strip().split("\n")[0]
    return event.create_response(first_paragraph)
def site_imgur_album(self, url_address, page_opener, page_request):
    """Handling imgur albums"""
    album_id = url_address.split("/")[-1].split("#")[0]
    api_url = "https://api.imgur.com/3/album/{}".format(album_id)
    # Load API response (in json) using Client-ID.
    api_key = self.hallo_obj.get_api_key("imgur")
    if api_key is None:
        return None
    api_dict = Commons.load_url_json(api_url, [["Authorization", api_key]])
    album_data = api_dict["data"]
    # Start on output with title and view count
    output = "Imgur album> Album title: {} | Gallery views: {:,} | ".format(
        album_data["title"], album_data["views"])
    if "section" in album_data:
        output += "Section: {} | ".format(album_data["section"])
    image_count = album_data["images_count"]
    # If an image was specified via a fragment, describe that image too
    if "#" in url_address:
        image_index = int(url_address.split("#")[-1])
        image = album_data["images"][image_index]
        size_string = self.file_size_to_string(int(image["size"]))
        output += "Image {} of {} | Current image: {}x{}, {}.".format(
            image_index + 1,
            image_count,
            image["width"],
            image["height"],
            size_string,
        )
        return output
    output += "{} images.".format(image_count)
    return output
def run(self, event):
    """Translate text via google's unofficial translate endpoint.

    An optional first argument of the form "from->to" selects languages;
    otherwise auto-detection to English is used.
    """
    args = event.command_args.split()
    if len(args) <= 1:
        lang_change = ""
        trans_string = event.command_args
    else:
        lang_change = args[0]
        trans_string = " ".join(args[1:])
    if "->" not in lang_change:
        # First word was not a language spec; put it back into the text
        lang_from = "auto"
        lang_to = "en"
        trans_string = lang_change + " " + trans_string
    else:
        lang_parts = lang_change.split("->")
        lang_from = lang_parts[0]
        lang_to = lang_parts[1]
    trans_safe = urllib.parse.quote(trans_string.strip(), "")
    # This uses google's secret translate API, it's not meant to be used by robots, and often it won't work
    url = (
        "http://translate.google.com/translate_a/t?client=t&text={}&hl=en&sl={}&tl={}"
        "&ie=UTF-8&oe=UTF-8&multires=1&otf=1&pc=1&trs=1&ssel=3&tsel=6&sc=1"
        .format(trans_safe, lang_from, lang_to))
    trans_dict = Commons.load_url_json(url, [], True)
    translation = " ".join(segment[0] for segment in trans_dict[0])
    return event.create_response("Translation: {}".format(translation))
def run(self, event):
    """Respond with a weather forecast for a stored or specified location.

    The arguments may name a day ("today", "tomorrow", a weekday name,
    "N days", "N weeks"), and optionally a user or a location.
    """
    line_clean = event.command_args.strip().lower()
    # Clear input fluff
    regex_fluff = re.compile(r"\b(for|[io]n)\b")
    line_clean = regex_fluff.sub("", line_clean).strip()
    # Hunt for the days offset
    days_offset = 0
    regex_now = re.compile(r"(now|current(ly)?|today)")
    regex_tomorrow = re.compile(r"(to|the\s+)morrow")
    regex_weekday = re.compile(
        r"\b(this\s+|next\s+|)(mo(n(day)?)?|tu(e(s(day)?)?)?|we(d(nesday)?)?|th(u(r(sday)?)?)?|"
        + r"fr(i(day)?)?|sa(t(urday)?)?|su(n(day)?)?)\b")
    regex_days = re.compile(r"(([0-9]+)\s*d(ays?)?)")
    regex_weeks = re.compile(r"(([0-9]+)\s*w(eeks?)?)")
    if regex_now.search(line_clean):
        days_offset = 0
        line_clean = regex_now.sub("", line_clean).strip()
    elif regex_tomorrow.search(line_clean):
        days_offset = 1
        line_clean = regex_tomorrow.sub("", line_clean).strip()
    elif regex_weekday.search(line_clean):
        # Days until the next occurrence of the named weekday
        match = regex_weekday.search(line_clean)
        current_weekday = datetime.date.today().weekday()
        specified_weekday = self.weekday_to_number(match.group(2))
        days_offset = (specified_weekday - current_weekday) % 7
        line_clean = regex_weekday.sub("", line_clean).strip()
    elif regex_days.search(line_clean):
        days_offset = int(regex_days.search(line_clean).group(2))
        line_clean = regex_days.sub("", line_clean).strip()
    elif regex_weeks.search(line_clean):
        days_offset = 7 * int(regex_weeks.search(line_clean).group(2))
        line_clean = regex_weeks.sub("", line_clean).strip()
    # Figure out if a user or city was specified
    user_data_parser = hallo.modules.user_data.UserDataParser()
    if line_clean == "":
        location_entry = user_data_parser.get_data_by_user_and_type(
            event.user, hallo.modules.user_data.WeatherLocationData)
        if location_entry is None:
            return event.create_response(
                "No location stored for this user. Please specify a location or "
                + 'store one with the "setup weather location data" function.'
            )
    else:
        test_user = event.server.get_user_by_name(line_clean)
        if event.channel is not None and event.channel.is_user_in_channel(test_user):
            location_entry = user_data_parser.get_data_by_user_and_type(
                test_user, hallo.modules.user_data.WeatherLocationData)
            if location_entry is None:
                return event.create_response(
                    "No location stored for this user. Please specify a location or "
                    + 'store one with the "setup weather location data" function.'
                )
        else:
            location_entry = hallo.modules.user_data.WeatherLocationData.create_from_input(
                event)
    # Get API response
    api_key = event.server.hallo.get_api_key("openweathermap")
    if api_key is None:
        return event.create_response("No API key loaded for openweathermap.")
    url = ("https://api.openweathermap.org/data/2.5/forecast/daily{}"
           "&cnt=16&APPID={}".format(self.build_query(location_entry), api_key))
    response = Commons.load_url_json(url)
    # Check API responded well
    if str(response["cod"]) != "200":
        return event.create_response("Location not recognised.")
    # Check that days is within bounds for API response
    if days_offset > len(response["list"]):
        return event.create_response(
            "I cannot predict the weather that far in the future. "
            + "I can't predict much further than 2 weeks.")
    # Format and return output
    city_name = response["city"]["name"]
    if days_offset == 0:
        # No offset given: summarise today, tomorrow and the day after

        def _day_summary(index):
            # Main fields for one forecast entry (temp converted K -> C)
            entry = response["list"][index]
            return (
                entry["weather"][0]["main"],
                entry["weather"][0]["description"],
                entry["temp"]["day"] - 273.15,
                entry["humidity"],
                entry["speed"],
            )

        today = _day_summary(0)
        tomorrow = _day_summary(1)
        day_after = _day_summary(2)
        output = (
            "Weather in {} today will be {} ({}) "
            "Temp: {:.2f}C, Humidity: {}%, Wind speed: {}m/s. ".format(
                city_name, *today))
        output += "Tomorrow: {} ({}) {:.2f}C {}% {}m/s ".format(*tomorrow)
        output += "Day after: {} ({}) {:.2f}C {}% {}m/s.".format(*day_after)
        return event.create_response(output)
    forecast = response["list"][days_offset]
    output = ("Weather in {} {} will be {} ({}). "
              "Temp: {:.2f}C, Humidity: {}%, Wind speed: {}m/s".format(
                  city_name,
                  self.number_days(days_offset),
                  forecast["weather"][0]["main"],
                  forecast["weather"][0]["description"],
                  forecast["temp"]["day"] - 273.15,
                  forecast["humidity"],
                  forecast["speed"],
              ))
    return event.create_response(output)
def current_state(self) -> List[Dict]:
    """Fetch the newest post listing for the watched subreddit."""
    listing = Commons.load_url_json(
        "https://www.reddit.com/r/{}/new.json".format(self.subreddit)
    )
    return listing["data"]["children"]
def _format_custom_sites(
        self, server: Server, channel: Optional[Channel],
        user: Optional[User],
        item: ElementTree.Element) -> Optional[EventMessage]:
    """Build a site-specific update message for feeds needing special
    handling (xkcd, awoocomic, smbc, rss.app, nitter), or None."""
    if "xkcd.com" in self.url:
        # xkcd: fetch the comic's JSON metadata to get the alt text
        item_title = item.find("title").text
        item_link = item.find("link").text
        comic_number = item_link.strip("/").split("/")[-1]
        comic_json = Commons.load_url_json(
            f"https://xkcd.com/{comic_number}/info.0.json")
        alt_text = comic_json["alt"]
        output = f'Update on "{self.feed_title}" RSS feed. "{item_title}" {item_link}\nAlt text: {alt_text}'
        return EventMessage(server, channel, user, output, inbound=False)
    if "awoocomic" in self.feed_title:
        item_title = item.find("title").text
        # Only the part before the dash is the strip title
        if " - " in item_title:
            item_title = item_title.split(" - ")[0]
        item_link = item.find("link").text
        output = f'Update on "{self.feed_title}" RSS feed. "{item_title}" {item_link}'
        return EventMessage(server, channel, user, output, inbound=False)
    if "smbc-comics.com" in self.url:
        # smbc: scrape the page for the comic image, its title text,
        # and the hidden after-comic image
        item_title = item.find("title").text
        item_link = item.find("link").text
        soup = BeautifulSoup(Commons.load_url_string(item_link), "html.parser")
        comic_img = soup.select_one("img#cc-comic")
        alt_text = comic_img["title"]
        after_comic_img = soup.select_one("#aftercomic img")
        return EventMessageWithPhoto(
            server, channel, user,
            f'Update on "{self.feed_title}" RSS feed. "{item_title}" {item_link}\nAlt text: {alt_text}',
            [comic_img["src"], after_comic_img["src"]],
            inbound=False)
    if "rss.app" in self.url:
        # rss.app: the real link is embedded in a script variable on the page
        item_title = _get_item_title(item)
        item_link = get_rss_item_link(item)
        soup = BeautifulSoup(Commons.load_url_string(item_link), "html.parser")
        head_script = soup.select_one("head script")
        if head_script is None:
            return None
        url_result = re.search(
            r"var url = \"([^\"]+)\";", head_script.text, re.IGNORECASE)
        if url_result is None:
            return None
        output = f'Update on "{self.feed_title}" RSS feed. "{item_title}" {url_result.group(1)}'
        return EventMessage(server, channel, user, output, inbound=False)
    if "nitter.net" in self.url:
        # nitter: rewrite mirror links back to twitter
        item_title = _get_item_title(item)
        item_link = get_rss_item_link(item).replace("nitter.net", "twitter.com")
        output = f'Update on "{self.feed_title}" RSS feed. "{item_title}" {item_link}'
        return EventMessage(server, channel, user, output, inbound=False)
    return None
def current_state(self) -> List[Dict]:
    """Fetch the latest 50 e621 posts for the search, sorted newest-id first."""
    search_term = "{} order:-id".format(self.search)  # Sort by id
    url = "https://e621.net/posts.json?tags={}&limit=50".format(
        urllib.parse.quote(search_term))
    return Commons.load_url_json(url)["posts"]