def DeleteRule(ad):
    """Remove the ad-block rule `ad` from the persisted "ads" JSON.

    Returns True when the rule existed and was removed (the file is
    re-saved), False when no such rule was present.
    """
    rules = sf.OpenJson(name="ads")
    if ad not in rules:
        return False
    rules.pop(ad, None)
    sf.SaveJson(name="ads", data=rules)
    return True
def DeleteChannel(key):
    """Remove the channel `key` from the persisted "channels" JSON.

    Returns True when the channel existed and was removed (the file is
    re-saved), False when no such channel was present.
    """
    stored = sf.OpenJson(name="channels")
    if key not in stored:
        return False
    stored.pop(key, None)
    sf.SaveJson(name="channels", data=stored)
    return True
def run(self):
    """Scrape the Afterbirth+ API docs and write the JSON/completions resources.

    Reads 'docs_path' (and optionally 'completions_scope') from the package
    settings; shows an error message when the path setting is missing or not
    an existing directory.
    """
    settings = SharedFunctions.get_package_settings()
    if not settings:
        return
    path = settings.get("docs_path", None)
    # Validate the configured docs path before doing any work.
    if not path:
        SharedFunctions.error_message("'docs_path' setting is undefined.")
        return
    if not os.path.isdir(path):
        SharedFunctions.error_message(
            "The value of the 'docs_path' setting is not a path to an existing directory."
        )
        return
    scraped_api = scrape_api(path)
    if not scraped_api:
        # Nothing scraped; silently do nothing, as before.
        return
    SharedFunctions.write_resource_file("The Subliming of Isaac.json",
                                        scraped_api)
    completions = generate_completions(
        scraped_api, settings.get("completions_scope", "source.lua"))
    if completions:
        SharedFunctions.write_resource_file(
            "The Subliming of Isaac.sublime-completions", completions)
    sublime.status_message("Finished scraping Afterbirth+ API...")
def handleInserts(self, dynamicDB):
    """
    handleInserts returns a newly generated row to be inserted to the
    store. First, the function passes a set of matches generated using
    regex. If a key is in the store, then a message explaining this is
    printed out and a random key is generated instead. If the format is
    incorrect (not as INSERT [key] WITH VALUES (col=tag, col2=tag2,
    col3=tag3...), INSERT VALUES (col=tag, col2=tag2, col3=tag3...), a
    usage is printed out, and the current operation is abandoned.

    Keyword Arguments:
    dynamicDB -- the key value store maintained in the program

    Return values:
    self.insertedRow -- the row we are inserting
    """
    key = ''
    values = {}
    equalMatches = self.matches
    # Tokenize the raw input; keeps words, numbers and common symbols.
    parser = re.compile(r'[a-z-0-9*!@#$%^&~_.+{}:\'"]+', re.IGNORECASE)
    matches = parser.findall(" ".join(self.matches))
    # This is if the user inputs in the format
    # INSERT [key] WITH VALUES (col=tag, col2=tag2, col3=tag3...)
    if matches[2].lower() == 'with' and matches[3].lower() == 'values' \
            and len(matches) >= 5:
        # check if key already exists in the key value store
        key = matches[1].lower()
        # NOTE(review): 'isFree' == 'false' is taken to mean the key is
        # occupied by a live row — confirm against the store's conventions.
        if key in dynamicDB and dynamicDB[key]['isFree'] == 'false':
            print("Key already in key value store. Selecting new random "
                  "key instead...\n")
            while key in dynamicDB:
                key = SharedFunctions.generateRandomKey()
        matches = SharedFunctions.spaceMatches(4, equalMatches)
        # This function parses the matches and generates new rows in the
        # proper format.
        values = self.generateNewRows(matches)
    # This is if the user inputs in the format
    # INSERT VALUES (col=tag, col2=tag2, col3=tag3...)
    elif matches[1].lower() == 'values' and len(matches) >= 3:
        matches = SharedFunctions.spaceMatches(2, equalMatches)
        # No key was supplied; keep drawing random keys until a free one
        # is found.
        while key in dynamicDB or key == '':
            key = SharedFunctions.generateRandomKey()
        values = self.generateNewRows(matches)
    else:
        print("Insert format is incorrect. Usage:\n INSERT [key] WITH "
              "VALUES (col=tag, col2=tag2...) \n INSERT VALUES (col=tag,"
              " col2=tag2, col3=tag3...)")
    # Only record an inserted row when parsing actually produced values.
    if values != {}:
        self.insertedRow = {key: {'isFree': 'false', 'data': values}}
    return self.insertedRow
def parse_enums(a_path):
    """Parse enum declarations out of the documentation file at `a_path`.

    Returns a dict mapping each enum name to
    {APIKeyEnum.MEMBERS: [member dicts]}; each member dict carries the
    member name and, when present, its de-HTML-ified description.
    """
    text = SharedFunctions.read_file(a_path)
    doc_lines = text.split("\n")
    if not doc_lines:
        return None
    enums = {}
    current_name = None
    members = []
    for doc_line in doc_lines:
        name_match = REGEX_ENUM_NAME.search(doc_line)
        if name_match:
            # A new enum begins: flush whatever was collected so far.
            if current_name:
                enums[current_name] = {APIKeyEnum.MEMBERS: members}
                members = []
            current_name = name_match.group(1)
            continue
        member_match = REGEX_ENUM_MEMBER.search(doc_line)
        if not member_match:
            continue
        member = {APIKeyEnum.NAME: member_match.group("name")}
        desc = member_match.group("desc")
        if desc:
            # Strip HTML tags from the description before storing it.
            member[APIKeyEnum.DESCRIPTION] = REGEX_HTML_TAG_REPLACER.sub("", desc)
        members.append(member)
    # Flush the final enum, if it gathered any members.
    if members:
        enums[current_name] = {APIKeyEnum.MEMBERS: members}
    return enums
def main():
    """Client loop: connect to the server, act on the command it sends
    (GRC / AoA / Shut down) and repeat until told to shut down."""
    print("Starting up client")
    # Server endpoint and the file the AoA reading is served from.
    server_ip = '192.168.192.1'
    server_port = 5000
    angle_path = os.getcwd() + '/angle.txt'
    keep_running = True
    while keep_running:
        # One fresh client connection per command round-trip.
        sock = sf.createSocket(server_ip, server_port, serverBool=False)
        # After connection is established, wait for input from the server.
        command = sock.recv(1024)
        if command == b'GRC':
            # Start the music thread only once; the GNUThread event marks
            # that it is already running.
            if not GNUThread.is_set():
                threading.Thread(target=music.main).start()
                GNUThread.set()
        elif command == b'AoA':
            sock.sendall(readFile(angle_path).encode('utf-8'))
        elif command == b'Shut down':
            keep_running = False
            music.exitThread.set()
        else:
            print("Got unknown command: ", command)
        sock.close()
def GetChannels():
    """Return every channel key from the persisted "channels" JSON as a
    single string, one stringified channel per line.
    """
    channels = sf.OpenJson(name="channels")
    # join-based build instead of repeated += (quadratic concatenation).
    return "".join(str(channel) + "\n" for channel in channels)
def formatTell(targetNick, tell):
    """Format a stored 'tell' message for delivery to `targetNick`.

    tell -- dict with u"sentAt" (UNIX timestamp), u"text" and u"sender".
    Returns a unicode line with the message, sender, send time and age.
    """
    timeSent = datetime.utcfromtimestamp(tell[u"sentAt"])
    # Bug fix: timedelta.seconds discards whole days, so the reported age
    # wrapped around for tells older than 24 hours. total_seconds() gives
    # the full elapsed time; int() keeps the argument integral as before.
    timeSinceTell = SharedFunctions.durationSecondsToText(
        int((datetime.utcnow() - timeSent).total_seconds()))
    return u"{recipient}: {message} (sent by {sender} on {timeSent} UTC; {timeSinceTell} ago)"\
        .format(recipient=targetNick, message=tell[u"text"],
                sender=tell[u"sender"], timeSent=timeSent.isoformat(' '),
                timeSinceTell=timeSinceTell)
def execute(self, message):
    """
    :type message: IrcMessage

    Reply with how long the bot has been connected.
    """
    secondsOnline = round(time.time() - message.bot.connectedAt)
    durationText = SharedFunctions.durationSecondsToText(secondsOnline)
    message.reply(u"I have been running for {}".format(durationText), "say")
def enable_plugin(self, pname): try: self.logger.debug("enabling plugin '%s'" % (pname)) try: self.pluginmods[pname] = __import__(pname) except Exception, e: self.logger.error("exception importing '%s' plugin:\n%s" % (pname, shared.indent(traceback.format_exc().strip(), 1))) else:
def execute(self, message):
    """
    :type message: IrcMessage

    Say how long the bot has been connected to the message's source.
    """
    secondsUp = round(time.time() - message.bot.connectedAt)
    text = u"I have been running for {}".format(
        SharedFunctions.durationSecondsToText(secondsUp))
    message.bot.say(message.source, text)
def handleDeletes(self, dynamicDB):
    """
    handleDeletes handles all information relating to deletes, including
    processing input. handleDeletes passes either a key or a set of values
    to the function marking keys for deletion. The matches generated using
    regex are used for this purpose. If a key is not in the store, then a
    message explaining this is printed out. If the format is incorrect
    (not as DELETE [key], or DELETE VALUES (col=tag, col2=tag2,
    col3=tag3...) a usage is printed out, and the current operation is
    abandoned.

    Keyword Arguments:
    dynamicDB -- the key value store maintained in the program

    Return values:
    selectedKeys, the list of keys that we delete from the store.
    """
    # Tokenize the raw input; keeps words, numbers and common symbols.
    parser = re.compile(r'[a-z-0-9*!@#$%^&~_.+{}:\'"]+', re.IGNORECASE)
    matches = parser.findall(" ".join(self.matches))
    # This is if the input is in the form DELETE VALUES (col=tag, col2=tag2...)
    if matches[1].lower() == 'values' and len(matches) >= 3:
        # AND/OR conjunctions get a different parser and key selector.
        if " and " in " ".join(matches).lower() or " or " in " ".join(
                matches).lower():
            matches = SharedFunctions.conjMatches(2, self.matches)
            self.selectedKeys += SharedFunctions.selectKeyswithAndOrs(
                matches, dynamicDB)
        else:
            matches = SharedFunctions.spaceMatches(2, self.matches)
            self.selectedKeys += SharedFunctions.findMatchingKeys(
                matches, dynamicDB)
    # This is if just a key was specified.
    elif len(matches) == 2:
        # NOTE(review): 'isFree' == 'false' appears to mark an occupied
        # (live) row — confirm against the store's conventions.
        if matches[1].lower() in dynamicDB and dynamicDB[matches[1].lower()] \
                ['isFree'] == 'false':
            self.selectedKeys.append(matches[1].lower())
        else:
            print("The key is not in the store!")
    else:
        print("Delete format is incorrect. Usage:\n DELETE [key] "
              " \n DELETE VALUES (col=tag,"
              " col2=tag2, col3=tag3...) \n DELETE VALUES (col=tag"
              " AND/OR col2=tag2 AND/OR col3=tag3...)")
    return self.selectedKeys
def formatNewTweetText(self, username, tweetData, tweetAge=None, addTweetAge=False):
    """Turn a raw tweet API dict into a single display line.

    username -- the account key the tweet belongs to
    tweetData -- decoded tweet from the Twitter API; uses 'text',
        'id_str', 'created_at' and the 'entities' media/url lists
    tweetAge -- optional precomputed age (has total_seconds(), so a
        timedelta); looked up from 'created_at' when absent and
        addTweetAge is True
    addTweetAge -- when True, append ' (<age> ago)' after the tweet text
    """
    if addTweetAge:
        if not tweetAge:
            tweetAge = self.getTweetAge(tweetData['created_at'])
        tweetAge = SharedFunctions.durationSecondsToText(
            tweetAge.total_seconds())
        tweetAge = ' ({} ago)'.format(tweetAge)
    else:
        tweetAge = ''
    tweetUrl = "http://twitter.com/_/status/{}".format(
        tweetData['id_str']
    )  #Use _ instead of username to save some characters
    #Remove newlines
    formattedTweetText = tweetData['text'].replace('\n\n', '\n').replace(
        '\n', Constants.GREY_SEPARATOR)
    #Fix special characters (convert '&amp;' to '&' for instance)
    formattedTweetText = HTMLParser.HTMLParser().unescape(
        formattedTweetText)
    #Remove the link to the photo at the end, but mention that there is one
    if 'media' in tweetData['entities']:
        for mediaItem in tweetData['entities']['media']:
            formattedTweetText = formattedTweetText.replace(
                mediaItem['url'], u'')
            formattedTweetText += u"(has {})".format(mediaItem['type'])
    #Add in all the text around the tweet now, so we get a better sense of message length
    formattedTweetText = u"{name}: {text}{age}{sep}{url}".format(
        name=SharedFunctions.makeTextBold(self.getDisplayName(username)),
        text=formattedTweetText,
        age=tweetAge,
        sep=Constants.GREY_SEPARATOR,
        url=tweetUrl)
    #Expand URLs (if it'd fit)
    if 'urls' in tweetData['entities']:
        for urldata in tweetData['entities']['urls']:
            # Only expand when the resulting message stays under 325 chars.
            if len(formattedTweetText) - len(urldata['url']) + len(
                    urldata['expanded_url']) < 325:
                formattedTweetText = formattedTweetText.replace(
                    urldata['url'], urldata['expanded_url'])
    return formattedTweetText
def enable_plugin(self, pname): try: self.logger.debug("enabling plugin '%s'" % (pname)) try: self.pluginmods[pname] = __import__(pname) except Exception, e: self.logger.error( "exception importing '%s' plugin:\n%s" % (pname, shared.indent(traceback.format_exc().strip(), 1))) else:
def generateWord(self, parameters=None): """Generate a word by putting letters together in semi-random order. Based on an old mIRC script of mine""" # Initial set-up vowels = ['a', 'e', 'i', 'o', 'u'] specialVowels = ['y'] consonants = [ 'b', 'c', 'd', 'f', 'g', 'h', 'k', 'l', 'm', 'n', 'p', 'r', 's', 't' ] specialConsonants = ['j', 'q', 'v', 'w', 'x', 'z'] newLetterFraction = 5 vowelChance = 50 #percent #Determine how many words we're going to have to generate repeats = 1 if parameters and len(parameters) > 0: repeats = SharedFunctions.parseInt(parameters[0], 1, 1, 25) words = [] for i in xrange(0, repeats): word = u"" currentVowelChance = vowelChance currentNewLetterFraction = newLetterFraction consonantCount = 0 while random.randint(0, currentNewLetterFraction) <= 6: if random.randint(1, 100) <= currentVowelChance: consonantCount = 0 #vowel. Check if we're going to add a special or normal vowel if random.randint(1, 100) <= 10: word += random.choice(specialVowels) currentVowelChance -= 30 else: word += random.choice(vowels) currentVowelChance -= 20 else: consonantCount += 1 #consonant, same deal if random.randint(1, 100) <= 25: word += random.choice(specialConsonants) currentVowelChance += 30 else: word += random.choice(consonants) currentVowelChance += 20 if consonantCount > 3: currentVowelChance = 100 currentNewLetterFraction += 1 #Enough letters added. Finish up word = word[0].upper() + word[1:] words.append(word) #Enough words generated, let's return the result return u", ".join(words)
def executeScheduledFunction(self):
    """Download any new tweets for every configured account and append
    them to the per-account text files, updating metadata.json with the
    highest downloaded tweet id and the line count per account.
    """
    starttime = time.time()
    #First load all the stored tweet data, if it exists
    twitterInfoFilename = os.path.join(GlobalStore.scriptfolder, 'data',
                                       'tweets', 'metadata.json')
    if os.path.exists(twitterInfoFilename):
        with open(twitterInfoFilename, 'r') as twitterInfoFile:
            storedInfo = json.load(twitterInfoFile)
    else:
        storedInfo = {}
    #Create the 'tweets' folder if it doesn't exist already, so we can create our files in there once we're done
    if not os.path.exists(os.path.dirname(twitterInfoFilename)):
        os.makedirs(os.path.dirname(twitterInfoFilename))
    #Go through all the names we need to update
    for name, username in self.twitterUsernames.iteritems():
        highestIdDownloaded = 0
        if username not in storedInfo:
            storedInfo[username] = {'linecount': 0}
        elif "highestIdDownloaded" in storedInfo[username]:
            highestIdDownloaded = storedInfo[username][
                'highestIdDownloaded']
        # tweetResponse is (successBool, tweetList)
        tweetResponse = SharedFunctions.downloadTweets(
            username, downloadNewerThanId=highestIdDownloaded)
        if not tweetResponse[0]:
            self.logError(
                "[STTip] Something went wrong while downloading new tweets for '{}', skipping"
                .format(username))
            continue
        #If there aren't any tweets, stop here
        if len(tweetResponse[1]) == 0:
            continue
        tweets = tweetResponse[1]
        #Reverse tweets so they're from old to new. That way when we write them to file, the entire file will be old to new
        # Not necessary but neat
        tweets.reverse()
        #All tweets downloaded. Time to process them
        tweetfile = open(
            os.path.join(GlobalStore.scriptfolder, 'data', 'tweets',
                         "{}.txt".format(username)), "a")
        for tweet in tweets:
            # One tweet per line; newlines inside a tweet become spaces.
            tweetfile.write(tweet['text'].replace('\n', ' ').encode(
                encoding='utf-8', errors='replace') + '\n')
        tweetfile.close()
        #Get the id of the last tweet in the list (the newest one), so we know where to start downloading from next time
        storedInfo[username]['highestIdDownloaded'] = tweets[-1]['id']
        storedInfo[username]['linecount'] += len(tweets)
    #Save the stored info to disk too, for future lookups
    with open(twitterInfoFilename, 'w') as twitterFile:
        twitterFile.write(json.dumps(storedInfo))
    self.logInfo(
        "[STtip] Updating tweets took {} seconds".format(time.time() -
                                                         starttime))
def GetAdsRuleList():
    """Return the ad-block rules from the "ads" JSON, one rule per line.

    When the "enable" flag is 0 a warning header is prepended; the
    "enable" entry itself is never listed as a rule.
    """
    ads = sf.OpenJson(name="ads")
    header = "*WARNING!\nAdBlock disabled*\n\n" if ads["enable"] == 0 else ""
    # join-based build instead of repeated += (quadratic concatenation).
    return header + "".join(str(ad) + "\n" for ad in ads if ad != "enable")
def ChangeEnableAds(isEnbale):
    """Toggle or set the ad-block "enable" flag in the "ads" JSON.

    isEnbale -- True to enable, None to toggle the current state, anything
        else to disable. (Parameter name kept as-is for compatibility with
        existing callers.)
    Returns the resulting status string, "enabled" or "disabled".
    """
    ads = sf.OpenJson(name="ads")
    if isEnbale == None:
        # No explicit state requested: flip whatever is stored.
        new_value = 0 if ads["enable"] == 1 else 1
    else:
        # Preserve the original strict '== True' comparison semantics.
        new_value = 1 if isEnbale == True else 0
    ads["enable"] = new_value
    ads_block_status = "enabled" if new_value == 1 else "disabled"
    sf.SaveJson(name="ads", data=ads)
    return ads_block_status
def load_FeatureMappingDictionary(self, fileaddress):
    """Load the pickled feature-mapping dictionary from `fileaddress`.

    Halts the program (via self.PROGRAM_Halt) when the file is missing,
    cannot be unpickled, or does not contain a dict; on success logs a
    summary through self.lp.
    """
    if not GF.FILE_CheckFileExists(fileaddress):
        self.PROGRAM_Halt("Cannot load FeatureMappingDictionary file:" + fileaddress)
    try:
        # Bug fix: the old open()/close() pair leaked the file handle when
        # pickle.load raised; the with-statement always closes it.
        with open(fileaddress, "rb") as fhandle:
            self.__FeatureMappingDictionary = pickle.load(fhandle)
    except Exception as E:
        self.PROGRAM_Halt("Error loading FeatureMappingDictionary from file. Error:" + E.message)
    if not isinstance(self.__FeatureMappingDictionary, dict):
        self.PROGRAM_Halt("Error loading FeatureMappingDictionary from file. Error: Not a pickled python dictionary.")
    self.lp(["FeatureMappingDictionary is successfully loaded from file.",
             "file:" + fileaddress,
             "number of features:" + str(len(self.__FeatureMappingDictionary))])
def run(self):
    """Show a quick panel for browsing the Afterbirth+ API dictionary,
    loading the API lazily on first use (tracked via the module-level
    API_LOADED flag)."""
    global API_LOADED
    if not API_LOADED:
        # Load once; bail out silently when loading fails so the flag
        # stays unset and the next run retries.
        self.afterbirth_api = SharedFunctions.load_afterbirth_api()
        if not self.afterbirth_api:
            return
        API_LOADED = True
    items = [[key, "Browse dictionary", type(item)]
             for key, item in self.afterbirth_api.items()]
    self.keys_so_far = []
    self.items = self.sort_items(items)
    self.separate_types(self.items)
    self.window.show_quick_panel(self.items, self.on_select)
def reload_plugin(self, pname):
    """Reload an already-enabled plugin module and re-instantiate its class.

    Only acts when `pname` is present in both self.pluginmods and
    self.pluginobjs (i.e. the plugin was previously enabled). Any
    exception during reload disables the plugin; after a successful
    reload the plugin's required config section is re-checked and the
    plugin is disabled when it is missing.
    """
    if pname in self.pluginmods and pname in self.pluginobjs:
        try:
            self.logger.debug("reloading plugin '%s'" % (pname))
            # Re-import the module, then build a fresh plugin object from
            # the class of the same name inside it.
            self.pluginmods[pname] = reload(sys.modules[pname])
            self.pluginobjs[pname] = getattr(self.pluginmods[pname],
                                             pname)(self.global_config)
        except Exception, e:
            self.logger.error("exception reloading '%s' plugin:\n%s" %
                              (pname,
                               shared.indent(traceback.format_exc().strip(), 1)))
            self.disable_plugin(pname)
        else:
            if not self.pluginobjs[pname].required_config_loaded():
                self.logger.error(
                    "plugin '%s' requires config section [%s] with parameters %s"
                    % (pname, pname, self.pluginobjs[pname].required_config))
                self.disable_plugin(pname)
            else:
                self.logger.info("reloaded plugin '%s'" % (pname))
def generateName(self, parameters=None):
    """Generate one or more random full names from the stored name files.

    parameters -- optional; may contain a gender specifier (recognised by
        self.isGenderParameter), a number of names (clamped to 1-10),
        and/or the string 'addletter' to force a middle initial.
    Returns the generated names joined by the shared separator.
    """
    genderDict = None
    namecount = 1
    #Determine if a specific gender name and/or number of names was requested
    if parameters:
        #Make sure parameters is a list, so we don't iterate over each letter in a string accidentally
        if not isinstance(parameters, (tuple, list)):
            parameters = [parameters]
        #Go through all parameters to see if they're either a gender specifier or a name count number
        for param in parameters:
            if self.isGenderParameter(param):
                genderDict = self.getGenderWords(param, False)
            else:
                try:
                    namecount = int(param)
                    # Limit the number of names
                    namecount = max(namecount, 1)
                    namecount = min(namecount, 10)
                except ValueError:
                    # Not a number; silently ignore this parameter.
                    pass
    #If no gender parameter was passed, pick a random one
    if not genderDict:
        genderDict = self.getGenderWords(None, False)
    names = []
    for i in xrange(namecount):
        # First get a last name
        lastName = self.getRandomLine("LastNames.txt")
        #Get the right name for the provided gender
        if genderDict['gender'] == 'f':
            firstName = self.getRandomLine("FirstNamesFemale.txt")
        else:
            firstName = self.getRandomLine("FirstNamesMale.txt")
        #with a chance add a middle letter:
        if (parameters and "addletter" in parameters) or random.randint(1, 100) <= 15:
            names.append(u"{} {}. {}".format(
                firstName,
                self.getBasicOrSpecialLetter(50, 75).upper(), lastName))
        else:
            names.append(u"{} {}".format(firstName, lastName))
    return SharedFunctions.joinWithSeparator(names)
def getRandomLine(self, filename, filelocation=None):
    """Return a random line from `filename` inside the generators folder.

    filelocation -- optional subfolder; defaults to self.filesLocation.
    Returns "[Access error]" when the resolved path escapes the
    generators folder, and "[File error]" when the line helper returns
    nothing.
    """
    if not filelocation:
        filelocation = self.filesLocation
    filepath = os.path.abspath(
        os.path.join(GlobalStore.scriptfolder, filelocation, filename))
    #Check if the provided file is in our 'generator' folder
    # Security fix: compare against the folder path with a trailing
    # separator. A bare startswith(self.filesLocation) also accepted
    # sibling directories such as '<filesLocation>-other/...'.
    if not filepath.startswith(os.path.join(self.filesLocation, '')):
        #Trying to get out of the 'generators' folder
        self.logWarning(
            "[Gen] User is trying to access files outside the 'generators' folder with filename '{}'"
            .format(filename))
        return "[Access error]"
    line = SharedFunctions.getRandomLineFromFile(filepath)
    if not line:
        #The line function encountered an error, so it returned None;
        # hand back a placeholder string since callers expect a string.
        return "[File error]"
    return line
def execute(self, message):
    """
    :type message: IrcMessage

    Run 'git pull' and report how many new commits came in, listing at
    most the 15 newest commit messages since the last reported commit.
    """
    replytext = u""
    #First just get anything new, if there is any
    output = subprocess.check_output(['git', 'pull'])
    # Bug fix: git 2.15 changed the message from 'Already up-to-date.' to
    # 'Already up to date.', so accept both spellings.
    if output.startswith(("Already up-to-date.", "Already up to date.")):
        replytext = u"No new updates"
    else:
        maxUpdatesToDisplay = 15
        #New files, new updates! Check what they are
        output = subprocess.check_output(
            ['git', 'log', '--format=oneline'])
        outputLines = output.splitlines()
        commitMessages = []
        linecount = 0
        for line in outputLines:
            # Each oneline entry is '<hash> <message>'.
            lineparts = line.split(" ", 1)
            #If we've reached a commit we've already mentioned, stop the whole thing
            if lineparts[0] == self.lastCommitHash:
                break
            linecount += 1
            #Only show the last few commit messages, but keep counting lines regardless
            if len(commitMessages) < maxUpdatesToDisplay:
                commitMessages.append(lineparts[1])
        if linecount == 1:
            replytext = u"One new commit: {}".format(commitMessages[0])
        else:
            commitMessages.reverse(
            )  #Otherwise the messages are ordered new to old
            replytext = u"{:,} new commits: {}".format(
                linecount, SharedFunctions.joinWithSeparator(commitMessages))
            if linecount > maxUpdatesToDisplay:
                replytext += u"; {:,} older ones".format(
                    linecount - maxUpdatesToDisplay)
        #Set the last mentioned hash to the newest one
        self.lastCommitHash = outputLines[0].split(" ", 1)[0]
    message.reply(replytext, "say")
def reload_plugin(self, pname):
    """Reload an already-enabled plugin module and re-instantiate its class.

    Only acts when `pname` is present in both self.pluginmods and
    self.pluginobjs. Any exception during reload disables the plugin;
    after a successful reload the required config section is re-checked
    and the plugin is disabled when it is missing.
    """
    if pname in self.pluginmods and pname in self.pluginobjs:
        try:
            self.logger.debug("reloading plugin '%s'" % (pname))
            # Re-import the module, then build a fresh plugin object from
            # the class of the same name inside it.
            self.pluginmods[pname] = reload(sys.modules[pname])
            self.pluginobjs[pname] = getattr(self.pluginmods[pname],
                                             pname)(self.global_config)
        except Exception, e:
            self.logger.error(
                "exception reloading '%s' plugin:\n%s" %
                (pname, shared.indent(traceback.format_exc().strip(), 1)))
            self.disable_plugin(pname)
        else:
            if not self.pluginobjs[pname].required_config_loaded():
                self.logger.error(
                    "plugin '%s' requires config section [%s] with parameters %s"
                    % (pname, pname, self.pluginobjs[pname].required_config))
                self.disable_plugin(pname)
            else:
                self.logger.info("reloaded plugin '%s'" % (pname))
def parse_namespace(a_path):
    """Parse one namespace documentation file into (name, interface).

    Scans the file line by line for function signatures and description
    lines, building {APIKeyEnum.FUNCTIONS: {func_name: func_info}}.
    Returns (None, None) when no namespace name or no functions are found.
    """
    contents = SharedFunctions.read_file(a_path)
    namespace_name = get_namespace_name(contents)
    lines = contents.split("\n")
    if lines:
        namespace_interface = {}
        namespace_functions = {}
        function = {}
        for line in lines:
            function_signature_match = REGEX_FUNCTION_SIGNATURE.search(line)
            if function_signature_match:
                # A new signature starts: store the previous function under
                # its name key. Note the stored dict is the same object, so
                # deleting NAME strips it from the stored record too (the
                # name lives on as the dict key).
                if function:
                    namespace_functions[function[APIKeyEnum.NAME]] = function
                    del function[APIKeyEnum.NAME]
                    function = {}
                function[APIKeyEnum.NAME] = function_signature_match.group(
                    "name")
                returns = get_function_return_type(function_signature_match)
                if returns:
                    function[APIKeyEnum.RETURNS] = returns
                parameters = get_function_parameters(function_signature_match)
                if parameters:
                    function[APIKeyEnum.PARAMETERS] = parameters
            else:
                description_match = REGEX_DESCRIPTION.search(line)
                if description_match:
                    # Strip HTML tags before attaching the description to
                    # the function currently being built.
                    description_string = description_match.group(1).strip()
                    description_string = REGEX_HTML_TAG_REPLACER.sub(
                        "", description_string)
                    if function:
                        function[APIKeyEnum.DESCRIPTION] = description_string
        # Flush the final function, if any.
        if function:
            namespace_functions[function[APIKeyEnum.NAME]] = function
            del function[APIKeyEnum.NAME]
        if namespace_functions:
            namespace_interface[APIKeyEnum.FUNCTIONS] = namespace_functions
        if namespace_name and namespace_interface:
            return namespace_name, namespace_interface
    return None, None
def process_response(self, message):
    """Hand `message` to every in-scope plugin's process_response hook.

    Plugins are visited in sorted-name order. A plugin lacking a
    process_response method is logged as an error and skipped; an
    exception raised inside one plugin is logged without stopping the
    remaining plugins.
    """
    self.logger.debug("process_response called")
    for key in sorted(self.pluginobjs.keys()):
        if self.in_plugin_scope(message, key):
            self.logger.debug("plugin '%s' is in scope, processing" % key)
            if (hasattr(self.pluginobjs[key], 'process_response')
                    and inspect.ismethod(
                        self.pluginobjs[key].process_response)):
                try:
                    self.pluginobjs[key].process_response(message)
                except Exception, e:
                    self.logger.error(
                        "exception in '%s' process_response():\n%s" %
                        (key,
                         shared.indent(traceback.format_exc().strip(), 1)))
            else:
                self.logger.error(
                    "skipping plugin '%s', process_response not defined" %
                    key)
        else:
            self.logger.debug("plugin '%s' is not in scope, SKIPPING" % key)
def execute(self, message):
    """
    :type message: IrcMessage

    Reply with a random stored tip (tweet) from the requested tip-giver;
    'random' picks a tip-giver at random. An unknown or missing name gets
    a usage message listing the available tip-givers.
    """
    name = ""
    if message.messagePartsLength > 0:
        name = message.messageParts[0].lower()
        if name == 'random':
            name = random.choice(self.twitterUsernames.keys())
    replytext = ""
    if name in self.twitterUsernames:
        replytext = SharedFunctions.getRandomLineFromTweetFile(
            self.twitterUsernames[name])
        # Prefix '<Name> tip:' unless the tweet already starts with the name.
        if not replytext.lower().startswith(name):
            replytext = u"{} tip: {}".format(name[0:1].upper() + name[1:],
                                             replytext)
    else:
        if name != "":
            replytext = "I don't know anybody by the name of '{}', sorry. ".format(name)
        replytext += "Type '{}{} <name>' to hear one of <name>'s tips, or use 'random' to have me pick a name for you. ".format(message.bot.factory.commandPrefix, message.trigger)
        replytext += "Available tip-givers: {}".format(", ".join(sorted(self.twitterUsernames.keys())))
    # NOTE(review): source formatting was lost; the encode may originally
    # have applied only to the usage branch — confirm intended placement.
    replytext = replytext.encode('utf-8', 'replace')
    message.bot.say(message.source, replytext)
def getTweets(self, name='random', searchterm=None):
    """Return the stored tweets for `name`, optionally filtered.

    name -- tip-giver name, or 'random' to pick one at random.
    searchterm -- optional filter; compiled as a case-insensitive regex,
        or used as a literal substring when it does not compile.
    Returns (True, tweets) on success, or (False, explanation) when the
    name is unknown, no tweet file exists yet, or nothing matched.
    """
    name = name.lower()
    if name == 'random':
        name = random.choice(self.twitterUsernames.keys())
    if name not in self.twitterUsernames:
        return (False,
                "I don't know anybody by the name of '{}', sorry. ".format(
                    name))
    tweetFileName = os.path.join(
        GlobalStore.scriptfolder, 'data', 'tweets',
        '{}.txt'.format(self.twitterUsernames[name]))
    if not os.path.exists(tweetFileName):
        # Kick off a download so the file exists next time.
        self.executeScheduledFunction()
        return (
            False,
            "I don't seem to have the tweets for '{}', sorry! I'll retrieve them right away, try again in a bit"
            .format(name))
    tweets = SharedFunctions.getAllLinesFromFile(tweetFileName)
    if searchterm is not None:
        #Search terms provided! Go through all the tweets to find matches
        regex = None
        try:
            regex = re.compile(searchterm, re.IGNORECASE)
        except (re.error, SyntaxError):
            self.logWarning(
                "[STtip] '{}' is an invalid regular expression. Using it literally"
                .format(searchterm))
        for i in xrange(0, len(tweets)):
            #Take a tweet from the start, and only put it back at the end if it matches
            tweet = tweets.pop(0)
            # Bug fix: the old 'regex and regex.search(tweet) or searchterm
            # in tweet' fell through to the literal substring test even when
            # a valid regex simply didn't match, due to and/or precedence.
            # Use the regex when it compiled; only fall back to the literal
            # test when compilation failed.
            if regex.search(tweet) if regex else searchterm in tweet:
                tweets.append(tweet)
    if len(tweets) == 0:
        return (False, "Sorry, no tweets matching your search were found")
    return (True, tweets)
except Exception, e: self.logger.error("exception importing '%s' plugin:\n%s" % (pname, shared.indent(traceback.format_exc().strip(), 1))) else: try: self.pluginobjs[pname] = getattr(self.pluginmods[pname], pname)(self.global_config) except AttributeError, e: self.logger.error("plugin '%s' could not be loaded (%s)" % (pname, e)) self.disable_plugin(pname) else: if not self.pluginobjs[pname].required_config_loaded(): self.logger.error("plugin '%s' requires config section '%s' with parameters %s" % (pname, pname, self.pluginobjs[pname].required_config)) self.disable_plugin(pname) else: self.logger.info("enabled plugin '%s'" % (pname)) except Exception, e: self.logger.error("exception when initializing plugin '%s':\n%s" % (pname, shared.indent(traceback.format_exc().strip(), 1))) self.disable_plugin(pname) def disable_plugin(self, pname): self.logger.info("disabling plugin '%s'" % (pname)) if (pname in sys.modules): del(sys.modules[pname]) if (pname in self.pluginmods): del(self.pluginmods[pname]) if (pname in self.pluginobjs): del(self.pluginobjs[pname]) if self.file_watcher != None: self.file_watcher.remove_item("%s.py" % pname) if self.auto_delete_class_files == True: self.logger.debug(" testing for class file : %s" % (os.path.join("%s" % self.dname, "%s$py.class" % pname))) if (os.path.isfile(os.path.join("%s" % self.dname, "%s$py.class" % pname))):
def updateTwitterMessages(self):
    """Refresh the stored tweets for every configured account.

    Sets self.isUpdating for the duration of the downloads so other code
    can tell an update is in progress.
    """
    self.isUpdating = True
    for displayName, accountName in self.twitterUsernames.iteritems():
        SharedFunctions.downloadNewTweets(accountName)
    self.isUpdating = False
def handleUpdates(self, dynamicDB):
    """
    handleUpdates handles anything in the form UPDATE [key] WITH VALUES
    (col=tag, col2=tag2, etc). Any other format causes the file to
    abandon the modification. Through each (col=tag, col2=tag2, etc), we
    change tag type's value to be the new value the user passed in. This
    set of changed values is then set to be the data with the value
    associated with the key. Other irregularities causing abandonment of
    modification include the type not being in the set of accepted types,
    and having tags/values without the other.

    Keyword Argument:
    dynamicDB -- the key value store maintained in the program

    Return Values:
    self.updatedRow, self.replacedRow -- the tuple of a new row to update
    with, and the row to be replaced
    """
    error = False
    usage = "Usage:\n UPDATE [key] WITH VALUES (col=tag, col2=tag2...) \n"
    # Tokenize the raw input; keeps words, numbers and common symbols.
    parser = re.compile(r'[a-z-0-9*!@#$%^&~_.+{}:\'"]+', re.IGNORECASE)
    matches = parser.findall(" ".join(self.matches))
    if matches[2].lower() == 'with' and matches[3].lower() == 'values' \
            and len(matches) >= 5:
        # check if key already exists in the key value store
        key = matches[1].lower()
        if key in dynamicDB and dynamicDB[key]['isFree'] == 'false':
            allVals = dynamicDB[key]['data']
            # Snapshot the pre-update data (as JSON text) so the replaced
            # row can be reported alongside the updated one.
            replaced = json.dumps(dynamicDB[key]['data'])
            matches = SharedFunctions.spaceMatches(4, self.matches)
            # Check that all types/columns are matched correctly
            if len(matches) % 2 == 1:
                print(
                    "Column types must be associated with column values!"
                    " \n ", usage)
                error = True
            # Check that either the column we want to modify
            # is in the row or is in the set. Otherwise,
            # this is an invalid input.
            if not error:
                # matches alternates column name / column value.
                for colIndex in range(0, len(matches), 2):
                    if matches[colIndex] not in self.typesSet:
                        self.typesSet.add(matches[colIndex])
                    allVals[matches[colIndex]] = matches[colIndex + 1]
                # Now set the data of the row to be the modified values.
                dynamicDB[key]['data'] = allVals
                self.updatedRow = {
                    key: {
                        'isFree': 'false',
                        'data': allVals
                    }
                }
                self.replacedRow = {
                    key: {
                        'isFree': 'false',
                        'data': ast.literal_eval(replaced)
                    }
                }
        else:
            print("The key is not in the store!")
    else:
        print("UPDATE format is incorrect. \n", usage)
    return self.updatedRow, self.replacedRow
def execute(self, message):
    """
    Find a URL in the incoming IRC message and say a descriptive title for it.

    Special-cases Twitch, YouTube, Imgur and Twitter URLs via their APIs;
    any other URL falls back to scraping the page's <title> tag. If no URL
    is found, or the page yields no title, nothing is said to the channel.

    :type message: IrcMessage
    """
    starttime = time.time()
    urlmatch = re.search(r"(https?://\S+)", message.message)
    if not urlmatch:
        print "(Title Retrieve module triggered, but no url found)"
    else:
        url = urlmatch.group()
        # Strip trailing ')' and '/' characters that are usually message
        # punctuation rather than part of the URL.
        while url.endswith(")") or url.endswith('/'):
            url = url[:-1]
        try:
            title = None
            #There's some special cases for often used pages.
            #Twitch!
            if url.count('twitch.tv') > 0:
                #Twitch
                channelmatches = re.search("https?://w*\.twitch\.tv/([^/]+)", url)
                if channelmatches:
                    channel = channelmatches.group(1)
                    channeldata = {}
                    isChannelOnline = False
                    twitchheaders = {'Accept': 'application/vnd.twitchtv.v2+json'}
                    # Try the streams endpoint first; it only returns data when
                    # the channel is currently live.
                    twitchStreamPage = requests.get(u"https://api.twitch.tv/kraken/streams/" + channel, headers=twitchheaders)
                    streamdata = json.loads(twitchStreamPage.text.encode('utf-8'))
                    print "stream data:"
                    #for key, value in streamdata.iteritems():
                    #	print u" {}: {}".format(key, value)
                    if 'stream' in streamdata and streamdata['stream'] is not None:
                        # print "using Stream API call"
                        channeldata = streamdata['stream']['channel']
                        isChannelOnline = True
                    elif 'error' not in streamdata:
                        # print "Using Channels API call"
                        # Channel is offline; fall back to the channels endpoint
                        # for the static channel info.
                        twitchChannelPage = requests.get(u"https://api.twitch.tv/kraken/channels/" + channel, headers=twitchheaders)
                        channeldata = json.loads(twitchChannelPage.text.encode('utf-8'))
                    if len(channeldata) > 0:
                        # print "Channel data:"
                        # for key, value in channeldata.iteritems():
                        # 	print u" {}: {}".format(key, value)
                        # Build the format string first, then fill it in once.
                        title = u"Twitch - {username}"
                        if channeldata['game'] is not None:
                            title += u", playing {game}"
                        if channeldata['mature']:
                            title += u" [Mature]"
                        if isChannelOnline:
                            title += u" (Online)"
                        else:
                            title += u" (Offline)"
                        title = title.format(username=channeldata['display_name'], game=channeldata['game'])
            elif url.count('youtube.com') > 0 or url.count('youtu.be') > 0:
                #Youtube!
                if not GlobalStore.commandhandler.apikeys.has_section('google') or not GlobalStore.commandhandler.apikeys.has_option('google', 'apikey'):
                    print "[ERROR] Google API key not found!"
                else:
                    #First we need to determine the video ID from something like this: http://www.youtube.com/watch?v=jmAKXADLcxY or http://youtu.be/jmAKXADLcxY
                    videoId = u""
                    if url.count('youtu.be') > 0:
                        videoId = url[url.rfind('/')+1:]
                    else:
                        videoIdMatch = re.search('.+v=([^&#\Z]+)', url)
                        if videoIdMatch:
                            videoId = videoIdMatch.group(1)
                    if videoId != u"":
                        googleUrl = "https://www.googleapis.com/youtube/v3/videos"
                        params = {'part': 'statistics,snippet,contentDetails', 'id': videoId, 'key': GlobalStore.commandhandler.apikeys.get('google', 'apikey')}
                        # Restrict the reply to only the fields we actually use.
                        params['fields'] = 'items/snippet(title,description),items/contentDetails/duration,items/statistics(viewCount,likeCount,dislikeCount)'
                        googleJson = json.loads(requests.get(googleUrl, params=params).text.encode('utf-8'))
                        if 'error' in googleJson:
                            print u"ERROR with Google requests. {}: {}. [{}]".format(googleJson['error']['code'], googleJson['error']['message'], json.dumps(googleJson).replace('\n',' '))
                        elif 'items' not in googleJson or len(googleJson['items']) != 1:
                            print u"Unexpected reply from Google API: {}".format(json.dumps(googleJson).replace('\n', ' '))
                        else:
                            videoData = googleJson['items'][0]
                            durationtimes = SharedFunctions.parseIsoDate(videoData['contentDetails']['duration'])
                            # Only include the day/hour parts when non-zero.
                            durationstring = u""
                            if durationtimes['day'] > 0:
                                durationstring += u"{day} d, "
                            if durationtimes['hour'] > 0:
                                durationstring += u"{hour:02}:"
                            durationstring += u"{minute:02}:{second:02}"
                            durationstring = durationstring.format(**durationtimes)
                            #Check if there's a description
                            description = videoData['snippet']['description'].strip()
                            if description == u"":
                                description = u"<No description>"
                            #likePercentage = int(videoData['statistics']['likeCount']) / int(videoData['statistics']['dislikeCount'])
                            title = u"{title} [{duration}, {viewcount:,} views]: {description}".format(title=videoData['snippet']['title'].strip(), duration=durationstring, viewcount=int(videoData['statistics']['viewCount']), description=description)
            elif url.count('imgur.com') > 0:
                if not GlobalStore.commandhandler.apikeys.has_section('imgur') or not GlobalStore.commandhandler.apikeys.has_option('imgur', 'clientid'):
                    print "[ERROR] Imgur API key not found!"
                else:
                    imageIdMatches = re.search('imgur\.com/([^.]+)', url, re.IGNORECASE)
                    if imageIdMatches is None:
                        print "No Imgur ID found in '{}'".format(url)
                    else:
                        imageId = imageIdMatches.group(1)
                        print "[imgur] initial id: '{}'".format(imageId)
                        isGallery = False
                        imageType = 'image'
                        # A '/' in the id means it was a gallery/album link;
                        # keep only the last path segment as the actual id.
                        if '/' in imageId:
                            if 'gallery' in imageId:
                                imageType = 'gallery/album'
                                isGallery = True
                            imageId = imageId[imageId.rfind('/')+1:]
                            print "Modified Imgur id: '{}'".format(imageId)
                        headers = {"Authorization": "Client-ID "+GlobalStore.commandhandler.apikeys.get('imgur','clientid')}
                        imgurUrl = "https://api.imgur.com/3/{type}/{id}".format(type=imageType, id=imageId)
                        print "url: {}".format(url)
                        imgurDataPage = requests.get(imgurUrl, headers=headers)
                        print "page: {}".format(imgurDataPage.text.encode('utf-8'))
                        imgdata = json.loads(imgurDataPage.text.encode('utf-8'))
                        if imgdata['success'] != True or imgdata['status'] != 200:
                            print "[UrlTitle Imgur] Error while retrieving image data: {}".format(imgurDataPage.text.encode('utf-8'))
                        else:
                            #print imgurData
                            imgdata = imgdata['data']
                            if imgdata['title'] is None:
                                imgdata['title'] = u"No Title"
                            # Build the format string piecewise, fill it in at the end.
                            title = u"{imgdata[title]} ("
                            if isGallery:
                                title += u"{imgdata[images_count]} images"
                            else:
                                imgFilesize = imgdata['size'] / 1024.0
                                #Split into two lines because we're only formatting imgFilesize here, and otherwise it errors out on imgdata
                                title += u"{imgdata[width]:,}x{imgdata[height]:,}"
                                title += u" {imgFilesize:,.0f} kb".format(imgFilesize=imgFilesize)
                            title += u" {imgdata[views]:,} views"
                            title += u")"
                            if 'animated' in imgdata and imgdata['animated'] == True:
                                title += u" (Animated)"
                            if 'nsfw' in imgdata and imgdata['nsfw'] == True:
                                title += u" (NSFW!)"
                            title = title.format(imgdata=imgdata)
            elif url.count('twitter.com') > 0:
                # Named groups: 'name' is the account, optional 'id' is a tweet id.
                tweetMatches = re.search('twitter.com/(?P<name>[^/]+)(?:/status/(?P<id>[^/]+).*)?', url)
                if not tweetMatches:
                    print "No twitter matches found in '{}'".format(url)
                else:
                    print "Tweetmatches: {}".format(tweetMatches.groupdict())
                    #print "[UrlTitleFinder Twitter] Username is '{}', tweet id is '{}'".format(tweetMatches.group('name'), tweetMatches.group('id'))
                    if not GlobalStore.commandhandler.apikeys.has_section('twitter') or not GlobalStore.commandhandler.apikeys.has_option('twitter', 'tokentype') or not GlobalStore.commandhandler.apikeys.has_option('twitter', 'token'):
                        print "[UrlTitleFinder] ERROR: Twitter API token info not found!"
                    else:
                        headers = {"Authorization": "{} {}".format(GlobalStore.commandhandler.apikeys.get('twitter', 'tokentype'), GlobalStore.commandhandler.apikeys.get('twitter', 'token'))}
                        if 'id' in tweetMatches.groupdict() and tweetMatches.group('id') is not None:
                            #Specific tweet
                            twitterUrl = "https://api.twitter.com/1.1/statuses/show.json?id={id}".format(id=tweetMatches.group('id'))
                            twitterDataPage = requests.get(twitterUrl, headers=headers)
                            twitterdata = json.loads(twitterDataPage.text.encode('utf-8'))
                            print twitterdata
                            title = u"@{username} ({name}): {text} [{timestamp}]".format(username=twitterdata['user']['screen_name'], name=twitterdata['user']['name'], text=twitterdata['text'], timestamp=twitterdata['created_at'])
                        else:
                            #User page
                            twitterUrl = u"https://api.twitter.com/1.1/users/show.json?screen_name={name}".format(name=tweetMatches.group('name'))
                            twitterDataPage = requests.get(twitterUrl, headers=headers)
                            twitterdata = json.loads(twitterDataPage.text.encode('utf-8'))
                            title = u"{name} (@{screen_name}): {description} ({statuses_count:,} tweets posted, {followers_count:,} followers, following {friends_count:,})"
                            if 'verified' in twitterdata and twitterdata['verified'] == True:
                                title += u". Verified account"
                            title = title.format(**twitterdata)
            #If nothing has been found so far, just display whatever is between the <title> tags
            if title is None:
                print "Using default title search"
                #Check here and not later because sites like Imgur can have .jpg URLs and we still want to check those
                extensionsToIgnore = ['.jpg', '.jpeg', '.gif', '.png', '.bmp', '.avi', '.wav', '.mp3', '.zip', '.rar', '.7z', '.pdf']
                for ext in extensionsToIgnore:
                    if url.endswith(ext):
                        print "Skipping title search, ignorable extension"
                        return
                titlematch = re.search(r'<title.*?>(.+)</title>', requests.get(url).text)
                if not titlematch:
                    print "No title found on page '{}'".format(url)
                else:
                    title = titlematch.group(1).strip()
        except requests.exceptions.ConnectionError as error:
            print "(A connection error occurred while trying to retrieve '{}': {})".format(url, error)
        #Finally, display the result of all the hard work, if there was any
        if title is not None:
            title = title.replace('\n', ' ')
            #Convert weird characters like ' back into normal ones like '
            title = HTMLParser.HTMLParser().unescape(title)
            # Keep the channel output short.
            if len(title) > 250:
                title = title[:250] + "..."
            print "[urlTitleFinder] Time taken: {} seconds".format(time.time() - starttime)
            message.bot.say(message.source, u"Title: {}".format(title))
def execute(self, message):
    """
    Handle the streamwatcher command: manage which Twitch streamers are
    watched per server-channel pair.

    Subcommands (first message part): 'list', 'add'/'follow', 'remove',
    'toggle'/'autoreport', 'setnick', 'removenick', 'live'. State is kept in
    self.watchedStreamersData and persisted via self.saveWatchedStreamerData().

    :type message: IrcMessage.IrcMessage
    """
    #Making this work in PMs requires either a different storage method than "server channel",
    # or a better lookup method than 'if channel in bot.channelUserList'
    if message.isPrivateMessage:
        message.reply("I'm sorry, this module doesn't work in private messages (yet?). Poke my owner if you want it added!", "say")
        return
    if message.messagePartsLength == 0:
        message.reply("Please add a parameter. Use 'list' to see which streamers I'm watching, or 'add' to add one of your own", "say")
        return
    parameter = message.messageParts[0].lower()
    # 'add' and 'live' hit the Twitch API, so they need the API key.
    if (parameter == "add" or parameter == "live") and 'twitch' not in GlobalStore.commandhandler.apikeys:
        message.reply("Oh, I'm sorry, I seem to have lost my access key to Twitch. Inform my owner(s), they can probably find it for me!", "say")
        return
    #All options need this for lookup
    serverChannelString = "{} {}".format(message.bot.serverfolder, message.source)
    if parameter == "list":
        followedStreamers = []
        for streamername, streamerdata in self.watchedStreamersData.iteritems():
            #Get the streamer's nickname, if any
            if self.doesStreamerHaveNickname(streamername, serverChannelString):
                streamername = u"{}({})".format(streamerdata['nicknames'][serverChannelString], streamername)
            #Check to see if this streamer is followed in the channel the command came from
            if serverChannelString in streamerdata['followChannels']:
                followedStreamers.append(streamername)
            elif serverChannelString in streamerdata['reportChannels']:
                # '[a]' marks streamers with autoreport enabled.
                followedStreamers.append(streamername + "[a]")
        if len(followedStreamers) == 0:
            message.reply(u"I'm not watching anybody for this channel. You can add streamers for me to watch with the 'add' parameter", "say")
        else:
            followedStreamers.sort()
            message.reply(u"I'm watching {:,} streamer(s): ".format(len(followedStreamers)) + u", ".join(followedStreamers), "say")
    elif parameter == "add" or parameter == "follow":
        if message.messagePartsLength < 2:
            # At the risk of ruining the joke, the '26 hours' is a reference to Star Trek DS9, not a mistake
            message.reply(u"Watch which streamer? I'm on Twitch 26 hours a day so you're going to have to be more specific", "say")
        else:
            streamername = message.messageParts[1].lower()
            streamerdata = self.watchedStreamersData.get(streamername, None)
            #Check if they're already being followed
            if streamerdata and (serverChannelString in streamerdata['followChannels'] or serverChannelString in streamerdata['reportChannels']):
                message.reply(u"I'm already following {}. Seems you're not the only who likes them!".format(streamername), "say")
                return
            #If we don't have data on this streamer yet, retrieve it
            if not streamerdata:
                try:
                    r = requests.get("https://api.twitch.tv/kraken/users", params={"client_id": GlobalStore.commandhandler.apikeys['twitch'], "api_version": 5, "login": streamername}, timeout=10.0)
                except requests.exceptions.Timeout:
                    message.reply(u"Apparently Twitch is distracted by its own streams, because it's too slow to respond. Try again in a bit?")
                    return
                twitchData = r.json()
                if 'error' in twitchData:
                    self.logError(u"[TwitchWatch] Something went wrong when trying to find the clientID of user '{}'. {}".format(streamername, twitchData['message'] if 'message' in twitchData else "No error message provided"))
                    message.reply(u"Sorry, something went wrong when trying to look up info on that user. Please try again in a bit, maybe it'll go better then", "say")
                    return
                if twitchData['_total'] != 1:
                    message.reply(u"That... doesn't match anybody on file. Twitch's file, I mean. Maybe you misspelled the streamer's name?", "say")
                    return
                #No errors, got the streamer data. Store it
                self.watchedStreamersData[streamername] = {'clientId': twitchData['users'][0]['_id'], 'hasBeenReportedLive': False, 'followChannels': [], 'reportChannels': []}
                #Update the convenience variable too since that's 'None' now
                streamerdata = self.watchedStreamersData[streamername]
            #We know we have the basics for the streamer set up, at least, or more if they were already in our files
            # Add the current server-channel pair in there too
            shouldAutoReport = (message.messagePartsLength >= 3 and message.messageParts[-1].lower() == "autoreport")
            channelType = 'reportChannels' if shouldAutoReport else 'followChannels'
            streamerdata[channelType].append(serverChannelString)
            self.saveWatchedStreamerData()
            replytext = u"All right, I'll keep an eye on {}".format(streamername)
            if shouldAutoReport:
                replytext += u", and I'll shout in here when they go live"
            message.reply(replytext, "say")
    elif parameter == "remove":
        if message.messagePartsLength < 2:
            message.reply("I'm not going to remove all the streamers I watch! Please be more specific", "say")
        else:
            streamername = message.messageParts[1].lower()
            streamerdata = self.watchedStreamersData.get(streamername, None)
            if not streamerdata:
                message.reply(u"I don't even know who {} is. So task completed, I guess?".format(streamername), "say")
                return
            #Determine if the streamer is followed or autoreported
            channelType = None
            if serverChannelString in streamerdata['followChannels']:
                channelType = 'followChannels'
            elif serverChannelString in streamerdata['reportChannels']:
                channelType = 'reportChannels'
            if not channelType:
                message.reply(u"I'm already not watching {}. You're welcome!".format(streamername), "say")
                return
            #The streamer is being followed. Remove it from the channel type list it was in
            streamerdata[channelType].remove(serverChannelString)
            #If there's no channel watching this streamer anymore, remove it entirely
            if len(streamerdata['followChannels']) == 0 and len(streamerdata['reportChannels']) == 0:
                del self.watchedStreamersData[streamername]
            self.saveWatchedStreamerData()
            message.reply(u"Ok, I'll stop watching {} then".format(streamername), "say")
    elif parameter == "toggle" or parameter == "autoreport":
        #Toggle auto-reporting
        if message.messagePartsLength < 2:
            message.reply(u"I can't toggle autoreporting for everybody, that'd get confusing! Please provide a streamer name too", "say")
        else:
            streamername = message.messageParts[1].lower()
            streamerdata = self.watchedStreamersData.get(streamername, None)
            if not streamerdata or (serverChannelString not in streamerdata['followChannels'] and serverChannelString not in streamerdata['reportChannels']):
                message.reply(u"I'm not following {}, so I can't toggle autoreporting for them either. Maybe you made a typo, or you forgot to add them with 'add'?", "say")
            else:
                # Move the channel between the follow and report lists.
                if serverChannelString in streamerdata['followChannels']:
                    streamerdata['followChannels'].remove(serverChannelString)
                    streamerdata['reportChannels'].append(serverChannelString)
                    message.reply(u"All right, I'll shout in here when {} goes live. You'll never miss a stream of them again!".format(streamername), "say")
                else:
                    streamerdata['reportChannels'].remove(serverChannelString)
                    streamerdata['followChannels'].append(serverChannelString)
                    message.reply(u"Ok, I'll stop mentioning every time {} goes live. But don't blame me if you miss a stream of them!".format(streamername), "say")
                self.saveWatchedStreamerData()
    elif parameter == "setnick":
        if message.messagePartsLength < 3:
            message.reply(u"I'm not going to make up a nick! Please add a nickname too", "say")
            return
        #Set a nickname for a streamer, since their nick in the channel and their Twitch nick don't always match
        streamername = message.messageParts[1].lower()
        streamerdata = self.watchedStreamersData.get(streamername, None)
        if not streamerdata or (serverChannelString not in streamerdata['followChannels'] and serverChannelString not in streamerdata['reportChannels']):
            message.reply(u"I don't even follow {}, so setting a nickname is slightly premature. Please introduce me to them first with the 'add' parameter".format(streamername), "say")
            return
        if 'nicknames' not in streamerdata:
            streamerdata['nicknames'] = {}
        streamerdata['nicknames'][serverChannelString] = message.messageParts[2]
        self.saveWatchedStreamerData()
        message.reply(u"All right, I'll call {} '{}' from now on".format(streamername, message.messageParts[2]), "say")
    elif parameter == "removenick":
        if message.messagePartsLength == 1:
            message.reply("I'm not going to delete everybody's nickname! Add the name of the streamer whose nick you want removed", "say")
            return
        streamername = message.messageParts[1].lower()
        streamerdata = self.watchedStreamersData.get(streamername, None)
        #Maybe they entered the nickname instead of the streamer name. Check if we can find it
        if not streamerdata:
            for streamername, streamerdata in self.watchedStreamersData.iteritems():
                if self.doesStreamerHaveNickname(streamername, serverChannelString) and streamername == streamerdata['nicknames'][serverChannelString].lower():
                    #Found a match. If we break now, streamername and streamerdata will be set correctly
                    break
            else:
                # for-else: no break happened, so no match was found.
                message.reply(u"I'm sorry, I don't know who {} is. Maybe you made a typo, or you forgot to add the streamer with the 'add' parameter?".format(message.messageParts[1]), "say")
                return
        if not self.doesStreamerHaveNickname(streamername, serverChannelString):
            message.reply(u"I don't have a nickname stored for {}, so mission accomplished, I guess?".format(streamername), "say")
            return
        message.reply(u"Ok, I removed the nickname '{}', I'll call them by their Twitch username '{}'".format(streamerdata['nicknames'][serverChannelString], streamername))
        del streamerdata['nicknames'][serverChannelString]
        # Drop the whole 'nicknames' dict when it becomes empty.
        if len(streamerdata['nicknames']) == 0:
            del streamerdata['nicknames']
        self.saveWatchedStreamerData()
    elif parameter == "live":
        streamerIdsToCheck = []
        for streamername, streamerdata in self.watchedStreamersData.iteritems():
            if serverChannelString in streamerdata['followChannels'] or serverChannelString in streamerdata['reportChannels']:
                streamerIdsToCheck.append(streamerdata['clientId'])
        isSuccess, result = self.retrieveStreamDataForIds(streamerIdsToCheck)
        if not isSuccess:
            self.logError(u"[TwitchWatch] An error occurred during a manual live check. " + result)
            message.reply(u"I'm sorry, I wasn't able to retrieve data from Twitch. It's probably entirely their fault, not mine though. Try again in a little while", "say")
            return
        if len(result) == 0:
            message.reply("Nobody's live, it seems. Time for videogames and/or random streams, I guess!", "say")
        else:
            reportStrings = []
            shouldUseShortReportString = len(result) >= 4  #Use shorter report strings if there's 4 or more people live
            for streamername, streamerdata in result.iteritems():
                displayname = streamerdata['channel']['display_name']
                if self.doesStreamerHaveNickname(streamername, serverChannelString):
                    displayname = self.watchedStreamersData[streamername]['nicknames'][serverChannelString]
                if shouldUseShortReportString:
                    reportStrings.append(u"{} ({})".format(displayname, streamerdata['channel']['url']))
                else:
                    reportStrings.append(u"{displaynameBold}: {status} [{game}] ({url})".format(displaynameBold=SharedFunctions.makeTextBold(displayname), **streamerdata['channel']))
            message.reply(SharedFunctions.joinWithSeparator(reportStrings), "say")
    else:
        message.reply("I don't know what to do with the parameter '{}', sorry. Maybe you made a typo? Or you could try (re)reading the help text".format(parameter), "say")
def executeScheduledFunction(self):
    """
    Scheduled live-check: query Twitch for every streamer that at least one
    channel wants autoreports for, mark streams live/offline in
    self.watchedStreamersData, and announce newly-live streams in each
    channel that asked for them.

    Side effects: mutates 'hasBeenReportedLive' flags, persists them via
    self.saveWatchedStreamerData(), and sends messages through
    GlobalStore.bothandler.
    """
    #Go through all our stored streamers, and see if we need to report online status somewhere
    # If we do, check if they're actually online
    streamerIdsToCheck = {}  #Store as a clientId-to-streamername dict to facilitate reverse lookup in self.watchedStreamersData later
    for streamername, data in self.watchedStreamersData.iteritems():
        if len(data['reportChannels']) > 0:
            #Ok, store that we need to check whether this stream is online or not
            # Because doing the check one time for all streamers at once is far more efficient
            streamerIdsToCheck[data['clientId']] = streamername
    if len(streamerIdsToCheck) == 0:
        #Nothing to do! Let's stop now
        return
    isSuccess, result = self.retrieveStreamDataForIds(streamerIdsToCheck.keys())
    if not isSuccess:
        self.logError(u"[TwitchWatch] An error occurred during the scheduled live check. " + result)
        return
    channelMessages = {}  #key is string with server-channel, separated by a space. Value is a list of tuples with data on streams that are live
    #We don't want to report a stream that's been live for a while already, like if it has been live when the bot was offline and it only just got started
    # So create a timestamp for at least one update cycle in the past, and if the stream was live before that, don't mention it
    tooOldTimestamp = datetime.datetime.utcnow() - datetime.timedelta(seconds=self.scheduledFunctionTime * 1.5)
    #BUGFIX: the format used to be "%Y-%m-%dT%H:%M%SZ" (no colon between minutes and
    # seconds). Twitch's 'created_at' is RFC3339 ("2016-01-02T03:04:05Z"), and since the
    # ':' character sorts above every digit, the string comparison below was wrong and
    # could wrongly suppress live reports. The corrected format matches Twitch's exactly.
    tooOldTimestamp = datetime.datetime.strftime(tooOldTimestamp, "%Y-%m-%dT%H:%M:%SZ")
    for streamername, streamdata in result.iteritems():
        channeldata = streamdata.pop('channel')
        #Remove this stream from the list of streamers we need to check, so afterwards we can verify which streams we didn't get data on
        del streamerIdsToCheck[str(channeldata['_id'])]
        # Only store data for channels that have gone live since our last check
        if self.watchedStreamersData[streamername]['hasBeenReportedLive']:
            continue
        #We will report that this stream is live, so store that we'll have done that
        self.watchedStreamersData[streamername]['hasBeenReportedLive'] = True
        #If the stream has been online for a while, longer than our update cycle, we must've missed it going online
        # No use reporting on it now, because that could f.i. cause an autoreport avalanche when the bot is just started up
        if streamdata['created_at'] < tooOldTimestamp:
            continue
        #Store current stream description data for each name, so we can check afterwards which channels we need to send it to
        # Don't store it as a string, so we can shorten it if one channel would get a lot of live streamer reports
        for serverChannelString in self.watchedStreamersData[streamername]['reportChannels']:
            #Add this stream's data to the channel's reporting output
            if serverChannelString not in channelMessages:
                channelMessages[serverChannelString] = []
            displayname = channeldata['display_name']
            if self.doesStreamerHaveNickname(streamername, serverChannelString):
                displayname = self.watchedStreamersData[streamername]['nicknames'][serverChannelString]
            channelMessages[serverChannelString].append((displayname, channeldata['status'], channeldata['game'], channeldata['url']))
    #Now we've got all the stream data we need!
    # First set the offline streams to offline (any id left in streamerIdsToCheck got no stream data, so it isn't live)
    for clientId, streamername in streamerIdsToCheck.iteritems():
        self.watchedStreamersData[streamername]['hasBeenReportedLive'] = False
    self.saveWatchedStreamerData()
    #And now report each online stream to each channel that wants it
    for serverChannelString, streamdatalist in channelMessages.iteritems():
        server, channel = serverChannelString.rsplit(" ", 1)
        #First check if we're even in the server and channel we need to report to
        if server not in GlobalStore.bothandler.bots or channel not in GlobalStore.bothandler.bots[server].channelsUserList:
            continue
        reportStrings = []
        #If we have a lot of live streamers to report, keep it short. Otherwise, we can be a bit more verbose
        if len(streamdatalist) >= 4:
            #A lot of live streamers to report, keep it short. Just the streamer name and the URL
            for streamdata in streamdatalist:
                reportStrings.append(u"{0} ({3})".format(*streamdata))
        else:
            #Only a few streamers live, we can be a bit more verbose
            for streamdata in streamdatalist:
                reportStrings.append(u"{streamernameBolded}: {1} [{2}] ({3})".format(streamernameBolded=SharedFunctions.makeTextBold(streamdata[0]), *streamdata))
        #Now make the bot say it
        GlobalStore.bothandler.bots[server].sendMessage(channel.encode("utf8"), u"Streamer{} went live: ".format(u's' if len(reportStrings) > 1 else u'') + SharedFunctions.joinWithSeparator(reportStrings), "say")
def searchWolfram(self, query, podsToParse=5, cleanUpText=True, includeUrl=True):
    """
    Query the Wolfram Alpha API and build a short, channel-friendly reply.

    Keyword Arguments:
    query -- the search query string to send to Wolfram Alpha
    podsToParse -- how many result pods to request from the API
    cleanUpText -- if True, newlines are replaced with a separator and
                   duplicate spaces are collapsed
    includeUrl -- if True, a (possibly shortened) link to the Wolfram Alpha
                  result page is appended

    Returns a reply string: the first usable plaintext result, a
    "no results" message (possibly with 'did you mean' suggestions), or an
    error message if the API call or XML parsing failed.
    """
    replystring = ""
    wolframResult = self.fetchWolframData(query, podsToParse)
    #First check if the query succeeded; on failure the second element is the error message
    if not wolframResult[0]:
        return wolframResult[1]
    try:
        xml = ElementTree.fromstring(wolframResult[1])
    except ElementTree.ParseError:
        self.logError("[Wolfram] Unexpected reply, invalid XML:")
        self.logError(wolframResult[1])
        return "Wow, that's some weird data. I don't know what to do with this, sorry. Try reformulating your query, or just try again and see what happens"
    if xml.attrib['error'] != 'false':
        replystring = "Sorry, an error occurred. Tell my owner(s) to check the error log"
        #BUGFIX: the format string used to end at "Reply:" with no second placeholder,
        # so the actual API reply passed to .format() was silently dropped from the log
        self.logError("[Wolfram] An error occurred for the search query '{}'. Reply: {}".format(query, wolframResult[1]))
    elif xml.attrib['success'] != 'true':
        replystring = "No results found, sorry"
        #Most likely no results were found. See if there are suggestions for search improvements
        if xml.find('didyoumeans') is not None:
            didyoumeans = xml.find('didyoumeans').findall('didyoumean')
            suggestions = []
            for didyoumean in didyoumeans:
                #Skip low-confidence suggestions
                if didyoumean.attrib['level'] != 'low':
                    suggestion = didyoumean.text.replace('\n', '').strip()
                    if len(suggestion) > 0:
                        suggestions.append(suggestion.encode('utf-8'))
            if len(suggestions) > 0:
                replystring += ". Did you perhaps mean: {}".format(", ".join(suggestions))
    else:
        pods = xml.findall('pod')
        resultFound = False
        #Skip the first pod (the parsed input echo), and any pod explicitly titled "Input"
        for pod in pods[1:]:
            if pod.attrib['title'] == "Input":
                continue
            for subpod in pod.findall('subpod'):
                text = subpod.find('plaintext').text
                #If there's no text, or if it's a dumb result ('3 euros' returns coinweight, which is an image), skip this pod
                if text is None or text.startswith('\n'):
                    continue
                if cleanUpText:
                    text = text.replace('\n', Constants.GREY_SEPARATOR).strip()
                #If there's no text in this pod (for instance if it's just an image)
                if len(text) == 0:
                    continue
                replystring += text
                resultFound = True
                break
            if resultFound:
                break
        if not resultFound:
            replystring += "Sorry, results were either images, irrelevant or non-existent"
    if cleanUpText:
        replystring = re.sub(' {2,}', ' ', replystring)
    #Make sure we don't spam the channel, keep message length limited
    # Shortened URL will be about 25 characters, keep that in mind
    messageLengthLimit = 270 if includeUrl else 300
    if len(replystring) > messageLengthLimit:
        replystring = replystring[:messageLengthLimit] + '[...]'
    #Add the search url
    if includeUrl:
        searchUrl = "http://www.wolframalpha.com/input/?i={}".format(urllib.quote_plus(query))
        #If the message would get too long, shorten the result URL
        if len(replystring) + len(searchUrl) > 300:
            searchUrl = SharedFunctions.shortenUrl(searchUrl)[1]
        replystring += "{}{}".format(Constants.GREY_SEPARATOR, searchUrl)
    return replystring
def execute(self, message):
    """
    Look up a boardgame on BoardGameGeek and say a one-line summary.

    Scrapes the BGG web search for the first result (their API search sorts
    poorly), extracts the game id from the result link, then queries the
    XML API for player count, playtime, year and description.

    :type message: IrcMessage
    """
    if message.messagePartsLength == 0:
        message.reply("There's far too many boardgames to just pick a random one! Please provide a search query", "say")
        return
    #Since the API's search is a bit crap and doesn't sort properly, scrape the web search page
    try:
        request = requests.get("https://boardgamegeek.com/geeksearch.php", params={"action": "search", "objecttype": "boardgame", "q": message.message}, timeout=10.0)
    except requests.exceptions.Timeout:
        message.reply("Either your search query was too extensive for BoardGameGeek, or they're distracted by a boardgame. Either way, they took too long to respond, sorry")
        return
    if request.status_code != 200:
        message.reply("Something seems to have gone wrong. At BoardGameGeek, I mean, because I never make mistaks. Try again in a little while", "say")
        return
    page = BeautifulSoup(request.content, "html.parser")
    #Get the first result row
    row = page.find(class_="collection_objectname")
    if row is None:
        message.reply("BoardGameGeek doesn't think a game called '{}' exists. Maybe you made a typo?".format(message.message), "say")
        return
    #Then get the link to the board game page from that, to get the game ID from the URL
    # Format of the url is '/boardgame/[ID]/[NAME]
    gameId = row.find('a')['href'].split('/', 3)[2]
    #Now query the API to get info on this game
    try:
        request = requests.get("https://www.boardgamegeek.com/xmlapi2/thing", params={'id': gameId}, timeout=10.0)
    except requests.exceptions.Timeout:
        message.reply("I know you need some patience for boardgames, but not for info about boardgames. BoardGameGeek took too long to respond, sorry")
        return
    try:
        xml = ElementTree.fromstring(request.content)
    except ElementTree.ParseError:
        message.reply("I don't know how to read the data returned by BoardGameGeek, which is weird because I'm coded very well. Try again in a little while, see if it works then?", "say")
        return
    item = xml.find('item')
    if item is None:  #Specific check otherwise Python prints a warning
        message.reply("I'm sorry, I didn't find any games called '{}'. Did you make a typo? Or did you just invent a new game?!".format(message.message), "say")
        print request.content
        return
    replytext = u"{} ({} players, {} minutes, {}): ".format(
        SharedFunctions.makeTextBold(item.find('name').attrib['value']),
        self.getValueRangeDescription(item, 'minplayers', 'maxplayers'),
        self.getValueRangeDescription(item, 'minplaytime', 'maxplaytime'),
        item.find('yearpublished').attrib['value'])
    url = u" (http://boardgamegeek.com/boardgame/{})".format(gameId)
    #Fit in as much of the description as we can
    lengthLeft = 295 - len(replytext) - len(url)
    description = HTMLParser.HTMLParser().unescape(item.find('description').text)
    #Some descriptions start with a disclaimer that it's from the publisher, remove that to save space
    if description.startswith(u"Game description from the publisher") or description.startswith(u"From the manufacturer's website"):
        description = description.split('\n', 1)[1].lstrip()
    #Remove newlines
    description = description.replace('\n', ' ')
    #Slice it so it fits in the available space, cut at the last word separator
    description = description[:lengthLeft]
    description = description[:description.rfind(' ')] + u'[...]'
    #Show the result
    replytext += description + url
    message.reply(replytext, "say")
def execute(self, message): """ :type message: IrcMessage """ replytext = u"" gamenames = {} page = None title = u"" isWeekly = (message.message.lower() == 'weekly' or message.message.lower() == 'week') url = "http://www.humblebundle.com/" if isWeekly: url += 'weekly' pagetext = requests.get(url).content #BeautifulSoup doesn't handle non-standard newlines very well, it inserts </br> at the very end, messing up searching. Prevent that pagetext = pagetext.replace("<br>", "<br />") #Sometimes important tags are in comments, remove those ##pagetext = pagetext.replace("<!--", ">").replace("-->", ">") page = BeautifulSoup(pagetext) #Title is formatted like "Humble Weekly Sale: [company] (pay what...)" if isWeekly: titlematches = re.search("(.+) \(", page.title.string) if not titlematches: #print "No title found in '{}'".format(page.title.string) title = "Humble Weekly Sale" else: title = "The {}".format(titlematches.group(1)) else: titlematches = re.search("(.*) \(", page.title.string) if not titlematches: #print "No title found in '{}'".format(page.title.string) title = "The current Humble Bundle" else: title = "The " + titlematches.group(1) gamecontainers = page.find_all(class_="game-boxes") if len(gamecontainers) > 0: for gamecontainer in gamecontainers: #Don't show the soundtracks if 'class' in gamecontainer.attrs and ('soundtracks' in gamecontainer.attrs['class'] or 'charity' in gamecontainer.attrs['class']): continue gameEntries = gamecontainer.find_all('li', recursive=False) for gameEntry in gameEntries: gameEntryLinks = gameEntry.find_all('a', recursive=False) if not gameEntryLinks: continue #Sometimes there are multiple games in each li-tag, get all the games for gameEntryLink in gameEntryLinks: gamename = u"" gameEntryLinkTexts = gameEntryLink.find_all(text=True, recursive=False) for gameEntryLinkText in gameEntryLinkTexts: #Only add it if there is something to add, and if the current text isn't a comment if len(gameEntryLinkText.strip()) > 0 and 'Comment' not 
in str(type(gameEntryLinkText)): gamename += ' ' + gameEntryLinkText.strip() else: print "Skipping '{}' because it's too short or a comment (from LI '{}')".format(gameEntryLinkText, gameEntry.attrs['class']) if gamename == u"": gameTitle = gameEntryLink.find(class_="game-info-title") if gameTitle: gamename = gameTitle.text.strip() if gamename == u"": gameTitle = gameEntryLink.find(class_="item-title") if gameTitle: gamename = gameTitle.text.strip() for smallSubtitle in gameEntry.find_all(class_='small-subtitle'): gamename += u' ' + smallSubtitle.text.strip() if ('class' in gameEntry.attrs and 'bta' in gameEntry['class']) or gameEntry.find(alt="lock") or gameEntry.find(class_='hb-lock green'): print "'{}' is a BTA game!".format(gamename) #gamename += u" [BTA]" if 'BTA' not in gamenames: gamenames['BTA'] = [] gamenames['BTA'].append(gamename) elif gameEntry.find(class_="game-price"): price = u"" priceMatch = re.search('\$ ?(\d+(\.\d+)?)', gameEntry.find(class_="game-price").text) if priceMatch: try: price = float(priceMatch.group(1)) except: price = priceMatch.group(1) else: price = gameEntry.find(class_="game-price").text #gamename += u" [{}]".format(price) if price not in gamenames: gamenames[price] = [] gamenames[price].append(gamename) elif gameEntry.find(class_='hb-lock') and gameEntry.find(class_='blue'): #gamename += u" [fixed price]" if 'Fixed price' not in gamenames: gamenames['Fixed price'] = [] gamenames['Fixed price'].append(gamename) else: if 'PWYW' not in gamenames: gamenames['PWYW'] = [] gamenames['PWYW'].append(gamename) #if gamename != u"": # gamenames.append(gamename) #No game containers found. 
This means it's probably a Mobile bundle, with a different layout else: gametitles = page.find_all(class_="item-title") for gametitle in gametitles: #Skip the entry advertising more games if 'class' in gametitle.parent.attrs and 'bta-teaser' in gametitle.parent.attrs['class']: continue gamename = gametitle.text.strip() if gametitle.find(class_='green'): #gamename += u" [BTA]" if 'BTA' not in gamenames: gamenames['BTA'] = [] gamenames['BTA'].append(gamename) #gamenames.append(gamename) #Totals aren't shown on the site immediately, but are edited into the page with Javascript. Get info from there totalMoney = -1.0 contributors = -1 avgPrice = -1.0 timeLeft = u"" for scriptElement in page.find_all('script'): script = scriptElement.text if script.count("'initial_stats_data':") > 0: #This script element contains the initial data match = re.search("'initial_stats_data':(.+),", script) if match is None: print "Expected to find initial values, but failed!" print script else: data = json.loads(match.group(1)) if 'rawtotal' in data: totalMoney = data['rawtotal'] else: print "Sales data found, but total amount is missing!" print data if 'numberofcontributions' in data and 'total' in data['numberofcontributions']: contributors = int(data['numberofcontributions']['total']) else: print "Contributor data not found!" print data if totalMoney > -1.0 and contributors > -1: avgPrice = totalMoney / contributors timeLeftMatch = re.search('var timing = \{"end": (\d+)\};', script) if timeLeftMatch: timeLeft = SharedFunctions.durationSecondsToText(int(timeLeftMatch.group(1)) - time.time()) break if totalMoney == -1.0 or contributors == -1 or avgPrice == -1.0: replytext = "Sorry, the data could not be retrieved. This is either because the site is down, or because of some weird bug. Please try again in a little while" else: replytext = u"{title} has an average price of ${avgPrice:.2f} and raised ${totalMoney:,} from {contributors:,} people." 
gamelist = u"" if timeLeft != u"": replytext += u" It will end in {timeLeft}." #If we didn't find any games, pretend like nothing's wrong if len(gamenames) > 0: replytext += u" It contains {gamelist}" #Make sure the cheapest games are in the front if 'PWYW' in gamenames: gamelist += '{}.'.format("; ".join(gamenames['PWYW'])) gamenames.pop('PWYW') for pricelevel in sorted(gamenames.keys()): pricelevelText = str(pricelevel) if isinstance(pricelevel, (int, float)): pricelevelText = '${}'.format(pricelevel) gamelist += ' {pricelevel}: {games}.'.format(pricelevel=pricelevelText, games="; ".join(gamenames[pricelevel])) replytext = replytext.format(title=title, avgPrice=round(avgPrice, 2), totalMoney=round(totalMoney, 2), contributors=contributors, timeLeft=timeLeft, gamelist=gamelist) message.bot.say(message.source, replytext)
import os
import sys
import glob  #was missing: glob.glob() is used below to collect image and label files
import tarfile
from scipy import ndimage
import random
from random import shuffle
import tensorflow as tf
import SharedFunctions
#import modules

#files = filenames of image files
files = glob.glob("/Users/Gidonka/Documents/Programming/NYU/MachineLearning/IndianMovies/IMFDB_Final/*/*/*/*.jpg", recursive=True)
#labels = filenames of text files containing labels
labels = glob.glob("/Users/Gidonka/Documents/Programming/NYU/MachineLearning/IndianMovies/IMFDB_Final/*/*/*.txt", recursive=True)
#get entire dataset as well as individual datasets from setup function
filenames_and_labels, train_dataset, valid_dataset, test_dataset = SharedFunctions.setup(files, labels)

#define some variables
image_size = 32
num_labels = 7
batch_size = 128

#create placeholder for dataset (batches of 32x32 RGB images)
tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, 3))
#create placeholder for labels (one-hot over num_labels classes)
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))

#launch session
sess = tf.InteractiveSession()
def process_response(self, message):
    """Dispatch *message* to the process_response() hook of every in-scope plugin.

    Plugins are visited in sorted name order. A plugin that is out of scope,
    or that doesn't define a process_response method, is skipped with a log
    line; an exception raised by one plugin is logged and does not stop the
    remaining plugins from being processed.
    """
    self.logger.debug("process_response called")
    for name in sorted(self.pluginobjs.keys()):
        #Guard clause: skip plugins whose scope doesn't cover this message
        if not self.in_plugin_scope(message, name):
            self.logger.debug("plugin '%s' is not in scope, SKIPPING" % name)
            continue
        self.logger.debug("plugin '%s' is in scope, processing" % name)
        hook = getattr(self.pluginobjs[name], 'process_response', None)
        if hook is None or not inspect.ismethod(hook):
            self.logger.error("skipping plugin '%s', process_response not defined" % name)
            continue
        #Best-effort dispatch: one broken plugin must not take down the rest
        try:
            hook(message)
        except Exception:
            self.logger.error("exception in '%s' process_response():\n%s"
                              % (name, shared.indent(traceback.format_exc().strip(), 1)))