def _debian_vlist_fetcher(self, pkg, dist, reverse=False): """Parser for the madison API at https://qa.debian.org/madison.php.""" # This arch value implies 'all' (architecture-independent packages) # and 'source' (source packages), in order to prevent misleading # "Not found" errors. archs = self.registryValue('archs') + ['source', 'all'] arg = {'package': pkg, 'table': dist, 'a': ','.join(set(archs))} url = 'https://qa.debian.org/madison.php?text=on&' + urlencode(arg) log.debug("PkgInfo: Using url %s for debian_vlist_fetcher", url) d = OrderedDict() fd = utils.web.getUrlFd(url) for line in fd.readlines(): L = line.decode("utf-8").split("|") name, version, release, archs = map(str.strip, L) d[release] = (version, archs) if d: if reverse: # *sigh*... I wish there was a better way to do this d = OrderedDict(reversed(tuple(d.items()))) if self.registryValue('verbose'): items = ["{name} \x02({version} [{archs}])\x02".format(name=k, version=v[0], archs=v[1]) for (k, v) in d.items()] else: items = ["{name} \x02({version})\x02".format(name=k, version=v[0]) for (k, v) in d.items()] s = format('Found %n: %L', (len(d), 'result'), items) return s else: log.debug("PkgInfo: No results found for URL %s", url)
def _getlatlng(self, location):
    """Geocode *location* via the Google Maps Geocoding API.

    Returns a dict with 'place' (formatted address) and 'll' (a
    "lat,lng" string) on success, or None on any failure.
    """
    api_key = self.registryValue("mapsAPIkey")
    location = utils.web.urlquote(location)
    url = ("https://maps.googleapis.com/maps/api/geocode/json?"
           "address=%s&sensor=false&key=%s" % (location, api_key))
    # try and fetch url
    try:
        response = utils.web.getUrl(url, headers=HEADERS)
    except utils.web.Error as e:
        log.debug(str(e))
        # Bug fix: previously execution fell through with 'response'
        # unbound, raising a NameError below that masked the real
        # fetch error. Bail out explicitly instead.
        return
    # wrap in a big try/except
    try:
        result = json.loads(response.decode())
        if result["status"] == "OK":
            lat = str(result["results"][0]["geometry"]["location"]["lat"])
            lng = str(result["results"][0]["geometry"]["location"]["lng"])
            place = result["results"][0]["formatted_address"]
            ll = "%s,%s" % (lat, lng)  # lat+long into a single string.
            return {"place": place, "ll": ll}
        else:
            self.log.info(
                "ERROR: _getlatlng: status result NOT ok. Result: {0}".
                format(result))
    except Exception as e:
        self.log.info("ERROR: _getlatlng: {0}".format(e))
def do_http_msg(self, handler, headers, msg):
    """Handle an incoming relay request: validate the required fields
    and the sending key, forward the message, and answer with JSON."""
    log.debug("headers: {}".format(headers))
    log.debug("text: {}".format(msg))
    params = msg
    important_fields = {
        'key': params.get('key', None),
        'channel': params.get('channel', None),
        'text': params.get('msg', None),
    }

    def respond(status, payload):
        # Write a JSON body with the given HTTP status code.
        handler.send_response(status)
        handler.send_header("Content-Type", "application/json")
        handler.end_headers()
        handler.wfile.write(bytes(json.dumps(payload), "utf-8"))

    if (important_fields['channel'] is None
            or important_fields['text'] is None
            or important_fields['key'] is None):
        missing_fields = [name for name in important_fields
                          if important_fields[name] is None]
        respond(403, {"success": False, "msg": "Missing field(s).",
                      "fields": missing_fields})
    elif important_fields['key'] == self.registryValue("sendingKey"):
        self.send_msg(important_fields['channel'], important_fields['text'])
        respond(200, {"success": True, "msg": "Thanks!"})
    else:
        respond(403, {"success": False, "msg": "Invalid sendingKey"})
def __init__(self, plugin_name, filename, addressing_mode=DEFAULT_MODE,
             case_sensitive=False):
    """
    Loads the existing database, creating a new one in memory
    if none exists.
    """
    self.db = {}
    self._plugin_name = plugin_name
    self.filename = conf.supybot.directories.data.dirize(filename)
    self.case_sensitive = case_sensitive
    self.addressing_mode = addressing_mode
    loaded = True
    try:
        with open(self.filename, 'rb') as f:
            self.db = pickle.load(f)
    except Exception as e:
        loaded = False
        log.debug('%s: Unable to load database, creating '
                  'a new one: %s', self._plugin_name, e)
    # Only normalize keys when a database was actually loaded.
    if loaded and not case_sensitive:
        # Iterate over a snapshot: we mutate self.db while walking it.
        for key, val in list(self.db.items()):
            if not key.islower():
                log.debug('%s: case-shifting key %s to %s',
                          self._plugin_name, key, key.lower())
                self.db[key.lower()] = val
                del self.db[key]
def _query(self, function, **params):
    """Query the RfK API endpoint *function* with the given params.

    Returns None on I/O failure.
    NOTE(review): no success-path return is visible in this chunk —
    presumably the function continues past what is shown; confirm.
    """
    # filter out empty params
    params = {key: value for key, value in params.iteritems() if value}
    log.debug('RfK._query: %s' % repr(params))
    # Honour an optional HTTP proxy from the bot configuration.
    if self.registryValue('httpProxy'):
        opener = urllib2.build_opener(
            urllib2.ProxyHandler({'http': self.registryValue('httpProxy')}))
    else:
        opener = urllib2.build_opener()
    request_url = '%s%s?key=%s&%s' % (
        self.registryValue('queryURL'),
        function,
        self.registryValue('queryPass'),
        urllib.urlencode(params)
    )
    try:
        response = opener.open(request_url)
    except IOError, e:
        log.error('RfK._query: %s' % repr(e))
        return None
def requestURL(self, url):
    """GET *url* with the current session, re-authenticating once if the
    response (or an exception) indicates we landed on the login page.

    Returns the authenticated response, or None if auth still fails.
    """
    # Use a needsRetry flag in case we catch a login failure outside of
    # the SSL exception we seem to always get.
    needsRetry = False
    response = None
    try:
        response = self.session.get(url, verify=True)
        logger.debug("Request to " + url + " returned code "
                     + str(response.status_code))
        needsRetry = self.responseRequiresAuthentication(response)
    except Exception as e:
        # If this is an SSL error, we may be being redirected to the
        # login page.
        logger.info("Caught exception on " + url + " request." + str(e))
        needsRetry = True
    if needsRetry:
        logger.info("Logging in...")
        response = self.login()
        if response is not None and not self.responseRequiresAuthentication(
                response):
            logger.info("Request returned " + str(response.status_code)
                        + " status code")
            return response
        return None
    return response
def parse(self, pkg, dist, archs, codenames='', suite='', reverse=False, verbose=False): """Parser for the madison API at https://qa.debian.org/madison.php.""" # This arch value implies 'all' (architecture-independent packages) # and 'source' (source packages), in order to prevent misleading # "Not found" errors. self.arg = {'package': pkg, 'table': dist, 'a': archs, 'c': codenames, 's': suite} self.arg = urlencode(self.arg) url = 'https://qa.debian.org/madison.php?text=on&' + self.arg log.debug("PkgInfo: Using url %s for 'vlist' command", url) d = OrderedDict() fd = utils.web.getUrlFd(url) for line in fd.readlines(): L = line.decode("utf-8").split("|") try: L = map(unicode.strip, L) except: L = map(str.strip, L) name, version, release, archs = L d[release] = (version, archs) if d: if reverse: # *sigh*... I wish there was a better way to do this d = OrderedDict(reversed(tuple(d.items()))) if verbose: items = ["{name} \x02({version} [{archs}])\x02".format(name=k, version=v[0], archs=v[1]) for (k, v) in d.items()] else: items = ["{name} \x02({version})\x02".format(name=k, version=v[0]) for (k, v) in d.items()] s = format('Found %n: %L', (len(d), 'result'), items) return s else: log.debug("PkgInfo: No results found for URL %s", url)
def requestURL(self, url):
    """Fetch *url* via the session, retrying once through login() when
    the response (or an exception) suggests we need to authenticate.

    Returns the usable response, or None if authentication fails.
    """
    # Use a needsRetry flag in case we catch a login failure outside of
    # the SSL exception we seem to always get.
    needsRetry = False
    response = None
    try:
        response = self.session.get(url, verify=True)
        logger.debug("Request to " + url + " returned code "
                     + str(response.status_code))
        needsRetry = self.responseRequiresAuthentication(response)
    except Exception as e:
        # If this is an SSL error, we may be being redirected to the
        # login page.
        logger.info("Caught exception on " + url + " request." + str(e))
        needsRetry = True
    if not needsRetry:
        return response
    logger.info("Logging in...")
    response = self.login()
    if response is not None and not self.responseRequiresAuthentication(response):
        logger.info("Request returned " + str(response.status_code)
                    + " status code")
        return response
    return None
def getCSV(self):
    """Fetch the Johns Hopkins CSSE COVID-19 daily-report CSV.

    Tries today's report first; if that fails (the upstream file is
    published with a lag), falls back to yesterday's. Returns a
    csv.DictReader over the response lines, or None if both fail.
    """
    data = None
    try:
        day = datetime.date.today().strftime("%m-%d-%Y")
        url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{0}.csv".format(
            day)
        r = requests.get(url, timeout=10)
        r.raise_for_status()
    except (
        requests.exceptions.RequestException,
        requests.exceptions.HTTPError,
    ) as e:
        log.debug("Corona: error retrieving data for today: {0}".format(e))
        # Today's file may not exist yet; retry with yesterday's date.
        try:
            day = datetime.date.today() - datetime.timedelta(days=1)
            day = day.strftime("%m-%d-%Y")
            url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{0}.csv".format(
                day)
            r = requests.get(url, timeout=10)
            r.raise_for_status()
        except (
            requests.exceptions.RequestException,
            requests.exceptions.HTTPError,
        ) as e:
            log.debug(
                "Corona: error retrieving data for yesterday: {0}".format(
                    e))
        else:
            # Yesterday's fetch succeeded.
            data = csv.DictReader(r.iter_lines(decode_unicode=True))
    else:
        # Today's fetch succeeded.
        data = csv.DictReader(r.iter_lines(decode_unicode=True))
    return data
def initialize_imgur_client(self, channel):
    """
    Check if imgur client id or secret are set, and if so initialize
    imgur API client
    """
    if self.imgur_client is not None:
        # Client already set up; nothing to do.
        return
    imgur_client_id = self.registryValue("imgurClientID")
    imgur_client_secret = self.registryValue("imgurClientSecret")
    imgur_handler_enabled = self.registryValue("imgurHandlerEnabled", channel=channel)
    if not (imgur_handler_enabled and imgur_client_id and imgur_client_secret):
        log.debug("SpiffyTitles: imgur handler disabled or empty client id/secret")
        return
    log.debug("SpiffyTitles: enabling imgur handler")
    # Initialize API client; imgurpython is an optional dependency, so
    # the import itself may fail.
    try:
        from imgurpython import ImgurClient
        from imgurpython.helpers.error import ImgurClientError
        try:
            self.imgur_client = ImgurClient(imgur_client_id, imgur_client_secret)
        except ImgurClientError as e:
            log.error("SpiffyTitles: imgur client error: %s" % (e.error_message))
    except ImportError as e:
        log.error("SpiffyTitles ImportError: %s" % str(e))
def _translateQuery(self, function, parameters={}):
    """Call the translation API endpoint *function* with *parameters*.

    Returns the decoded JSON payload, or None on error.
    NOTE(review): mutable default for `parameters` is shared across
    calls — confirm no caller relies on that before changing it.
    """
    if self.registryValue("appId") == "":
        log.error("Translate: Set your appId and restart the plugin")
        return
    log.debug("Translate.query: %s" % (repr(parameters)))
    # Honour an optional HTTP proxy from the bot configuration.
    if self.registryValue("httpProxy") != "":
        opener = urllib2.build_opener(urllib2.ProxyHandler({"http": self.registryValue("httpProxy")}))
    else:
        opener = urllib2.build_opener()
    response = opener.open(
        self.registryValue("queryURL")
        + function
        + "?"
        + "appId="
        + self.registryValue("appId")
        + "&"
        + urllib.urlencode(parameters)
    )
    try:
        # The API response is a BOM-prefixed JSON string; the dumps/
        # loads round-trip plus the [1:] slice strips that first byte.
        data = json.loads(json.dumps(response.read()))
        data = json.loads(data[1:])
        log.debug("Translate.reply: %s" % repr(data))
        return data
    except:
        log.error("Translate.query error")
        return None
def __call__(self, irc, msg, args, state):
    """Try to parse an optional argument; on failure, clear the error
    flag and fall back to this context's default value."""
    try:
        super(optional, self).__call__(irc, msg, args, state)
    except (callbacks.ArgumentError, callbacks.Error), e:
        log.debug('Got %s, returning default.', utils.exnToString(e))
        # The argument was optional, so a parse failure is not an error.
        state.errored = False
        setDefault(state, self.default)
def _radioQuery(self, function, parameters=None):
    """Query the radio API action *function* with *parameters*.

    Returns the decoded JSON payload with known numeric fields
    coerced to int.
    """
    # Bug fix: the old signature used a mutable {} default (shared
    # across calls) and mutated the caller's dict when one was passed.
    # Use a None sentinel and copy instead.
    parameters = dict(parameters) if parameters else {}
    parameters['w'] = function
    log.debug('Radio.query: %s' % (repr(parameters)))
    # Honour an optional HTTP proxy from the bot configuration.
    if (self.registryValue('httpProxy') != ''):
        opener = urllib2.build_opener(
            urllib2.ProxyHandler({'http': self.registryValue('httpProxy')}))
    else:
        opener = urllib2.build_opener()
    response = opener.open(self.registryValue('queryURL') + '?'+
                           'apikey=' + self.registryValue('queryPass') +
                           '&' + urllib.urlencode(parameters))
    data = json.loads(response.read())
    # teddy cannot into integer :3
    if ('listener' in data):
        for stream in data['listener']:
            stream['c'] = int(stream['c'])

    def convertInteger(d, key):
        # Coerce d[key] to int in place when present and non-null.
        if ((key in d) and (d[key] != None)):
            d[key] = int(d[key])
    for key in ['trackid', 'showbegin', 'showend', 'showid', 'djid']:
        convertInteger(data, key)
    log.debug('Radio.reply: %s' % repr(data))
    return(data)
def dosearch(self, query, channel):
    """Search YouTube's Data API v3 for *query*.

    Returns the first matching video id, or None on any failure.
    """
    apikey = self.registryValue("developerKey")
    safe_search = self.registryValue("safeSearch", channel)
    sort_order = self.registryValue("sortOrder", channel)
    video_id = None
    opts = {
        "q": query,
        "part": "snippet",
        "maxResults": "1",
        "order": sort_order,
        "key": apikey,
        "safeSearch": safe_search,
        "type": "video",
    }
    api_url = "https://www.googleapis.com/youtube/v3/search?{0}".format(
        utils.web.urlencode(opts)
    )
    request = None
    try:
        log.debug("YouTube: requesting %s" % (api_url))
        request = utils.web.getUrl(api_url).decode()
        response = json.loads(request)
        video_id = response["items"][0]["id"]["videoId"]
    except Exception as e:
        # Bug fix: `request` here is a plain str (or None if the fetch
        # itself failed); the old `request.content.decode()` raised an
        # AttributeError/NameError and masked the real error.
        log.error(
            "YouTube: Error retrieving data from API: %s (%s)"
            % (request, e)
        )
    return video_id
def _translateQuery(self, function, parameters={}):
    """Call the translation API endpoint *function* with *parameters*.

    Returns the decoded JSON payload, or None on error.
    NOTE(review): mutable default for `parameters` is shared across
    calls — confirm no caller relies on that before changing it.
    """
    if (self.registryValue('appId') == ''):
        log.error('Translate: Set your appId and restart the plugin')
        return
    log.debug('Translate.query: %s' % (repr(parameters)))
    # Honour an optional HTTP proxy from the bot configuration.
    if (self.registryValue('httpProxy') != ''):
        opener = urllib2.build_opener(
            urllib2.ProxyHandler({'http': self.registryValue('httpProxy')}))
    else:
        opener = urllib2.build_opener()
    response = opener.open(self.registryValue('queryURL') + function + '?'+
                           'appId=' + self.registryValue('appId') + '&' +
                           urllib.urlencode(parameters))
    try:
        # The API response is a BOM-prefixed JSON string; the dumps/
        # loads round-trip plus the [1:] slice strips that first byte.
        data = json.loads(json.dumps(response.read()))
        data = json.loads(data[1:])
        log.debug('Translate.reply: %s' % repr(data))
        return(data)
    except:
        log.error('Translate.query error')
        return(None)
def _ddgurl(text, region=None, safeSearch=None):
    """Run a DuckDuckGo Lite search for *text*.

    Optional *region* sets the 'kl' locale parameter and *safeSearch*
    ('active'/'moderate'/'off') the 'kp' level. Returns a tuple of
    (request url, final url after redirects, parsed results).
    """
    # DuckDuckGo has a 'lite' site free of unparseable JavaScript
    # elements, so we'll use that to our advantage!
    params = {"q": text}
    if region:
        params["kl"] = region
    # Map the symbolic safe-search level onto DDG's numeric 'kp' flag;
    # unknown levels are silently ignored, as before.
    kp_levels = {"active": 1, "moderate": -1, "off": -2}
    if safeSearch and safeSearch in kp_levels:
        params["kp"] = kp_levels[safeSearch]
    url = "https://lite.duckduckgo.com/lite?" + urlencode(params)
    log.debug("DDG: Using URL %s for search %s", url, text)
    real_url, raw = utils.web.getUrlTargetAndContent(url)
    parser = DDGHTMLParser()
    parser.feed(raw.decode("utf-8"))
    # Remove "sponsored link" results
    return (url, real_url, parser.results)
def doPrivmsg(self, irc, msg):
    """Feed channel messages to the brain, picking a reply probability
    based on whether the bot was mentioned."""
    (channel, message) = msg.args
    # Skip: messages addressed as commands, CTCP, non-channel targets,
    # and channels where the plugin is disabled.
    if callbacks.addressed(irc.nick, msg) or ircmsgs.isCtcp(
            msg) or not irc.isChannel(channel) or not self.registryValue(
            'enable', channel):
        return
    if msg.nick.lower() in self.registryValue('ignoreNicks', channel):
        log.debug("Cobe: nick %s in ignoreNicks for %s" %
                  (msg.nick, channel))
        return
    if ircmsgs.isAction(msg):
        # If the message was an action...we'll learn it anyways!
        message = ircmsgs.unAction(msg)
    if irc.nick.lower() in message.lower():
        # Were we addressed in the channel?
        probability = self.registryValue('probabilityWhenAddressed',
                                         channel)
    else:
        # Okay, we were not addressed, but what's the probability we
        # should reply?
        probability = self.registryValue('probability', channel)
    #if self.registryValue('stripNicks'):
    #    removenicks = '|'.join(item + '\W.*?\s' for item in irc.state.channels[channel].users)
    #    text = re.sub(r'' + removenicks + '', 'MAGIC_NICK', text)
    # Now we can pass this to our learn function!
    self._learn(irc, msg, channel, message, probability)
class ChannelUserDB(ChannelUserDictionary):
    """A (channel, id) keyed dictionary persisted as CSV; subclasses
    provide deserialize() to turn a CSV row into a value."""

    def __init__(self, filename):
        """Load entries from *filename*, skipping (and logging) any
        malformed lines; missing files leave the DB empty."""
        ChannelUserDictionary.__init__(self)
        self.filename = filename
        try:
            fd = file(self.filename)
        except EnvironmentError, e:
            log.warning('Couldn\'t open %s: %s.', self.filename, e)
            return
        reader = csv.reader(fd)
        try:
            lineno = 0
            for t in reader:
                lineno += 1
                try:
                    # Row layout: channel, id, then subclass-specific
                    # fields handled by deserialize().
                    channel = t.pop(0)
                    id = t.pop(0)
                    try:
                        id = int(id)
                    except ValueError:
                        # We'll skip over this so, say, nicks can be
                        # kept here.
                        pass
                    v = self.deserialize(channel, id, t)
                    self[channel, id] = v
                except Exception, e:
                    # A single bad row shouldn't abort the whole load.
                    log.warning('Invalid line #%s in %s.',
                                lineno, self.__class__.__name__)
                    log.debug('Exception: %s', utils.exnToString(e))
        except Exception, e:
            # This catches exceptions from csv.reader.
            log.warning('Invalid line #%s in %s.',
                        lineno, self.__class__.__name__)
            log.debug('Exception: %s', utils.exnToString(e))
def dosearch(self, irc, channel, text):
    """Find an IMDb title URL for *text* using the Google and/or DDG
    plugins, trying them in their configured priority order.

    Returns the matched URL string, or None.
    """
    google = None
    ddg = None
    match = None
    # A registry value > 0 enables the backend and doubles as its
    # priority (1 is tried before 2).
    if self.registryValue("google", channel) > 0:
        google = irc.getCallback("google")
        if not google:
            log.error(
                "IMDb: Error: Google search enabled but plugin not loaded."
            )
    if self.registryValue("ddg", channel) > 0:
        ddg = irc.getCallback("ddg")
        if not ddg:
            log.error(
                "IMDb: Error: DDG search enabled but plugin not loaded.")
    if not google and not ddg:
        log.error("IMDb: Google and DDG plugins not loaded.")
        return
    query = "site:www.imdb.com/title/ %s" % text
    pattern = re.compile(r"https?://www.imdb.com/title/tt\d+/$")
    # Try priority 1 then priority 2; stop at the first match.
    for i in range(1, 3):
        if match:
            break
        if google and self.registryValue("google", channel) == i:
            try:
                results = google.decode(
                    google.search(query, irc.network, channel))
                for r in results:
                    # Result objects have differed across plugin
                    # versions: dict-like vs. attribute access.
                    try:
                        match = re.search(pattern, r["url"])
                    except TypeError:
                        match = re.search(pattern, r.link)
                    if match:
                        log.debug(
                            "IMDb: found link using Google search: %s"
                            % match.group(0))
                        break
            except:
                # Best-effort: any backend failure falls through to
                # the next search provider.
                pass
        elif ddg and self.registryValue("ddg", channel) == i:
            try:
                results = ddg.search_core(
                    query,
                    channel_context=channel,
                    max_results=10,
                    show_snippet=False,
                )
                for r in results:
                    match = re.search(pattern, r[2])
                    if match:
                        log.debug("IMDb: found link using DDG search %s"
                                  % match.group(0))
                        break
            except:
                pass
    if match:
        return match.group(0)
    else:
        return
def stopticker(self, irc, msg, args):
    """Signal all ticker threads to stop and wait for each to exit."""
    self.stop = True
    # Drain the thread list, joining each worker as it is removed.
    while self._threads:
        worker = self._threads.pop()
        log.debug("waiting for %r to finish", worker.name)
        worker.join()
        log.debug("%r exited", worker.name)
    irc.reply("Stopped monitoring.")
def makeIrcsDie():
    """Kills Ircs."""
    log.info("Killing Irc objects.")
    # Iterate over a snapshot, since die() may mutate the global list.
    for irc in list(ircs):
        if irc.zombie:
            log.debug("Not killing %s, it's already a zombie.", irc)
        else:
            irc.die()
def makeIrcsDie():
    """Kills Ircs."""
    log.info('Killing Irc objects.')
    # Iterate over a snapshot, since die() may mutate the global list.
    for irc in list(ircs):
        if irc.zombie:
            log.debug('Not killing %s, it\'s already a zombie.', irc)
        else:
            irc.die()
def _loadPlugins(self, irc):
    """Load every configured plugin for *irc*, forcing 'important'
    plugins on when so configured, and logging (never raising) on
    individual load failures."""
    self.log.info('Loading plugins (connecting to %s).', irc.network)
    alwaysLoadImportant = conf.supybot.plugins.alwaysLoadImportant()
    important = conf.supybot.commands.defaultPlugins.importantPlugins()
    for (name, value) in conf.supybot.plugins.getValues(fullNames=False):
        if irc.getCallback(name) is None:
            load = value()
            if not load and name in important:
                # Important plugins are loaded regardless of their
                # individual setting when alwaysLoadImportant is on.
                if alwaysLoadImportant:
                    s = '%s is configured not to be loaded, but is being '\
                        'loaded anyway because ' \
                        'supybot.plugins.alwaysLoadImportant is True.'
                    self.log.warning(s, name)
                    load = True
            if load:
                # We don't load plugins that don't start with a capital
                # letter.
                if name[0].isupper() and not irc.getCallback(name):
                    # This is debug because each log logs its beginning.
                    self.log.debug('Loading %s.', name)
                    try:
                        m = plugin.loadPluginModule(name,
                                                    ignoreDeprecation=True)
                        plugin.loadPluginClass(irc, m)
                    except callbacks.Error as e:
                        # This is just an error message.
                        log.warning(str(e))
                    except plugins.NoSuitableDatabase as e:
                        s = 'Failed to load %s: no suitable database(%s).' % (
                            name, e)
                        log.warning(s)
                    except ImportError as e:
                        # Distinguish "plugin missing" and "plugin
                        # incompatible" from generic import errors.
                        e = str(e)
                        if e.endswith(name):
                            s = 'Failed to load {0}: No plugin named {0} exists.'.format(
                                utils.str.dqrepr(name))
                        elif "No module named 'config'" in e:
                            s = (
                                "Failed to load %s: This plugin may be incompatible "
                                "with your current Python version. If this error is appearing "
                                "with stock Supybot plugins, remove the stock plugins directory "
                                "(usually ~/Limnoria/plugins) from 'config directories.plugins'."
                                % name)
                        else:
                            s = 'Failed to load %s: import error (%s).' % (
                                name, e)
                        log.warning(s)
                    except Exception as e:
                        log.exception('Failed to load %s:', name)
            else:
                # Let's import the module so configuration is preserved.
                try:
                    _ = plugin.loadPluginModule(name)
                except Exception as e:
                    log.debug('Attempted to load %s to preserve its '
                              'configuration, but load failed: %s',
                              name, e)
    world.starting = False
def handler_imgur_image(self, url, info, channel):
    """
    Handles retrieving information about images from the imgur API.

    Used for both direct images and imgur.com/some_image_id_here type
    links, as they're both single images. Returns a rendered title
    string, falling back to the default handler on any failure.
    """
    self.initialize_imgur_client(channel)
    from imgurpython.helpers.error import ImgurClientRateLimitError
    from imgurpython.helpers.error import ImgurClientError
    title = None
    if self.imgur_client:
        """
        If there is a period in the path, it's a direct link to an image.
        If not, then it's a imgur.com/some_image_id_here type link
        """
        if "." in info.path:
            # Direct image link: strip the extension to get the id.
            path = info.path.lstrip("/")
            image_id = path.split(".")[0]
        else:
            image_id = info.path.lstrip("/")
        if self.is_valid_imgur_id(image_id):
            log.debug("SpiffyTitles: found image id %s" % (image_id))
            try:
                image = self.imgur_client.get_image(image_id)
                if image:
                    # Render the configured template with the image's
                    # metadata.
                    imgur_template = Template(self.registryValue(
                        "imgurTemplate", channel=channel))
                    readable_file_size = self.get_readable_file_size(
                        image.size)
                    compiled_template = imgur_template.render({
                        "title": image.title,
                        "type": image.type,
                        "nsfw": image.nsfw,
                        "width": image.width,
                        "height": image.height,
                        "view_count": "{:,}".format(image.views),
                        "file_size": readable_file_size,
                        "section": image.section
                    })
                    title = compiled_template
                else:
                    log.error("SpiffyTitles: imgur API returned unexpected results!")
            except ImgurClientRateLimitError as e:
                log.error("SpiffyTitles: imgur rate limit error: %s" % (e.error_message))
            except ImgurClientError as e:
                log.error("SpiffyTitles: imgur client error: %s" % (e.error_message))
        else:
            log.error("SpiffyTitles: error retrieving image id for %s" % (url))
    if title is not None:
        return title
    else:
        # Fall back to the generic page-title handler.
        return self.handler_default(url, channel)
def _loadPlugins(self, irc):
    """Load every configured plugin for *irc*, forcing 'important'
    plugins on when so configured, and logging (never raising) on
    individual load failures."""
    self.log.debug('Loading plugins (connecting to %s).', irc.network)
    alwaysLoadImportant = conf.supybot.plugins.alwaysLoadImportant()
    important = conf.supybot.commands.defaultPlugins.importantPlugins()
    for (name, value) in conf.supybot.plugins.getValues(fullNames=False):
        if irc.getCallback(name) is None:
            load = value()
            if not load and name in important:
                # Important plugins are loaded regardless of their
                # individual setting when alwaysLoadImportant is on.
                if alwaysLoadImportant:
                    s = '%s is configured not to be loaded, but is being '\
                        'loaded anyway because ' \
                        'supybot.plugins.alwaysLoadImportant is True.'
                    self.log.warning(s, name)
                    load = True
            if load:
                # We don't load plugins that don't start with a capital
                # letter.
                if name[0].isupper() and not irc.getCallback(name):
                    # This is debug because each log logs its beginning.
                    self.log.debug('Loading %s.', name)
                    try:
                        m = plugin.loadPluginModule(name,
                                                    ignoreDeprecation=True)
                        plugin.loadPluginClass(irc, m)
                    except callbacks.Error as e:
                        # This is just an error message.
                        log.warning(str(e))
                    except plugins.NoSuitableDatabase as e:
                        s = 'Failed to load %s: no suitable database(%s).' % (
                            name, e)
                        log.warning(s)
                    except ImportError as e:
                        # Distinguish "plugin missing" and "plugin
                        # incompatible" from generic import errors.
                        e = str(e)
                        if e.endswith(name):
                            s = 'Failed to load {0}: No plugin named {0} exists.'.format(
                                utils.str.dqrepr(name))
                        elif "No module named 'config'" in e:
                            s = (
                                "Failed to load %s: This plugin may be incompatible "
                                "with your current Python version. If this error is appearing "
                                "with stock Supybot plugins, remove the stock plugins directory "
                                "(usually ~/Limnoria/plugins) from 'config directories.plugins'."
                                % name)
                        else:
                            s = 'Failed to load %s: import error (%s).' % (
                                name, e)
                        log.warning(s)
                    except Exception as e:
                        log.exception('Failed to load %s:', name)
            else:
                # Let's import the module so configuration is preserved.
                try:
                    _ = plugin.loadPluginModule(name)
                except Exception as e:
                    log.debug(
                        'Attempted to load %s to preserve its '
                        'configuration, but load failed: %s',
                        name, e)
    world.starting = False
def __init__(self, *args, **kwargs):
    """Load the pickled database into memory, starting empty on any
    failure (e.g. no file yet)."""
    self.db = {}
    try:
        # NOTE(review): `filename` is not a parameter or attribute —
        # presumably a module-level constant in this plugin; confirm.
        with open(filename, 'rb') as f:
            self.db = pickle.load(f)
    except Exception as e:
        log.debug(
            'LastFM: Unable to load database, creating '
            'a new one: %s', e)
def doPrivmsg(self, irc, msg):
    """Collect private-message votes for the currently running decision."""
    # Ignore CTCP traffic, but keep /me actions.
    if ircmsgs.isCtcp(msg) and not ircmsgs.isAction(msg):
        return
    # Only handle PMs to the bot, from registered participants, while
    # a decision is actually in progress.
    if (msg.args[0] == irc.nick and self.running_decision
            and msg.nick in self.running_decision.dp.participants.keys()):
        log.debug("Message:" + msg.args[1])
        # add_vote() returning True means the decision completed.
        if self.running_decision.add_vote(msg.nick, msg.args[1]):
            self.running_decision = None
def britney_phases(self):
    """Report the current britney (Debian testing migration) phase.

    Returns a status string while britney is running, otherwise None
    (implicitly).
    """
    try:
        status = urlopen('http://release.debian.org/britney/britney.status',
                         timeout=5)
    except URLError:
        log.debug("Unable to get britney status")
    else:
        data = status.read()
        # NOTE(review): on Python 3, read() returns bytes and
        # endswith(':\n') with a str argument raises TypeError —
        # confirm which Python / urlopen this runs under.
        if data.endswith(':\n'):
            return 'Britney is running, %s phase' % data[:-2].lower()
def _handleSocketError(self, e):
    """Tolerate a bounded number of EAGAINs during connect; anything
    else (or too many EAGAINs) triggers disconnect + reconnect."""
    # (11, 'Resource temporarily unavailable') raised if connect
    # hasn't finished yet. We'll keep track of how many we get.
    if e.args[0] == 11 and self.eagains <= 120:
        log.debug('Got EAGAIN, current count: %s.', self.eagains)
        self.eagains += 1
    else:
        drivers.log.disconnect(self.currentServer, e)
        self.scheduleReconnect()
def get(self, prefix):
    """Return the stored user entry for *prefix*, or None if absent."""
    user = self._get_key(prefix)
    log.debug('%s: looking up prefix %s; got user %s',
              self._plugin_name, prefix, user)
    # Normalize the key unless the DB is case sensitive.
    key = user if self.case_sensitive else user.lower()
    # dict.get automatically returns None if the entry does not exist.
    return self.db.get(key)
def translationparty(self, irc, msg, args, opts, text):
    """[--lang <language>[,...]] [--show <none|one|all>] [--max <int>] [--verbose] <text>

    Try to find equilibrium in back-and-forth translations of <text>.
    (Defaults: --lang ja --show none --max 50)"""
    input_langs = ['ja']
    show = 'none'
    max_translations = 50
    announce = False
    if len(text) > 1000:
        irc.reply('The text to be translated cannot exceed 1000 characters. Your request contains %d characters' % (len(text)))
    else:
        # Pull overrides out of the parsed option list.
        for (opt,arg) in opts:
            if opt == 'lang':
                input_langs = arg.split(',')
            if opt == 'max':
                max_translations = arg
            if opt == 'verbose':
                announce = True
            if opt == 'show':
                show = arg
        try:
            # The language chain always starts from English.
            langs = ['en']
            for l in input_langs:
                iso_code = validateLanguage(l)
                if iso_code == None:
                    raise TranslationError(value="Unknown language: %s" % (l))
                langs.append(iso_code)
            result = self._party(langs, text, max_translations)
            if announce:
                # Fewer results than the cap means we converged.
                if len(result) < max_translations:
                    irc.reply("Equilibrium found!")
                else:
                    irc.reply("It is doubtful that this phrase will ever reach equilibrium.")
            texts = map(lambda x: x['text'],result)
            if show == 'all':
                irc.reply(" -> ".join(texts).encode('utf8'))
            elif show == 'one':
                # Only the original and the final text.
                irc.reply(" -> ".join((texts[0],texts[-1])).encode('utf8'))
            else:
                # STILL TOO VERBOSE
                #irc.reply(('%(text)s [%(iterations)d iterations]' % { 'iterations' : len(texts), 'text' : texts[-1] }).encode('utf8'))
                irc.reply(texts[-1].encode('utf8'))
        except TranslationError, e:
            irc.reply(e)
            log.error(str(e))
            # Dump the translation chain so failures can be diagnosed.
            if e.stack is not None:
                texts = map(lambda x: '[%s] %s' % (x['lang'],x['text']),e.stack)
                log.debug("Stack: %s" % (" -> ".join(texts).encode('utf8')))
            if e.url is not None:
                log.debug("Last URL: %s" % (e.url))
def startticker(self, irc, msg, args):
    """Spawn one monitoring thread per configured ticker."""
    # Refuse to double-start while worker threads are still live.
    if not self.stop and self._threads:
        irc.reply("Already monitoring? %r threads running." % len(self._threads))
        return
    if msg:
        irc.reply("Starting monitoring.")
    self.stop = False
    for symbol in TICKERS:
        log.debug("starting %r", symbol)
        worker = threading.Thread(target=self._ticker, args=(irc, symbol))
        worker.start()
def __getManPageFd(self, release, language, command):
    """Return a file descriptor for *command*'s manual page in the
    Ubuntu Manpage Repository, probing each section in turn; None if
    no section has it."""
    for section in self.sections():
        candidate = self.__buildUrl(release, section, command, language)
        log.debug('ManpageCache.__getManPageFd: Trying url %s' % candidate)
        fd = self.__tryUrl(candidate)
        if fd:
            log.debug('ManpageCache.__getManPageFd: Success.')
            return fd
    return None
def _getJsonResponse(self, url, retries=2):
    """Fetch *url* and decode it as JSON, retrying up to *retries*
    times on web errors; returns None once retries are exhausted."""
    try:
        log.debug('Retrieving: %s' % (url))
        doc = web.getUrl(url, headers=HEADERS)
        log.debug('Response: %s' % (doc))
        response = simplejson.loads(doc)
        return response
    except web.Error, e:
        log.warning('Error: %s', str(e))
        if retries > 0:
            log.warning('Retries left: %d' % (retries))
            # Recursive retry with the counter decremented.
            return self._getJsonResponse(url, retries=retries - 1)
def __call__(self, irc, msg, args, stateAttrs={}):
    """Run each argument context over *args* in order, accumulating
    parsed values into a fresh state object, which is returned.

    Raises callbacks.ArgumentError if args run out mid-spec or if
    extra arguments remain and the spec disallows them.
    """
    state = self._state(self.types[:], stateAttrs)
    while state.types:
        context = state.types.pop(0)
        try:
            context(irc, msg, args, state)
        except IndexError:
            # Ran out of arguments before the spec was satisfied.
            raise callbacks.ArgumentError
    if args and not state.allowExtra:
        log.debug('args and not self.allowExtra: %r', args)
        raise callbacks.ArgumentError
    return state
def answer(self, msg):
    """Check *msg* against the accepted answers, applying progressively
    fuzzier matching, and award points / advance the game on a hit."""
    if not self.active or self.correct:
        return
    if not self.correct:
        for ans in self.a:
            # Stage 1: exact match after whitespace/case normalization.
            ans = " ".join(ans.split()).strip().lower()
            guess = " ".join(msg.args[1].split()).strip().lower()
            if guess == ans:
                self.correct = True
                break
            elif not self.correct:
                # Stage 2: exact match after clean() normalization.
                answer = self.clean(ans)
                guess = self.clean(guess)
                if not self.correct and guess == answer:
                    self.correct = True
                    break
                elif (not self.correct and self.flexibility < 1
                        and self.flexibility > 0.5):
                    # Stage 3: fuzzy match when flexibility is enabled.
                    dist = textdistance.jaro_winkler(guess, answer)
                    log.debug(
                        "Jeopardy: guess: {0}, answer: {1}, length: {2}, "
                        "distance: {3}, flexibility: {4}".format(
                            guess, answer, len(answer), dist,
                            self.flexibility))
                    if dist >= self.flexibility:
                        self.correct = True
                        break
                    elif (dist < self.flexibility and "," in self.a[0]
                            or "&" in self.a[0]):
                        # NOTE(review): `and` binds tighter than `or`
                        # here, so a '&' in the answer triggers this
                        # branch regardless of dist — confirm intended.
                        dist = textdistance.jaccard(guess, answer)
                        if dist >= self.flexibility:
                            self.correct = True
                            break
    if self.correct:
        # Credit the answering nick for both the round and the total.
        if not msg.nick in self.scores:
            self.scores[msg.nick] = 0
        self.scores[msg.nick] += self.p
        if not msg.nick in self.roundscores:
            self.roundscores[msg.nick] = 0
        self.roundscores[msg.nick] += self.p
        self.unanswered = 0
        reply = self.correct_template.render(
            nick=msg.nick,
            answer=self.a[0],
            points=self.p,
            round=self.roundscores[msg.nick],
            total=self.scores[msg.nick],
        )
        self.reply(reply)
        self.correct = True
        self.answered += 1
        self.clear()
        self.newquestion()
def __init__(self, *args, **kwargs):
    """
    Loads the existing database, creating a new one in memory
    if none exists.
    """
    self.db = {}
    try:
        # NOTE(review): `filename` is not a parameter or attribute —
        # presumably a module-level constant in this plugin; confirm.
        with open(filename, 'rb') as f:
            self.db = pickle.load(f)
    except Exception as e:
        log.debug('LastFM: Unable to load database, creating '
                  'a new one: %s', e)
def _getJsonResponse(self,url,retries = 2):
    """Fetch *url* and decode it as JSON, retrying up to *retries*
    times on web errors; returns None once retries are exhausted."""
    try:
        log.debug('Retrieving: %s' % (url))
        doc = web.getUrl(url, headers=HEADERS)
        log.debug('Response: %s' % (doc))
        response = simplejson.loads(doc)
        return response
    except web.Error, e:
        log.warning('Error: %s',str(e))
        if retries > 0:
            log.warning('Retries left: %d' % (retries))
            # Recursive retry with the counter decremented.
            return self._getJsonResponse(url,retries=retries-1)
def newf(url, headers={}, data=None):
    """Stub web-fetch function for tests: pops the next expected
    (url, response) pair from the enclosing `expected_requests` list
    and serves it, asserting the request matches expectations.

    Closes over `self` (the test case) and `expected_requests`.
    """
    # GETs only: any POST body means the code under test misbehaved.
    self.assertIsNone(data, "Unexpected POST to %s" % url)
    assert expected_requests, url
    (expected_url, response) = expected_requests.pop(0)
    self.assertEqual(url, expected_url, "Unexpected URL: %s" % url)
    log.debug("Got request to %s", url)
    if isinstance(response, bytes):
        # Canned body: return it as the fetched content.
        return response
    elif isinstance(response, Exception):
        # Canned failure: simulate a fetch error.
        raise response
    else:
        assert False, response
def __call__(self, irc, msg, args, state):
    """Greedily parse as many arguments as possible with the wrapped
    context, stopping when args run out; parse errors either propagate
    or end the loop depending on continueOnError."""
    st = state.essence()
    try:
        while args:
            self.__parent.__call__(irc, msg, args, st)
    except IndexError:
        # Consumed everything; that's the normal exit.
        pass
    except (callbacks.ArgumentError, callbacks.Error), e:
        if not self.continueOnError:
            raise
        else:
            log.debug('Got %s, returning default.', utils.exnToString(e))
            pass
def __init__(self, getopts): self.spec = getopts # for repr self.getopts = {} self.getoptL = [] for (name, spec) in getopts.iteritems(): if spec == '': self.getoptL.append(name) self.getopts[name] = None else: self.getoptL.append(name + '=') self.getopts[name] = contextify(spec) log.debug('getopts: %r', self.getopts) log.debug('getoptL: %r', self.getoptL)
def user_has_capability(self, msg):
    """Return True if the sender of *msg* holds the configured
    channel capability for the message's channel."""
    channel, mask = msg.args[0], msg.prefix
    required_capability = self.registryValue("requireCapability")
    cap = ircdb.makeChannelCapability(channel, required_capability)
    has_cap = ircdb.checkCapability(mask, cap, ignoreDefaultAllow=True)
    verdict = "has" if has_cap else "does NOT have"
    log.debug("SpiffyTitles: %s %s required capability '%s'"
              % (mask, verdict, required_capability))
    return has_cap
def __init__(self, *args, **kwargs):
    """
    Loads the existing database, creating a new one in memory
    if none exists.
    """
    self.db = {}
    try:
        # NOTE(review): `filename` is not a parameter or attribute —
        # presumably a module-level constant in this plugin; confirm.
        with open(filename, 'rb') as f:
            self.db = pickle.load(f)
    except Exception as e:
        log.debug(
            'LastFM: Unable to load database, creating '
            'a new one: %s', e)
def flush(self):
    """Flushes the channel database to its file.

    Writes atomically via AtomicFile; a no-op when noFlush is set or
    no filename was configured.
    """
    if not self.noFlush:
        if self.filename is not None:
            fd = utils.file.AtomicFile(self.filename)
            for (channel, c) in self.channels.iteritems():
                fd.write('channel %s' % channel)
                fd.write(os.linesep)
                # Each channel serializes its own state, indented.
                c.preserve(fd, indent='    ')
            fd.close()
        else:
            log.warning('ChannelsDictionary.flush without self.filename.')
    else:
        log.debug('Not flushing ChannelsDictionary because of noFlush.')
def translate(self, irc, msg, args, optlist, text):
    """[--from <source>] [--to <target>] <text>

    Translate text using Google Translate API. Uses automatic language
    detection if source not set. No target uses the plugin default.
    """
    optlist = dict(optlist)
    key = self.registryValue("translate.key")
    if not key:
        irc.reply("Error: No API key has been set.")
        return
    if "from" in optlist:
        source = optlist.get("from")
    else:
        source = self.registryValue("translate.source", msg.channel)
    if "to" in optlist:
        target = optlist.get("to")
    else:
        target = self.registryValue("translate.target", msg.channel)
    url = "https://translation.googleapis.com/language/translate/v2"
    # Bug fix: the query string used to be built by raw interpolation
    # of the user's text, so '&', '#', '+' etc. corrupted the request.
    # Let requests url-encode everything via params=.
    params = {"q": text, "target": target, "key": key}
    if source != "auto":
        # Omitting 'source' makes the API auto-detect the language.
        params["source"] = source
    response = requests.get(url, params=params, timeout=10)
    if not response.status_code == 200:
        log.debug(
            "GoogleCloud: Error accessing {0}: {1}".format(
                url, response.content.decode()
            )
        )
        return
    result = json.loads(response.content)
    if not result.get("data"):
        log.debug("GoogleCloud: Error opening JSON response")
        return
    # The API reports the detected source language only in auto mode.
    if result["data"]["translations"][0].get("detectedSourceLanguage"):
        reply = "{0} [{1}~>{2}]".format(
            html.unescape(result["data"]["translations"][0]["translatedText"]),
            result["data"]["translations"][0]["detectedSourceLanguage"],
            target,
        )
    else:
        reply = "{0} [{1}~>{2}]".format(
            html.unescape(result["data"]["translations"][0]["translatedText"]),
            source,
            target,
        )
    irc.reply(reply)
def checkIgnored(hostmask, recipient='', users=users, channels=channels):
    """checkIgnored(hostmask, recipient='') -> True/False

    Checks if the user is ignored by the recipient of the message.
    """
    # Registered-user checks first; a KeyError anywhere in here means
    # we fall back to the global default-ignore policy below.
    try:
        user = users.getUser(users.getUserId(hostmask))
        if user._checkCapability('owner'):
            # Owners shouldn't ever be ignored.
            return False
        if user.ignore:
            log.debug('Ignoring %s due to his IrcUser ignore flag.', hostmask)
            return True
    except KeyError:
        # If there's no user...
        if conf.supybot.defaultIgnore():
            log.debug('Ignoring %s due to conf.supybot.defaultIgnore',
                      hostmask)
            return True
    if ignores.checkIgnored(hostmask):
        log.debug('Ignoring %s due to ignore database.', hostmask)
        return True
    if ircutils.isChannel(recipient):
        if channels.getChannel(recipient).checkIgnored(hostmask):
            log.debug('Ignoring %s due to the channel ignores.', hostmask)
            return True
    return False
def _ddgurl(text):
    """Run a DuckDuckGo Lite search for *text*.

    Returns (request_url, final_url, parsed_results).
    """
    # DuckDuckGo has a 'lite' site free of unparseable JavaScript
    # elements, so we'll use that to our advantage!
    query_url = "https://lite.duckduckgo.com/lite?" + urlencode({"q": text})
    log.debug("DDG: Using URL %s for search %s", query_url, text)
    final_url, raw = utils.web.getUrlTargetAndContent(query_url)
    parser = DDGHTMLParser()
    parser.feed(raw.decode("utf-8"))
    # Remove "sponsored link" results
    return (query_url, final_url, parser.results)
def dosearch(self, irc, channel, text):
    """Search Google and/or DDG for a lyrics.fandom.com page matching <text>.

    Engines are tried in the priority order given by their registry
    values (1 first, then 2). Returns (title, url) on the first hit,
    or (None, None) when nothing is found or no engine is usable.
    """
    google = ddg = title = None
    # Fix: 'match' was never initialized, so an empty result list (or no
    # enabled engine branch) raised NameError at the final check.
    match = None
    if self.registryValue("google", channel) > 0:
        google = irc.getCallback("google")
        if not google:
            log.error(
                "Lyrics: Error: Google search enabled but plugin not loaded."
            )
    if self.registryValue("ddg", channel) > 0:
        ddg = irc.getCallback("ddg")
        if not ddg:
            log.error(
                "Lyrics: Error: DDG search enabled but plugin not loaded.")
    if not google and not ddg:
        log.error("Lyrics: Google and DDG plugins not loaded.")
        return None, None
    query = "site:lyrics.fandom.com/wiki/ %s" % text
    pattern = re.compile(r"https?://lyrics.fandom.com/wiki/.*")
    for i in range(1, 3):
        if google and self.registryValue("google", channel) == i:
            results = google.decode(
                google.search(query, irc.network, channel))
            for r in results:
                # Google plugin versions differ: results are dicts in
                # newer versions, objects with attributes in older ones.
                try:
                    match = re.search(pattern, r["url"])
                except TypeError:
                    match = re.search(pattern, r.link)
                if match:
                    try:
                        title = r["title"].replace(":", " - ").split("|")[0]
                    except TypeError:
                        title = r.title.replace(":", " - ").split("|")[0]
                    log.debug("Lyrics: found link using Google search")
                    break
        # Fix: also require the ddg callback to be loaded, otherwise
        # search_core was called on None.
        elif ddg and self.registryValue("ddg", channel) == i:
            results = ddg.search_core(query, channel_context=channel,
                                      max_results=10, show_snippet=False)
            for r in results:
                match = re.search(pattern, r[2])
                if match:
                    title = r[0].replace(":", " - ").split("|")[0]
                    log.debug("Lyrics: found link using DDG")
                    break
        if match and title:
            # Fix: stop after the higher-priority engine succeeds instead
            # of re-searching (and possibly clobbering the hit) with the
            # lower-priority engine.
            break
    if match and title:
        return title, match.group(0)
    else:
        return None, None
def _ddgurl(text):
    """Run a DuckDuckGo Lite search for *text*.

    Returns (url, real_url, cells) where cells is the list of result
    <td> elements with "sponsored link" entries filtered out.
    """
    # DuckDuckGo has a 'lite' site free of unparseable JavaScript
    # elements, so we'll use that to our advantage!
    url = "https://duckduckgo.com/lite?" + urlencode({"q": text})
    log.debug("DDG: Using URL %s for search %s", url, text)
    real_url, data = utils.web.getUrlTargetAndContent(url)
    data = data.decode("utf-8")
    # Fix: name the parser explicitly. Bare BeautifulSoup(data) picks
    # whichever parser happens to be installed (emitting
    # GuessedAtParserWarning) and can parse differently per environment.
    soup = BeautifulSoup(data, "html.parser")
    # Remove "sponsored link" results
    return (url, real_url,
            [td for td in soup.find_all('td')
             if 'result-sponsored' not in str(td.parent.get('class'))])