def message(msg):
    # Respond only when the body starts with 'sauron' (str.find() returns 0)
    if not msg['body'].find('sauron'):
        try:
            self.rooms[msg['room_id']].speak(repr(self.metrics))
        except KeyError:
            # We haven't joined this room yet; remember it for next time
            self.rooms[msg['room_id']] = self.camp.room(msg['room_id'])
    logger.info(repr(msg))

def authenticated(self, xs):
    logger.info('Authenticated %s' % str(self))
    # Register observers for the stanza types we care about
    self.xmlstream.addObserver('/message', self.message)
    self.xmlstream.addObserver('/presence', self.presence)
    self.xmlstream.addObserver('/iq', self.iq)
    self.xmlstream.addObserver('/*', self.all)
    # Announce our presence
    presence = domish.Element((None, 'presence'))
    self.xmlstream.send(presence)
    # Build a test chat message carrying an inline (XHTML-IM) image
    msg = domish.Element(('jabber:client', 'message'))
    msg['from'] = str(self)
    msg['to'] = '*****@*****.**'
    msg['type'] = 'chat'
    msg.addElement('body', 'jabber:client', 'testing')
    html = domish.Element((None, 'html'))
    html['xmlns'] = 'http://jabber.org/protocol/xhtml-im'
    body = domish.Element((None, 'body'))
    body['xmlns'] = 'http://www.w3.org/1999/xhtml'
    img = domish.Element((None, 'img'))
    # The hash should be of the data you send across
    data = open('seomoz.png', 'rb').read().encode('base64')
    key = '*****@*****.**' % hashlib.sha1(data.replace('\n', '')).hexdigest()
    self.cids[key] = data
    img['src'] = 'cid:%s' % key
    img['alt'] = 'seomoz'
    body.addChild(img)
    html.addChild(body)
    msg.addChild(html)
    logger.warn(self.msgToString(msg))
    self.xmlstream.send(msg)

def calculate_req_per_second(self, total_access):
    # only send results if the request count is greater than 30
    if total_access > 30 and self.serializer.has_key('last_total_access') and total_access > self.serializer['last_total_access']:
        result = abs(total_access - self.serializer['last_total_access']) / self.interval
    else:
        logger.info('requests from webserver not enough (< 30 requests) or first run, we don\'t send any data!')
        result = None
    self.serializer['last_total_access'] = total_access
    return result

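# A quick worked example of the delta computation above (illustrative numbers;
# the real values come from the webserver counter and the configured polling
# interval).
interval = 60               # seconds between polls
last_total_access = 10000   # counter value at the previous poll
total_access = 13000        # counter value now
rate = abs(total_access - last_total_access) / float(interval)
# rate == 50.0 requests per second
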
def metrics(self, metrics):
    for name, results in metrics.items():
        for key, value in results['results'].items():
            logger.info('Pushing %s-%s => %s' % (name, key, repr(value)))
            v, u = value
            try:
                self.api.update_status('%s-%s => %s %s' % (name, key, repr(v), u))
            except tweepy.error.TweepError as e:
                logger.error(repr(e.reason))

def bind(self, msg):
    '''Yeah, this is ugly as sin, but Element doesn't support xpath'''
    for e in msg.elements():
        if e.name == 'bind':
            for e in e.elements():
                if e.name == 'jid':
                    self.full = str(e)
                    logger.info(self.full)
                    return

def send(self, to, message, subject=None, type='chat'):
    msg = domish.Element(('jabber:client', 'message'))
    msg['from'] = str(self)
    msg['to'] = to
    msg['type'] = type
    msg.addElement('body', 'jabber:client', message)
    if subject:
        msg.addElement('subject', 'jabber:client', subject)
    self.xmlstream.send(msg)
    logger.info('Sent %s message %s' % (to, message))

def metrics(self, metrics):
    for name, results in metrics.items():
        for key, value in results['results'].items():
            logger.info('Pushing %s-%s => %s' % (name, key, repr(value)))
            v, u = value
            self.conn.put_metric_data(self.namespace, name + '-' + key, unit=u, value=v, dimensions=self.dims)

def values(self):
    # Alright, first get new stats on the file
    s = os.fstat(self.f)
    # The lines we've read
    lines = []
    # Now, see if the file was nuked
    # I'm not sure how this works. Checking inode might not really capture
    # what we're talking about. It certainly happens when the file is replaced,
    # but there /may/ be other times when it changes
    if s.st_ino != self.stat.st_ino:
        logger.warn('Inode for %s has changed' % self.path)
        os.close(self.f)
        self.f = os.open(self.path, os.O_RDONLY | os.O_NONBLOCK)
    elif s.st_mtime > self.stat.st_mtime:
        # If it's been modified since we last checked...
        r, w, e = select.select([self.f], [], [], 0)
        # And it's not read-ready, then we have to actually re-open it
        if len(r) == 0:
            os.close(self.f)
            self.f = os.open(self.path, os.O_RDONLY | os.O_NONBLOCK)
    # Now, remember the current stats
    self.stat = s
    # Now, let's check to see if it's ready for some reading
    content = ''
    r, w, e = select.select([self.f], [], [], 0)
    while len(r):
        data = os.read(self.f, 1024)
        if not data:
            # Nothing left to read; bail out so we don't spin at EOF
            break
        content += data
        r, w, e = select.select([self.f], [], [], 0)
    # Now, split it into lines
    lines = content.strip().split('\n')
    # Now that we have all our lines, go ahead and try to match the regex to each line
    counts = dict([(k, 0) for k in self.patterns])
    for line in lines:
        for k, r in self.patterns.items():
            m = r.search(line)
            if m:
                try:
                    # Use the last matching group if found
                    counts[k] += int(m.groups()[-1])
                except ValueError:
                    logger.warn('Could not parse int from %s. Using 1' % m.groups()[-1])
                    counts[k] += 1
                except IndexError:
                    logger.info('No groups in regular expression. Using 1')
                    counts[k] += 1
    return {'results': dict([(k, (v, 'Count')) for k, v in counts.items()])}

def values(self):
    try:
        # Alright, first get new stats on the file
        s = os.lstat(self.path)
        # The lines we've read
        lines = []
        # Now, see if the file was nuked
        # I'm not sure how this works. Checking inode might not really capture
        # what we're talking about. It certainly happens when the file is replaced,
        # but there /may/ be other times when it changes
        if s.st_ino != self.stat.st_ino:
            logger.warn('Inode for %s has changed' % self.path)
            self.f.close()
            self.f = file(self.path)
            lines = self.f.read(s.st_size).strip().split('\n')
        elif s.st_size < self.stat.st_size:
            logger.warn('File %s has shrunk since last read! Reading from beginning...' % self.path)
            self.f.seek(0)
            lines = self.f.read(s.st_size).strip().split('\n')
        elif s.st_mtime > self.stat.st_mtime:
            # If the file has been changed since last we looked
            self.f.seek(self.stat.st_size)
            lines = self.f.read(s.st_size - self.stat.st_size).strip().split('\n')
        # Now, remember the current stats
        self.stat = s
        # Now that we have all our lines, go ahead and try to match the regex to each line
        counts = dict([(k, 0) for k in self.patterns])
        for line in lines:
            for k, r in self.patterns.items():
                m = r.search(line)
                if m:
                    try:
                        # Use the last matching group if found
                        counts[k] += int(m.groups()[-1])
                    except ValueError:
                        logger.warn('Could not parse int from %s. Using 1' % m.groups()[-1])
                        counts[k] += 1
                    except IndexError:
                        logger.info('No groups in regular expression. Using 1')
                        counts[k] += 1
        return {'results': dict([(k, (v, 'Count')) for k, v in counts.items()])}
    except Exception as e:
        raise MetricException(e)

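# Both values() variants above assume self.patterns maps metric names to
# compiled regexes. A minimal, illustrative table (the names and patterns are
# made up): when a regex has groups, the last group is parsed as an integer
# increment; otherwise each matching line simply counts as 1.
import re

patterns = {
    'errors': re.compile(r'\bERROR\b'),
    'bytes_sent': re.compile(r'bytes_sent=(\d+)'),
}
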
def __init__(self, user, password, host=None, port=5222, resource=None):
    self.xmlstream = None
    self.user, self.host = user.split('@')
    self.server = host or self.host
    self.resource = resource or socket.gethostname()
    self.jid = jid.JID(tuple=(self.user, self.host, self.resource))
    self.full = self.jid.full()
    self.cids = {}
    factory = client.XMPPClientFactory(self.jid, password)
    factory.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT, self.connected)
    factory.addBootstrap(xmlstream.STREAM_END_EVENT, self.disconnected)
    factory.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, self.authenticated)
    factory.addBootstrap(xmlstream.INIT_FAILED_EVENT, self.init_failed)
    factory.addBootstrap('/iq/bind/jid', self.bind)
    logger.info('Connecting %s' % str(self))
    # Honor the port argument rather than hard-coding 5222
    reactor.connectTCP(self.server, port, factory)

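# Minimal usage sketch. The class name and JID below are hypothetical; the
# constructor only queues the TCP connection, so nothing happens until the
# Twisted reactor is running (elsewhere it is started in a background thread).
from twisted.internet import reactor

bot = XMPPClient('alerts@example.com', 'secret', resource='sauron')
reactor.run()
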
def calculate_req_per_second(self, total_httpd_access):
    current_access = float(total_httpd_access)
    # only send results if uptime is greater than 70 seconds
    if int(self.serverstatus_result['Uptime']) > 70:
        if self.serializer.has_key('last_httpd_total_access') and current_access > self.serializer['last_httpd_total_access']:
            result = abs(current_access - self.serializer['last_httpd_total_access']) / self.interval
        else:
            # fall back to the aggregated req/sec if no last_httpd_total_access value is available
            logger.info('no last state of total accesses, or it is greater than the current value; falling back to Apache\'s requests per second')
            result = self.serverstatus_result['ReqPerSec']
    else:
        logger.info('uptime from webserver not enough (needs > 70 seconds), still in warmup phase, we don\'t send any data!')
        result = None
    self.serializer['last_httpd_total_access'] = current_access
    return result

def __init__(self, consumer_key, consumer_secret, access_token=None, access_secret=None):
    super(Twitter, self).__init__()
    # https://github.com/tweepy/tweepy/blob/master/
    self.auth = tweepy.auth.OAuthHandler(consumer_key, consumer_secret)
    if not access_token or not access_secret:
        try:
            logger.warn('To authenticate, visit : %s' % self.auth.get_authorization_url())
            verifier = raw_input('Verifier: ')
        except tweepy.error.TweepError:
            raise EmitterException('Failed to request token.')
        try:
            logger.info(repr(self.auth.get_access_token(verifier)))
        except tweepy.error.TweepError:
            raise EmitterException('Error! Failed to get access token.')
    else:
        self.auth.set_access_token(access_token, access_secret)
    self.api = tweepy.API(self.auth)

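# Illustrative setup (placeholder keys). With no stored access token the
# constructor walks through the OAuth verifier flow on the console; pass the
# saved token/secret to skip it on later runs. The metrics payload mirrors the
# {name: {'results': {key: (value, unit)}}} shape used throughout.
emitter = Twitter('CONSUMER_KEY', 'CONSUMER_SECRET')
emitter.metrics({'web': {'results': {'req-per-sec': (50, 'Count')}}})
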
def updateActions(self, actions):
    '''Update the actions on this account based on the supplied actions.
    Actions should be a dictionary of Amazon Simple Notification Service
    topic names and their associated subscriptions.'''
    # First, we need an SNS connection to make these changes
    conn = SNSConnection(**self.kwargs)
    # Now make sure each subscription is registered to the topic
    for name, subscriptions in actions.items():
        logger.info('Creating topic %s' % name)
        # Try to make a topic
        try:
            arn = conn.create_topic(name)['CreateTopicResponse']['CreateTopicResult']['TopicArn']
            self.actions[name] = arn
        except KeyError:
            raise EmitterException('Bad response creating topic %s' % name)
        if len(subscriptions) == 0:
            raise EmitterException('No subscriptions for action %s' % name)
        # Now try to arrange for subscriptions
        # Oddly enough, calling create_topic doesn't have any effect
        # if the topic already exists, but calling subscribe() for an
        # existing subscription causes a second subscription to be added.
        # So, we have to get a list of current subscriptions, and then
        # make sure to only add the subscription if it's not currently there
        logger.info('Getting a list of current subscriptions...')
        current = conn.get_all_subscriptions_by_topic(arn)
        current = current['ListSubscriptionsByTopicResponse']
        current = current['ListSubscriptionsByTopicResult']
        current = current['Subscriptions']
        current = set(s['Endpoint'] for s in current)
        # For all desired subscriptions not present, subscribe
        for s in subscriptions:
            if s['endpoint'] not in current:
                logger.info('Adding %s to action %s' % (s['endpoint'], name))
                conn.subscribe(arn, s.get('protocol', 'email'), s['endpoint'])
            else:
                logger.info('%s already subscribed to action' % s['endpoint'])
        # Check for subscriptions that are active, but not listed...
        activeUnlisted = set(current) - set([s['endpoint'] for s in subscriptions])
        for s in activeUnlisted:
            logger.warn('Subscription "%s" active, but not listed in config' % s)

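# Illustrative shape of the actions argument (topic name and endpoints are
# made up): each topic maps to a list of subscriptions, each with an endpoint
# and an optional protocol (the code above defaults to 'email').
actions = {
    'sauron-alerts': [
        {'endpoint': 'ops@example.com'},
        {'endpoint': 'https://example.com/sns-hook', 'protocol': 'https'},
    ],
}
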
def values(self):
    try:
        res = {}
        while not self.queue.empty():
            name, value, unit, method = self.queue.get_nowait()
            if res.has_key(name):
                res[name][0] += value
                res[name][3] += 1
            else:
                res[name] = [value, unit, method, 1]
            self.queue.task_done()
        for k, v in res.iteritems():
            if v[2] == 'persecond':
                # aggregate to a per-second value
                v[0] = float(v[0]) / self.interval
            elif v[2] == 'avg':
                # average over the number of samples received
                v[0] = float(v[0]) / v[3]
            res[k] = tuple([v[0], v[1]])
        if not res:
            logger.info('No data from external metric listener received')
        return {'results': res}
    except Exception as e:
        raise MetricException(e)

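# Illustrative producer side (hypothetical queue name): whatever receives the
# external metrics pushes (name, value, unit, method) tuples onto the same
# queue that values() drains. 'persecond' entries are summed and divided by
# the interval, 'avg' entries are averaged, and anything else is just summed.
listener_queue.put(('requests', 125, 'Count', 'persecond'))
listener_queue.put(('latency', 42.0, 'Milliseconds', 'avg'))
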
def iq(self, iq):
    logger.info(self.msgToString(iq))
    if iq['type'] == 'get':
        cid = None
        for e in iq.elements():
            try:
                cid = e['cid']
                break
            except KeyError:
                continue
        if cid:
            response = domish.Element((None, 'iq'))
            response['id'] = iq['id']
            response['to'] = iq['from']
            response['type'] = 'result'
            data = domish.Element(('urn:xmpp:bob', 'data'))
            data['cid'] = cid
            data['type'] = 'image/png'
            data['max-age'] = '86400'
            data.addContent('\n')
            data.addContent(self.cids[cid])
            response.addChild(data)
            logger.info('Sending %s' % self.msgToString(response))
            # Send the response we just built, not the original iq
            self.xmlstream.send(response)

def calculate_req_per_second(self, total_httpd_access):
    current_access = float(total_httpd_access)
    # only send results if uptime is greater than 70 seconds
    if int(self.serverstatus_result['Uptime']) > 70:
        if self.tempdict.has_key('last_httpd_total_access') and current_access > self.tempdict['last_httpd_total_access']:
            result = abs(current_access - self.tempdict['last_httpd_total_access']) / self.interval
        else:
            # fall back to the aggregated req/sec if no last_httpd_total_access value is available
            logger.info('no last state of total accesses, or it is greater than the current value; falling back to Apache\'s requests per second')
            result = self.serverstatus_result['ReqPerSec']
    else:
        logger.info('uptime from webserver not enough (needs > 70 seconds), still in warmup phase, we don\'t send any data!')
        result = None
    self.tempdict['last_httpd_total_access'] = current_access
    return result

def __init__(self, subdomain, token):
    super(Emitter, self).__init__()
    logger.info('Making Campfire...')
    self.camp = Campfire(subdomain, token)
    logger.info('Rooms...')
    self.rooms = {}
    logger.info('Room...')
    self.camp.find_room_by_name('Testing').join()
    self.results = {}
    try:
        threading.Thread(target=reactor.run).start()
    except Exception:
        pass

def metrics(self, metrics):
    for k, m in metrics.items():
        for metric, value in m['results'].items():
            logger.info('\t%s-%s => %s' % (k, metric, repr(value)))

def chat(self, msg):
    logger.info('Received message %s' % self.msgToString(msg))
    self.send(msg['from'], 'hello')

def connected(self, xs):
    logger.info('Connected %s' % str(self))
    self.xmlstream = xs

def disconnected(self, xs):
    logger.info('Disconnected %s' % str(self))