Example 1
    def set_ttl(self, key, ttl_t):
        # round the TTL down to the hour: expiring keys are bucketed per hour
        ttl_t = divmod(ttl_t, 3600)[0]*3600

        cdb = self.get_ttl_db(ttl_t)
        logger.debug("TTL save", key, "with ttl", ttl_t, "in", cdb, part='kv')
        cdb.Put(key, '')
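
Seen on its own, the divmod trick above just rounds a timestamp down to the start of its hour. A minimal standalone sketch (not part of the original class):

    import time

    def round_to_hour(ts):
        # same bucketing as divmod(ttl_t, 3600)[0] * 3600 in set_ttl
        return (ts // 3600) * 3600

    ttl_t = int(time.time()) + 5400   # expires in an hour and a half
    print(round_to_hour(ttl_t) == divmod(ttl_t, 3600)[0] * 3600)   # -> True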
Example 2
    def do_collector_thread(self):
        logger.log('COLLECTOR thread launched', part='check')
        cur_launchs = {}
        while not stopper.interrupted:
            now = int(time.time())
            for (colname, e) in self.collectors.iteritems():
                colname = e['name']
                inst    = e['inst']
                # maybe a collection is already running
                if colname in cur_launchs:
                    continue
                if now >= e['next_check']:
                    logger.debug('COLLECTOR: launching collector %s' % colname, part='check')
                    t = threader.create_and_launch(inst.main, name='collector-%s' % colname)
                    cur_launchs[colname] = t
                    e['next_check'] += 10
                    e['last_check'] = now

            to_del = []
            for (colname, t) in cur_launchs.iteritems():
                if not t.is_alive():
                    t.join()
                    to_del.append(colname)
            for colname in to_del:
                del cur_launchs[colname]

            time.sleep(1)
Example 3
    def compile(self, expr, check=None):
        # first find all the {...} placeholders so we can resolve them
        all_parts = re.findall('{.*?}', expr)

        changes = []

        for p in all_parts:
            p = p[1:-1]

            if p.startswith('collector.'):
                s = p[len('collector.'):]
                v = collectormgr.get_data(s)
                logger.debug('Ask', s, 'got', v)
                changes.append( (p, v) )
            elif p.startswith('configuration.'):
                s = p[len('configuration.'):]
                v = self._found_params(s, check)
                changes.append( (p, v) )
            
        if not len(changes) == len(all_parts):
            raise ValueError('Some parts cannot be changed')

        for (p,v) in changes:
            expr = expr.replace('{%s}' % p, str(v))

        return expr
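
The placeholder handling above can be exercised in isolation. A small sketch, with a hypothetical value table standing in for collectormgr and the configuration lookup:

    import re

    fake_values = {'collector.load.load1': 0.5}   # hypothetical data source

    expr = '{collector.load.load1} > 3'
    changes = []
    for p in re.findall('{.*?}', expr):
        name = p[1:-1]                            # strip the braces
        changes.append((name, fake_values[name]))
    for (name, v) in changes:
        expr = expr.replace('{%s}' % name, str(v))
    print(expr)                                   # -> '0.5 > 3'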
Example 4
    def launch(self):
        logger.debug("getSystem: start")
        res = {}

        res["hostname"] = platform.node()
        res["fqdn"] = socket.getfqdn()

        res["os"] = {}
        res["os"]["name"] = os.name
        res["os"]["platform"] = sys.platform

        res["cpucount"] = multiprocessing.cpu_count()

        res["linux"] = {"distname": "", "version": "", "id": ""}
        (distname, version, _id) = platform.linux_distribution()
        res["linux"]["distname"] = distname
        res["linux"]["version"] = version
        res["linux"]["id"] = _id

        res["user"] = os.getlogin()
        res["uid"] = os.getuid()
        res["gid"] = os.getgid()

        res["publicip"] = ""
        try:
            res["publicip"] = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            pass
        if not res["publicip"] or res["publicip"] == "127.0.0.1":
            res["publicip"] = get_public_address()
        logger.debug("getsystem: completed, returning")
        return res
Example 5
 def connect(self):
     if not self.con:
         try:
             self.con = Client(base_url='unix://var/run/docker.sock')
         except Exception, exp:
             logger.debug('Cannot connect to docker %s' % exp)
             self.con = None
Example 6
 def agent_members():
     response.content_type = 'application/json'
     logger.debug("/agent/members is called", part='http')
     nodes = {}
     with self.nodes_lock:
         nodes = copy.copy(self.nodes)
     return nodes
Example 7
    def launch(self):
        logger.debug('getRabbitMQStatus: start')

        if 'rabbitMQStatusUrl' not in self.config or \
                'rabbitMQUser' not in self.config or \
                'rabbitMQPass' not in self.config or \
                self.config['rabbitMQStatusUrl'] == 'http://www.example.com:55672/json':

            logger.debug('getRabbitMQStatus: config not set')
            return False

        logger.debug('getRabbitMQStatus: config set')

        try:
            logger.debug('getRabbitMQStatus: attempting authentication setup')

            manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
            manager.add_password(None, self.config['rabbitMQStatusUrl'], self.config['rabbitMQUser'], self.config['rabbitMQPass'])
            handler = urllib2.HTTPBasicAuthHandler(manager)
            opener = urllib2.build_opener(handler)
            urllib2.install_opener(opener)

            logger.debug('getRabbitMQStatus: attempting urlopen')
            req = urllib2.Request(self.config['rabbitMQStatusUrl'], None, {})

            # Do the request, log any errors
            request = urllib2.urlopen(req)
            response = request.read()

        except urllib2.HTTPError, e:
            logger.error('Unable to get RabbitMQ status - HTTPError = %s', e)
            return False
Example 8
    def launch(self):
        #logger.debug('getIOStats: start')

        iostats = {}

        if sys.platform != 'linux2':
            logger.debug('getIOStats: unsupported platform')
            return False

        #logger.debug('getIOStats: linux2')

        headerRegexp = re.compile(r'([%\\/\-\_a-zA-Z0-9]+)[\s+]?')
        itemRegexp = re.compile(r'^([a-zA-Z0-9\/]+)')
        valueRegexp = re.compile(r'\d+\.\d+')

        try:
            _cmd = 'iostat -d 1 2 -x -k'
            stats = self.execute_shell(_cmd)
            if not stats:
                logger.error('getIOStats: exception in launching command')
                return False

            recentStats = stats.split('Device:')[2].split('\n')
            header = recentStats[0]
            headerNames = re.findall(headerRegexp, header)
            device = None

            for statsIndex in range(1, len(recentStats)):
                row = recentStats[statsIndex]

                if not row:
                    # Ignore blank lines.
                    continue

                deviceMatch = re.match(itemRegexp, row)

                if deviceMatch is not None:
                    # Sometimes device names span two lines.
                    device = deviceMatch.groups()[0]

                values = re.findall(valueRegexp, row.replace(',', '.'))

                if not values:
                    # Sometimes values are on the next line so we encounter
                    # instances of [].
                    continue

                iostats[device] = {}

                for headerIndex in range(0, len(headerNames)):
                    headerName = headerNames[headerIndex]
                    iostats[device][headerName] = float(values[headerIndex])

        except Exception:
            logger.error('getIOStats: exception = %s', traceback.format_exc())
            return False

        #logger.debug('getIOStats: completed, returning')
        return iostats
Example 9
def export(f):
    def inner(*args, **kwargs):  # thin wrapper that just forwards the call
        return f(*args, **kwargs)
    # Export the function to the allowed functions
    fname = f.__name__
    functions[fname] = inner
    logger.debug('Evaluater: exporting function %s' % fname)
    return inner
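
Usage is the standard decorator pattern. A sketch with a made-up function (only export and functions come from the snippet above):

    @export
    def add_one(x):   # hypothetical function exposed to the evaluater
        return x + 1

    # the wrapper registered under the function name calls straight through
    assert functions['add_one'](41) == 42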
Example 10
 def load_package(self, package, file_path):
     logger.debug('Loading package data from file %s' % file_path)
     pname = package.get('name', None)
     if pname is None:
         logger.error('Package data is missing name entry (%s)' % file_path)
         return None
     self.packs[pname] = package
     return pname
Example 11
 def get(self, key):
     try:
         t0 = time.time()
         v = self.db.Get(key)
         logger.debug("TIME kv get", time.time() - t0, part='kv')
         return v
     except KeyError:
         return None
Example 12
 def set_name_if_unset(self, key):
    try:
       self.db.Get(key, fill_cache=False)
    except KeyError:
       self.db.Put(key, '')
       logger.debug('TS propagating a new key', key)
       # now propagate the key to the other ts nodes
       self.clust.stack_new_ts_broadcast(key)
    return False
Example 13
def grep(s, p, regexp=False):
    if not os.path.exists(p):
        return False
    try:
        f = open(p, 'r')
        lines = f.readlines()
    except Exception, exp:
        logger.debug('Trying to grep file %s but cannot open/read it: %s' % (p, exp))
        return False
Example 14
 def _parse_cgroup_file(self, stat_file):
     try:
         logger.debug("Opening cgroup file: %s" % stat_file)
         with open(stat_file) as fp:
             return dict(map(lambda x: x.split(), fp.read().splitlines()))
     except IOError:
         # It is possible that the container got stopped between the API call and now
         logger.info("Can't open %s. These metrics for this container are skipped." % stat_file)
         return None
Example 15
 def push_key(self, k, v, ttl=0):
    T0 = time.time()
    STATS.incr('ts.graphite.push-key', 1)
    v64 = base64.b64encode(v)
    logger.debug("PUSH KEY", k, "and value", len(v64))
    #self.clust.put_key(k, v64, allow_udp=True, ttl=ttl)
    self.clust.stack_put_key(k, v64, ttl=ttl)
    STATS.timer('ts.graphite.push-key', (time.time() - T0)*1000)
    return
Example 16
    def launch(self):
        logger.debug('getMySQLStatus: start')

        # Try import MySQLdb - http://sourceforge.net/projects/mysql-python/files/
        try:
            import MySQLdb
        except ImportError, e:
            logger.error('Unable to import MySQLdb')
            return False
Example 17
 def launch_full_sync(self):
     logger.debug("Launch_full_sync:: all nodes %d" % len(self.nodes), part='gossip')
     nodes = {}
     with self.nodes_lock:
         nodes = copy.copy(self.nodes)
     others = [ (n['addr'], n['port']) for n in nodes.values() if n['state'] == 'alive' and n['uuid'] != self.uuid]
     
     if len(others) >= 1:
         other = random.choice(others)
         logger.debug("launch_full_sync::", other, part='gossip')
         self.do_push_pull(other)
Example 18
   def grok_graphite_data(self, data):
      STATS.incr('ts.graphite.grok.data', 1)
      forwards = {}
      for line in data.splitlines():
         elts = line.split(' ')
         elts = [s.strip() for s in elts if s.strip()]

         if len(elts) != 3:
            return
         mname, value, timestamp = elts[0], elts[1], elts[2]
         hkey = hashlib.sha1(mname).hexdigest()
         ts_node_manager = self.clust.find_ts_node(hkey)
         # if I am the node that manages this key, add it to my backend
         if ts_node_manager == self.clust.uuid:
             logger.debug("I am the TS node manager")
             try:
                 timestamp = int(timestamp)
             except ValueError:
                 return
             value = to_best_int_float(value)
             if value is None:
                 continue
             self.tsb.add_value(timestamp, mname, value)
         # not me? stack a forwarder
         else:
             logger.debug("The node manager for this Ts is ", ts_node_manager)
             l = forwards.get(ts_node_manager, [])
             l.append(line)
             forwards[ts_node_manager] = l

      for (uuid, lst) in forwards.iteritems():
          node = self.clust.nodes.get(uuid, None)
          # maybe the node disappeared? bail out, we are not lucky
          if node is None:
              continue
          packets = []
          # first compute the packets
          buf = ''
          for line in lst:
              buf += line+'\n'
              if len(buf) > 1024:
                  packets.append(buf)
                  buf = ''
          if buf != '':
              packets.append(buf)

          # UDP
          sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
          for packet in packets:
             # do NOT use the node['port'], it's the internal communication, not the graphite one!
             sock.sendto(packet, (node['addr'], self.graphite_port))
          sock.close()

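
Before the UDP send, the lines owned by another node are packed into buffers of roughly 1 KB. That batching works on its own as well; a sketch with made-up metric lines and no sockets:

    lines = ['host.cpu.user 1.0 1450000000',
             'host.cpu.system 0.5 1450000000']    # made-up graphite lines
    packets = []
    buf = ''
    for line in lines:
        buf += line + '\n'
        if len(buf) > 1024:                       # flush once the buffer passes ~1 KB
            packets.append(buf)
            buf = ''
    if buf != '':
        packets.append(buf)                       # keep the trailing partial buffer
    print(len(packets))                           # -> 1, both lines fit in one packet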
Example 19
 def put_result(self, cname, results, metrics, log):
     logger.debug('[COLLECTOR] put result for %s: %s' % (cname, metrics))
     if cname in self.collectors:
         col = self.collectors[cname]
         col['log'] = log
         if results:
             col['results'] = results
             col['metrics'] = metrics
             col['active']  = True
         else:
             col['active']  = False
Example 20
 def open_pidfile(self, write=False):
     try:
         p = os.path.abspath(self.lock_path)
         logger.debug("Opening pid file: %s" % p)
         # Windows does not support the rw+ mode, so open in read mode first and reopen in write mode later if needed...
         if not write and os.path.exists(p):
             self.fpid = open(p, 'r+')
         else:  # if it does not exist either, create it empty
             self.fpid = open(p, 'w+')
     except Exception as err:
         raise Exception('Cannot open pid file: %s' % err)
Example 21
    def launch(self):
        logger.debug('getMongoDBStatus: start')

        if 'MongoDBServer' not in self.config or self.config['MongoDBServer'] == '':
            logger.debug('getMongoDBStatus: config not set')
            return False

        logger.debug('getMongoDBStatus: config set')

        try:
            import pymongo
            from pymongo import Connection

        except ImportError:
            logger.error('Unable to import pymongo library')
            return False

        # The dictionary to be returned.
        mongodb = {}

        try:
            import urlparse
            parsed = urlparse.urlparse(self.config['MongoDBServer'])

            mongoURI = ''

            # Can't use attributes on Python 2.4
            if parsed[0] != 'mongodb':
                mongoURI = 'mongodb://'
                if parsed[2]:
                    if parsed[0]:
                        mongoURI = mongoURI + parsed[0] + ':' + parsed[2]
                    else:
                        mongoURI = mongoURI + parsed[2]
            else:
                mongoURI = self.config['MongoDBServer']

            logger.debug('-- mongoURI: %s', mongoURI)

            conn = Connection(mongoURI, slave_okay=True)

            logger.debug('Connected to MongoDB')

        except Exception, ex:
            logger.error('Unable to connect to MongoDB server %s - Exception = %s', mongoURI, traceback.format_exc())
            return False
Example 22
    def launch(self):
        #logger.debug('get_open_ports: start')

        open_ports = {'tcp': [], 'udp': []}

        if sys.platform != 'linux2':
            logger.debug('get_open_ports: unsupported platform')
            return False

        #logger.debug('get_open_ports: linux2')

        
        try:
            _cmd = 'netstat -tuln'
            netstat = self.execute_shell(_cmd)
            if not netstat:
                logger.error('get_open_ports: exception in launching command')
                return False
            
            for line in netstat.splitlines():
                line = line.strip()

                # Will be something like
                #tcp        0      0 0.0.0.0:27017           0.0.0.0:*               LISTEN
                if not line.startswith('tcp') and not line.startswith('udp'):
                    # Not a good line, skip it
                    continue
                #print "LOOKING AT LINE"
                elts = [ e for e in line.split(' ') if e]
                #print "ELEMENTS", elts
                if len(elts) != 6:
                    #print "BAD LINE", elts
                    continue
                    
                open_port = {}
                open_port['proto']  = elts[0]
                open_port['source'] = elts[3]
                open_port['dest']   = elts[4]
                open_port['state']  = elts[5]

                if open_port['proto'].startswith('tcp'):
                    open_ports['tcp'].append(open_port)
                elif open_port['proto'].startswith('udp'):
                    open_ports['udp'].append(open_port)
                else:
                    print "Unknown protocol??"

        except Exception:
            logger.error('get_open_ports: exception = %s', traceback.format_exc())
            return False

        #logger.debug('get_open_ports: completed, returning')
        return open_ports
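
The per-line parsing can be checked in isolation; the sample below reuses the line quoted in the comment above:

    line = 'tcp        0      0 0.0.0.0:27017           0.0.0.0:*               LISTEN'
    elts = [e for e in line.split(' ') if e]      # drop the empty strings from repeated spaces
    open_port = {'proto':  elts[0],               # 'tcp'
                 'source': elts[3],               # '0.0.0.0:27017'
                 'dest':   elts[4],               # '0.0.0.0:*'
                 'state':  elts[5]}               # 'LISTEN'
    print(open_port)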
Example 23
def get_collectors(self):
    collector_dir = os.path.dirname(__file__)
    p = collector_dir + '/collectors/*py'
    logger.debug('Loading collectors from ', p)
    collector_files = glob.glob(p)
    for f in collector_files:
        fname = os.path.splitext(os.path.basename(f))[0]
        try:
            imp.load_source('collector%s' % fname, f)
        except Exception, exp:
            logger.error('Cannot load collector %s: %s' % (fname, exp))
            continue
Example 24
def get_local(u, local_socket, params={}):
    UnixHTTPConnection.socket_timeout = 5
    url_opener = urllib2.build_opener(UnixSocketHandler())
    p = local_socket
    if params:
        u = "%s?%s" % (u, urllib.urlencode(params))
    uri = "unix:/%s%s" % (p, u)
    logger.debug("Connecting to local http unix socket at: %s" % uri)
    req = urllib2.Request(uri, None)
    request = url_opener.open(req)
    response = request.read()
    code = request.code
    return (code, response)
Example 25
 def agent_join(other):
     response.content_type = 'application/json'
     addr = other
     port = self.port
     if ':' in other:
         parts = other.split(':', 1)
         addr = parts[0]
         port = int(parts[1])
     tgt = (addr, port)
     logger.debug("HTTP: agent join for %s:%s " % (addr, port), part='http')
     r = self.do_push_pull(tgt)
     logger.debug("HTTP: agent join for %s:%s result:%s" % (addr, port, r), part='http')
     return json.dumps(r)
Example 26
    def launch(self):
        now = int(time.time())
        diff = now - self.last_launch
        self.last_launch = now
        
        logger.debug('getKernelStats: start')

        if sys.platform == 'linux2':
            logger.debug('getKernelStats: linux2')

            try:
                logger.debug('getKernelStats: attempting open')
                lines = []
                with open('/proc/stat', 'r') as stats:
                    lines.extend(stats.readlines())
                with open('/proc/vmstat', 'r') as vmstat:
                    lines.extend(vmstat.readlines())
            except IOError, e:
                logger.error('getKernelStat: exception = %s', e)
                return False

            logger.debug('getKernelStat: open success, parsing')

            data = {}
            for line in lines:
                elts = line.split(' ', 1)
                # only keep 'key value' lines
                if len(elts) != 2:
                    continue
                try:
                    data[elts[0]] = long(elts[1])
                except ValueError: # not an int? skip this value
                    continue
            
            # Now compute per-second rates for the counters we track
            by_sec_keys = ['ctxt', 'processes', 'pgfault', 'pgmajfault']
            to_add = {}
            for (k,v) in data.iteritems():
                if k in by_sec_keys:
                    if k in self.store:
                        to_add['%s/s' % k] = (v - self.store[k]) / diff
                    else:
                        to_add['%s/s' % k] = 0
                    self.store[k] = data[k]
            for k in by_sec_keys:
                del data[k]
            data.update(to_add)
            logger.debug('getKernelStats: completed, returning')

            return data
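
The per-second values are plain counter deltas divided by the elapsed time; for example (numbers made up):

    previous = 125000                     # ctxt counter at the previous launch
    current  = 127500                     # ctxt counter now
    diff     = 10                         # seconds between the two launches
    print((current - previous) / diff)    # -> 250 context switches per second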
Example 27
 def encrypt(self, data):
     if not self.encryption_key:
         return data
     logger.debug('ENCRYPT with '+self.encryption_key)
     # make sure the data length is a multiple of 16 (the AES block size)
     if len(data) % 16 != 0:
         data += ' ' * (-len(data) % 16)
     try:
         cyph = AES.new(self.encryption_key, AES.MODE_ECB)
         ndata = cyph.encrypt(data)
         return ndata
     except Exception, exp:
         logger.error('Encryption fail:', exp, part='gossip')
         return ''
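
AES in ECB mode only accepts inputs whose length is a multiple of the 16-byte block size, which is what the space padding guarantees. The arithmetic on its own:

    data = 'hello gossip'                 # 12 characters
    if len(data) % 16 != 0:
        data += ' ' * (-len(data) % 16)   # pad up to the next multiple of 16
    print(len(data))                      # -> 16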
Example 28
 def load_collector(self, cls):
     colname = cls.__name__.lower()
     
     # If already loaded, skip it
     if colname in self.collectors:
         return
     
     logger.debug('Loading collector %s from class %s' % (colname, cls))
     try:
         # also give it our put result callback
         inst = cls(self.cfg_data, put_result=self.put_result)
     except Exception, exp:
         
         logger.error('Cannot load the %s collector: %s' % (cls, traceback.format_exc()))
         return
Example 29
 def launch_gossip(self):
     # There is no broadcast message to send, so bail out :)
     if len(broadcaster.broadcasts) == 0:
         return
     
     ns = self.nodes.values()
     #ns.sort()
     logger.debug("launch_gossip:: all nodes %d" % len(self.nodes), part='gossip')
     others = [n for n in ns if n['uuid'] != self.uuid]
     # Maybe everyone is dead; if so, bail out
     if len(others) == 0:
         return
     nb_dest = min(len(others), KGOSSIP)
     dests = random.sample(others, nb_dest)
     for dest in dests:
         logger.debug("launch_gossip::", dest['name'], part='gossip')
         self.do_gossip_push(dest)
Example 30
    def daemonize(self):
        logger.debug("Redirecting stdout and stderr as necessary..")
        if self.debug_path:
            fdtemp = os.open(self.debug_path, os.O_CREAT | os.O_WRONLY | os.O_TRUNC)
        else:
            fdtemp = os.open(REDIRECT_TO, os.O_RDWR)

        os.dup2(fdtemp, 1)  # standard output (1)
        os.dup2(fdtemp, 2)  # standard error (2)
        
        # Now the fork/setsid/fork..
        try:
            pid = os.fork()
        except OSError, e:
            s = "%s [%d]" % (e.strerror, e.errno)
            logger.error(s)
            raise Exception, s
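
The excerpt stops right after the first fork. The fork/setsid/fork sequence the comment refers to usually continues roughly like this (a generic daemonization sketch, not the project's actual code):

    import os

    pid = os.fork()
    if pid != 0:
        os._exit(0)      # parent returns to the caller / shell
    os.setsid()          # become session leader, detach from the controlling tty
    pid = os.fork()
    if pid != 0:
        os._exit(0)      # first child exits; the grandchild is the daemon
    os.chdir('/')
    os.umask(0)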