Example #1
class SerieslyStore(object):

    def __init__(self, host):
        self.seriesly = Seriesly(host)

    @staticmethod
    def build_dbname(cluster, server, bucket, collector):
        db_name = (collector or "") + cluster + (bucket or "") + (server or "")
        for char in "[]/\;.,><&*:%=+@!#^()|?^'\"":
            db_name = db_name.replace(char, "")
        return db_name

    @memoize
    def _get_db(self, db_name):
        try:
            existing_dbs = self.seriesly.list_dbs()
        except ConnectionError as e:
            logger.interrupt("seriesly not available: {}".format(e))
        else:
            if db_name not in existing_dbs:
                logger.info("Creating new database: {}".format(db_name))
                self.seriesly.create_db(db_name)
            return self.seriesly[db_name]

    def append(self, data, cluster=None, server=None, bucket=None,
               collector=None):
        db_name = self.build_dbname(cluster, server, bucket, collector)
        db = self._get_db(db_name)
        db.append(data)
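A minimal usage sketch of the store above (the host, server, and metric values are illustrative, not from the source):

store = SerieslyStore("127.0.0.1")
store.append({"cpu_utilization": 42.5},
             cluster="perf", server="10.1.1.1", bucket="default",
             collector="ns_server")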
Example #2
def start_samplers(self):
    logger.info('Creating seriesly dbs')
    seriesly = Seriesly(host=self.test_config.gateload_settings.seriesly_host)
    for i, _ in enumerate(self.remote.gateways, start=1):
        seriesly.create_db('gateway_{}'.format(i))
        seriesly.create_db('gateload_{}'.format(i))
    self.remote.start_sampling()
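Once the per-gateway databases exist, samplers can append to them directly; a hedged sketch (the metric name is illustrative):

seriesly['gateway_1'].append({'memstats_Alloc': 1024})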
Example #3
def resource_monitor(interval=1):

    rest = create_rest()
    nodes = rest.node_statuses()
    atop_db = Seriesly(cfg.SERIESLY_IP, 3133)
 
    if "atop" in atop_db.list_dbs():
        atop_db = atop_db['atop']
    else:
        atop_db.create_db('atop')
        atop_db = atop_db['atop']
        
    for node in nodes:
        restart_atop(node.ip)

    while True:
        for node in nodes:

            # check if atop running (could be new node)
            if isinstance(node.ip, unicode):
                node.ip = str(node.ip)
            if check_atop_proc(node.ip):
                restart_atop(node.ip)

            # get stats from node
            sample = get_atop_sample(node.ip)

            update_node_stats(atop_db, sample, node.ip)
            
            time.sleep(interval)
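The update_node_stats helper is not shown above; a hypothetical sketch of what it might do (not the project's actual implementation):

def update_node_stats(atop_db, sample, ip):
    # hypothetical: key each atop sample by a dot-free node id
    # so the stored document stays attributable to its node
    atop_db.append({ip.replace('.', '_'): sample})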
Example #4
def multi_query(
    count,
    design_doc_name,
    view_name,
    params=None,
    bucket="default",
    password="",
    type_="view",
    batch_size=100,
    hosts=None,
):

    if params is not None:
        params = urllib.urlencode(params)  # Python 2: urlencode lives in urllib, not urllib2

    pool = eventlet.GreenPool(batch_size)

    api = "%s/_design/%s/_%s/%s?%s" % (bucket, design_doc_name, type_, view_name, params)

    qtime = data = url = None

    args = dict(api=api, hosts=hosts)
    for qtime, data, url in pool.imap(send_query, [args for i in xrange(count)]):
        pass

    if cfg.SERIESLY_IP != "" and qtime is not None:
        # store the most recent query response time 'qtime' into seriesly
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)

        db = None
        if "fast" in seriesly.list_dbs():
            db = "fast"
        else:
            bucketStatus = (app.workload_manager.BucketStatus.from_cache(bucket) or
                            app.workload_manager.BucketStatus(bucket))
            db = bucketStatus.latency_db
            if db not in seriesly.list_dbs():
                seriesly.create_db(db)

        if db is not None:
            seriesly[db].append({"query_latency": qtime})

    # log to logs/celery-query.log
    try:
        rc = data.read()[0:200]
    except Exception:
        rc = "exception reading query response"

    logger.error("\n")
    logger.error("url: %s" % url)
    logger.error("latency: %s" % qtime)
    logger.error("data: %s" % rc)
Example #5
class SerieslyStore(object):

    def __init__(self, host):
        self.seriesly = Seriesly(host)

    @staticmethod
    def build_dbname(cluster, server, bucket, index, collector):
        db_name = (collector or "") + cluster + (bucket or "") + (index or "") + (server or "")
        for char in "[]/\;.,><&*:%=+@!#^()|?^'\"":
            db_name = db_name.replace(char, "")
        return db_name

    @memoize
    def _get_db(self, db_name):
        try:
            existing_dbs = self.seriesly.list_dbs()
        except ConnectionError as e:
            logger.interrupt("seriesly not available: {}".format(e))
        else:
            if db_name not in existing_dbs:
                logger.info("Creating a new database: {}".format(db_name))
                self.seriesly.create_db(db_name)
            return self.seriesly[db_name]

    def append(self, data, cluster=None, server=None, bucket=None, index=None,
               collector=None, timestamp=None):
        db_name = self.build_dbname(cluster, server, bucket, index, collector)
        db = self._get_db(db_name)
        try:
            db.append(data, timestamp=timestamp)
        except (BadRequest, socket.error):  # ignore bad requests and transient socket errors
            pass

    def drop_db(self, cluster=None, server=None, bucket=None, index=None, collector=None):
        db_name = self.build_dbname(cluster, server, bucket, index, collector)
        try:
            existing_dbs = self.seriesly.list_dbs()
        except ConnectionError as e:
            logger.interrupt("seriesly not available: {}".format(e))
        else:
            if db_name not in existing_dbs:
                logger.info("DB not present: {}".format(db_name))
                return
            logger.info("Dropping DB: {}".format(db_name))
            self.seriesly.drop_db(db_name)
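Unlike Example #1, this variant accepts an explicit timestamp and can drop databases; a minimal call sketch (the values are illustrative, and the timestamp format is an assumption that depends on the seriesly server):

store = SerieslyStore("127.0.0.1")
store.append({"ops": 1000}, cluster="perf", bucket="default",
             collector="ns_server", timestamp="1417545600")  # assumed format
store.drop_db(cluster="perf", bucket="default", collector="ns_server")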
Example #6
class SerieslyStore(Store):

    def __init__(self, host):
        self.seriesly = Seriesly(host)

    def _build_dbname(self, cluster, server, bucket):
        db_name = cluster
        if bucket:
            db_name += bucket
        if server:
            db_name += server.replace(".", "")
        return db_name

    def append(self, data, cluster=None, server=None, bucket=None):
        db_name = self._build_dbname(cluster, server, bucket)
        if db_name not in self.seriesly.list_dbs():
            self.seriesly.create_db(db_name)
        self.seriesly[db_name].append(data)
Example #7
class NsToSeriesly(object):
    def __init__(self, in_host, out_host, database):
        self.url = "http://{0}:8091/".format(in_host) + "pools/default/buckets/default/stats?zoom=minute"

        self.database = database
        self.seriesly = Seriesly(host=out_host)
        if database not in self.seriesly.list_dbs():
            self.seriesly.create_db(database)

    def collect(self):
        r = requests.get(self.url)

        all_stats = r.json["op"]["samples"]
        last_stats = dict((k, v[-1]) for k, v in all_stats.iteritems())

        self.store(last_stats)

    def store(self, data):
        self.seriesly[self.database].append(data)
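A hedged polling loop around the collector (hosts, database name, and interval are illustrative):

import time

collector = NsToSeriesly('127.0.0.1', '127.0.0.1', 'ns_default')
while True:
    collector.collect()
    time.sleep(60)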
Example #8
class SerieslyStore(Store):

    def __init__(self, host):
        self.seriesly = Seriesly(host)

    @staticmethod
    def build_dbname(cluster, server, bucket, collector):
        if collector:
            db_name = collector + cluster
        else:
            db_name = cluster
        if bucket:
            db_name += bucket
        if server:
            db_name += server.replace(".", "")
        return db_name

    def append(self, data, cluster=None, server=None, bucket=None,
               collector=None):
        db_name = self.build_dbname(cluster, server, bucket, collector)
        if db_name not in self.seriesly.list_dbs():
            self.seriesly.create_db(db_name)
        self.seriesly[db_name].append(data)
Example #9
def report_kv_latency(bucket="default"):

    if cfg.SERIESLY_IP == '':
        # seriesly not configured
        return

    rabbitHelper = report_kv_latency.rabbitHelper
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG+"_status") or\
        ClusterStatus()

    host = clusterStatus.get_random_host()
    if host is None:
        return

    ip, port = host.split(':')

    workloads = CacheHelper.workloads()
    for workload in workloads:
        if workload.active and workload.bucket == bucket:

            # read workload params
            bucket = str(workload.bucket)
            password = str(workload.password)

            # read template from active workload
            template = Template.from_cache(str(workload.template))
            template = template.__dict__
            client.decodeMajgicStrings(template)

            # setup key/val to use for timing
            key = _random_string(12)
            value = json.dumps(template['kv'])
            get_key = key

            # for get op, try to pull from consume_queue
            # so that we can calc impact of dgm
            consume_queue = workload.consume_queue
            if consume_queue is not None:
                keys = rabbitHelper.getJsonMsg(str(consume_queue),
                                               requeue=True)
                if len(keys) > 0:
                    get_key = str(keys['start'])

            # collect op latency
            set_latency = client.mc_op_latency('set', key, value, ip, port,
                                               bucket, password)
            get_latency = client.mc_op_latency('get', get_key, value, ip, port,
                                               bucket, password)
            delete_latency = client.mc_op_latency('delete', key, value, ip,
                                                  port, bucket, password)

            # report to seriesly
            seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
            db = None
            if 'fast' in seriesly.list_dbs():
                db = 'fast'
            else:
                bucketStatus = BucketStatus.from_cache(bucket) or BucketStatus(
                    bucket)
                db = bucketStatus.latency_db
                if db not in seriesly.list_dbs():
                    seriesly.create_db(db)

            if db is not None:
                seriesly[db].append({
                    'set_latency': set_latency,
                    'get_latency': get_latency,
                    'delete_latency': delete_latency
                })
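If the client exposes seriesly's /query endpoint, the stored latencies could later be read back through its group/ptr/reducer query model; a hedged sketch (parameter values are illustrative assumptions):

seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
avg_set_latency = seriesly['fast'].query({'group': 10000,       # 10 s buckets
                                          'ptr': '/set_latency',
                                          'reducer': 'avg'})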
Example #10
# when --purge is set, delete the cc_queues
# as well as the seriesly dbs
if "--purge" in sys.argv:

    queues = set(CacheHelper.queues())

    # cleaning up seriesly database (fast and slow created by cbtop)
    if cfg.SERIESLY_IP != '':
        from seriesly import Seriesly
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
        dbs = seriesly.list_dbs()
        for db in dbs:
            seriesly.drop_db(db)

        seriesly.create_db('event')

    for q_ in queues:
        try:
            RabbitHelper().delete(q_)
            print("Cleanup Queue: %s" % q_)
        except Exception:
            pass

# clean up cache
CacheHelper.cacheClean()

# start local consumer
exchange = cfg.CB_CLUSTER_TAG + "consumers"
RabbitHelper().exchange_declare(exchange, "fanout")
os.system("python consumer.py  &")
Example #11
# when --purge is set, delete the cc_queues
# as well as the seriesly dbs
if "--purge" in sys.argv:

    queues = set(CacheHelper.queues())

    # cleaning up seriesly database (fast and slow created by cbtop)
    if cfg.SERIESLY_IP != '':
        from seriesly import Seriesly
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
        dbs = seriesly.list_dbs()
        for db in dbs:
            seriesly.drop_db(db)

        seriesly.create_db('event')

    for q_ in queues:
        try:
            RabbitHelper().delete(q_)
            print("Cleanup Queue: %s" % q_)
        except Exception:
            pass

# clean up cache
CacheHelper.cacheClean()

# start sdk server
os.system("python sdkserver.py  &")