Example #1
def generate_index_file(storage_folder, test_file):
    db_event = Seriesly(cfg.SERIESLY_IP, 3133)['event']
    all_event_docs = db_event.get_all()
    phases_info = {}
    for doc in all_event_docs.itervalues():
        phases_info[int(doc.keys()[0])] = doc.values()[0]
    # dict.keys() returns a new list in Python 2, so sorting it in place
    # would have no effect; the phases are accessed by integer key below
    num_phases = len(phases_info)
    run_id = phases_info[1]['desc']
    run_id = run_id.replace(" ", "_")
    run_id = run_id.replace(",", "_")
    content = ""

    with open(test_file) as json_data:
        tests = json.load(json_data)

    for i in range(1, num_phases):
        sub_folder = storage_folder + "phase" + str(i) + "/"
        content += "<a style=\"font-family:arial;color:black;font-size:20px;\" href=\"%s\">%s</a><p>" % (
            "phase" + str(i), "phase" + str(i))
        if str(i) in tests["phases"]:
            content += json.dumps(
                tests["phases"][str(i)], indent=10, sort_keys=True) + "<p>"
        files = [
            f for f in os.listdir(sub_folder)
            if os.path.isfile(os.path.join(sub_folder, f))
        ]
        for f in files:
            content += "<a href=\"%s\">&nbsp;&nbsp;&nbsp;&nbsp;%s</a><p>" % (
                "phase" + str(i) + "/" + f, f)

    html_path = storage_folder + "index.html"
    with open(html_path, 'w') as index_file:
        index_file.write(index_html % content)
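
The module-level index_html template filled in on the final line is not part of the snippet; a minimal placeholder with the single %s slot might look like this (an illustrative assumption, not the original template):

# hypothetical template; the real index_html is defined elsewhere in the source module
index_html = """<html>
<head><title>system test report</title></head>
<body>
%s
</body>
</html>"""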
Example #2
def get_run_info(desc):
    db_event = Seriesly(cfg.SERIESLY_IP, 3133)['event']

    all_event_docs = db_event.get_all()
    phases_info = {}
    for doc in all_event_docs.itervalues():
        phases_info[int(doc.keys()[0])] = doc.values()[0]

    run_info = ''
    # take the first name/desc value; we assume it is the same for all phases
    if desc == 'name':
        for phase in sorted(phases_info):
            if 'name' in phases_info[phase]:
                run_info = phases_info[phase]['name']
                break
    if desc == 'build':
        for phase in sorted(phases_info):
            if 'desc' in phases_info[phase]:
                run_info = phases_info[phase]['desc']
                break
    run_info = run_info.replace(" ", "_")
    run_info = run_info.replace(",", "_")

    return run_info
Example #3
class SerieslyStore(object):

    def __init__(self, host):
        self.seriesly = Seriesly(host)

    @staticmethod
    def build_dbname(cluster, server, bucket, collector):
        db_name = (collector or "") + cluster + (bucket or "") + (server or "")
        for char in "[]/\;.,><&*:%=+@!#^()|?^'\"":
            db_name = db_name.replace(char, "")
        return db_name

    @memoize
    def _get_db(self, db_name):
        try:
            existing_dbs = self.seriesly.list_dbs()
        except ConnectionError as e:
            logger.interrupt("seriesly not available: {}".format(e))
        else:
            if db_name not in existing_dbs:
                logger.info("Creating new database: {}".format(db_name))
                self.seriesly.create_db(db_name)
            return self.seriesly[db_name]

    def append(self, data, cluster=None, server=None, bucket=None,
               collector=None):
        db_name = self.build_dbname(cluster, server, bucket, collector)
        db = self._get_db(db_name)
        db.append(data)
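
A minimal usage sketch for the store above (the host, labels, and metric payload are illustrative assumptions):

# _get_db() creates the database on first use, so append() can be called directly
store = SerieslyStore("127.0.0.1")
# the labels below are concatenated and sanitized into the db name "ns_servertestdefault101011"
store.append({"cpu_utilization": 41.7},
             cluster="test", server="10.10.1.1", bucket="default",
             collector="ns_server")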
Example #4
def main(db_name):
    # parse database name from cli arguments
    #db_name = parse_args()

    # initialize seriesly client
    db = Seriesly()[db_name]

    # get a set of all unique keys
    all_docs = db.get_all()
    all_keys = set(key for doc in all_docs.itervalues()
                       for key in doc.iterkeys())

    # plot all metrics to PNG images
    outdir = mkdtemp()
    for metric in all_keys:
        print metric
        if '/' not in metric:  # skip views and xdcr stats
            keys, values = get_metric(db, metric)
            plot_metric(metric, keys, values, outdir)

    try:
        subprocess.call(['convert', '{0}/*'.format(outdir), 'report.pdf'])
        print "PDF report was successfully generated!"
    except OSError:
        print "All images saved to: {0}".format(outdir)
    return outdir
Example #5
def resource_monitor(interval=1):

    rest = create_rest()
    nodes = rest.node_statuses()
    atop_db = Seriesly(cfg.SERIESLY_IP, 3133)

    if "atop" not in atop_db.list_dbs():
        atop_db.create_db('atop')
    atop_db = atop_db['atop']

    for node in nodes:
        restart_atop(node.ip)

    while True:
        for node in nodes:

            # check if atop running (could be new node)
            if isinstance(node.ip, unicode):
                node.ip = str(node.ip)
            if check_atop_proc(node.ip):
                restart_atop(node.ip)

            # get stats from node
            sample = get_atop_sample(node.ip)

            update_node_stats(atop_db, sample, node.ip)
            
            time.sleep(interval)
Example #6
def start_samplers(self):
    logger.info('Creating seriesly dbs')
    seriesly = Seriesly(host='{}'.format(
        self.test_config.gateload_settings.seriesly_host))
    for i, _ in enumerate(self.remote.gateways, start=1):
        seriesly.create_db('gateway_{}'.format(i))
        seriesly.create_db('gateload_{}'.format(i))
    self.remote.start_sampling()
Example #7
def main():
    # parse database name from cli arguments
    db_name = parse_args()

    # initialize seriesly client
    db = Seriesly()[db_name]

    # get a set of all unique keys
    all_docs = db.get_all()
    all_keys = set(key for doc in all_docs.itervalues()
                   for key in doc.iterkeys())

    # plot all metrics to PNG images
    outdir = mkdtemp()
    for metric in all_keys:
        print metric
        if '/' not in metric:  # skip views and xdcr stats
            keys, values = get_metric(db, metric)
            plot_metric(metric, keys, values, outdir)

    try:
        subprocess.call(['convert', '{0}/*'.format(outdir), 'report.pdf'])
        print "PDF report was successfully generated!"
    except OSError:
        print "All images saved to: {0}".format(outdir)
Example #8
def plot_all_phases(cluster_name, buckets):

    db_event = Seriesly(cfg.SERIESLY_IP, 3133)['event']

    # Get system test phase info and plot phase by phase
    all_event_docs = db_event.get_all()
    phases_info = {}
    for doc in all_event_docs.values():
        phases_info[int(list(doc.keys())[0])] = list(doc.values())[0]
    # sorting the list returned by dict.keys() in place has no effect;
    # keep an explicitly sorted list of the phase keys instead
    phase_keys = sorted(phases_info)

    num_phases = len(phases_info)

    run_id = store_report.get_run_info('name')

    if os.path.exists(run_id):
        shutil.rmtree(run_id)
    os.makedirs(run_id)

    for i in phase_keys:
        start_time = phases_info[i][[
            name for name in list(phases_info[i].keys())
            if (name != 'name' and name != 'desc')
        ][0]]
        start_time = int(start_time[:10])
        end_time = 0
        if i == phase_keys[-1]:
            end_time = int(time.time())
        else:
            end_time = phases_info[i + 1][[
                name for name in list(phases_info[i + 1].keys())
                if (name != 'name' and name != 'desc')
            ][0]]
            end_time = int(end_time[:10])

        start_time_snapshot = datetime.datetime.fromtimestamp(
            start_time).strftime('%m/%d/%Y %H:%M')
        end_time_snapshot = datetime.datetime.fromtimestamp(end_time).strftime(
            '%m/%d/%Y %H:%M')

        snapshot_name = "phase-%d-%s" % (i, [
            name for name in list(phases_info[i].keys())
            if (name != 'name' and name != 'desc')
        ][0])

        plot_use_cbmonitor(snapshot_name, cluster_name, start_time_snapshot,
                           end_time_snapshot)

        prepare_folder_report(run_id, i)

        store_90th_avg_value(buckets, start_time, end_time, run_id, i)

    storage_folder = os.getcwd() + "/" + run_id + "/"
    print("data stored in %s" % (storage_folder))
    return storage_folder
Example #9
def plot_all_phases(db_name, host_ip, bucket_name):
    # initialize seriesly client
    db = Seriesly()[db_name]
    db_event = Seriesly()['event']

    # plot all metrics to PNG images
    outdir = mkdtemp()
    
    # get a set of all unique keys based on time range
    all_docs = db.get_all()
    all_keys = set(key for doc in all_docs.itervalues()
                    for key in doc.iterkeys())

    # get system test phase info and plot phase by phase
    all_event_docs = db_event.get_all()
    phases_info = {}
    for doc in all_event_docs.itervalues():
        phases_info[int(doc.keys()[0])] = doc.values()[0]
    # dict.keys() returns a copy in Python 2, so sorting it in place has no
    # effect; iterate over the sorted keys to keep the phases in order
    phases = [phases_info[key] for key in sorted(phases_info)]
    num_phases = len(phases)
    run_id = ''

    for i in range(num_phases):
        if i == 0:
            run_id = phases[i]['run_id']

        start_time = phases[i].values()[0]
        start_time = int(start_time[:10])
        end_time = 0
        if i == num_phases - 1:
            end_time = int(time.time())
        else:
            end_time = phases[i+1].values()[0]
            end_time = int(end_time[:10])

        for metric in all_keys:
            #print metric
            if '/' not in metric:  # skip views and xdcr stats
                query = get_query(metric, host_ip, bucket_name, start_time, end_time)
                if len(query) > 0:
                    plot_metric(db, metric, query, outdir, i,  phases[i].keys()[0])

#                try:
#                    subprocess.call(['convert', '{0}/*'.format(outdir), 'report.pdf'])
#                    print "PDF report was successfully generated!"
#                except OSError:
    plot_metric_single_value("average", outdir, num_phases)
    plot_metric_single_value("90th", outdir, num_phases)
    plot_metric_single_value("absolute_time", outdir, num_phases)

    print "All images saved to: {0}".format(outdir)
    return outdir, run_id
Example #10
def __init__(self, host, dbslow, dbfast):
    self.slow = {}
    self.fast = {}
    self.dbslow = dbslow
    self.dbfast = dbfast
    self.seriesly = Seriesly(host=host)
    try:
        dbs = self.seriesly.list_dbs()
    except seriesly.exceptions.ConnectionError as e:
        logging.error("unable to connect to seriesly server: %s" % e)
        return
Пример #15
0
def multi_query(
    count,
    design_doc_name,
    view_name,
    params=None,
    bucket="default",
    password="",
    type_="view",
    batch_size=100,
    hosts=None,
):

    if params is not None:
        params = urllib2.urllib.urlencode(params)

    pool = eventlet.GreenPool(batch_size)

    api = "%s/_design/%s/_%s/%s?%s" % (bucket, design_doc_name, type_, view_name, params)

    qtime = data = url = None

    args = dict(api=api, hosts=hosts)
    for qtime, data, url in pool.imap(send_query, [args for i in xrange(count)]):
        pass

    if cfg.SERIESLY_IP != "" and qtime is not None:
        # store the most recent query response time 'qtime' into seriesly
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)

        db = None
        if "fast" in seriesly.list_dbs():
            db = "fast"
        else:
            bucketStatus = app.workload_manager.BucketStatus.from_cache(bucket) or app.workload_manager.BucketStatus(
                bucket
            )
            db = bucketStatus.latency_db
            if db not in seriesly.list_dbs():
                seriesly.create_db(db)

        if db is not None:
            seriesly[db].append({"query_latency": qtime})

    # log to logs/celery-query.log
    try:
        rc = data.read()[0:200]
    except Exception:
        rc = "exception reading query response"

    logger.error("\n")
    logger.error("url: %s" % url)
    logger.error("latency: %s" % qtime)
    logger.error("data: %s" % rc)
Example #11
def plot_all_phases(db_name, host_ip, bucket_name):
    # initialize seriesly client
    db = Seriesly()[db_name]
    db_event = Seriesly()['event']

    # plot all metrics to PNG images
    outdir = mkdtemp()
    
    # get a set of all unique keys based on time range
    all_docs = db.get_all()
    all_keys = set(key for doc in all_docs.itervalues()
                    for key in doc.iterkeys())

    # get system test phase info and plot phase by phase
    all_event_docs = db_event.get_all()
    phases_info = {}
    for doc in all_event_docs.itervalues():
        phases_info[int(doc.keys()[0])] = doc.values()[0]
    # dict.keys() returns a copy in Python 2, so sorting it in place has no
    # effect; iterate over the sorted keys to keep the phases in order
    phases = [phases_info[key] for key in sorted(phases_info)]
    num_phases = len(phases)
    run_id = ''

    for i in range(1, num_phases):
        if i == 1:
            run_id = phases[i]['run_id']

        start_time = phases[i].values()[0]
        start_time = int(start_time[:10])
        end_time = 0
        if i == num_phases - 1:
            end_time = int(time.time())
        else:
            end_time = phases[i+1].values()[0]
            end_time = int(end_time[:10])

        for metric in all_keys:
            #print metric
            if '/' not in metric:  # skip views and xdcr stats
                query = get_query(metric, host_ip, bucket_name, start_time, end_time)
                if len(query) > 0:
                    plot_metric(db, metric, query, outdir, i,  phases[i].keys()[0])

    for key in TABLE.keys():
        plot_metric_single_value(key, outdir, num_phases)

    print "All images saved to: {0}".format(outdir)
    return outdir, run_id
Example #12
def multi_query(count, design_doc_name, view_name, params=None,
                bucket="default", password="", type_="view",
                batch_size=100, hosts=None):

    if params is not None:
        params = urllib2.urllib.urlencode(params)

    pool = eventlet.GreenPool(batch_size)

    api = '%s/_design/%s/_%s/%s?%s' % (bucket,
                                       design_doc_name, type_,
                                       view_name, params)

    qtime = data = url = None

    args = dict(api=api, hosts=hosts)
    for qtime, data, url in pool.imap(send_query, [args for i in xrange(count)]):
        pass

    if cfg.SERIESLY_IP != '' and qtime is not None:
        # store the most recent query response time 'qtime' into seriesly
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
        # TODO: do not hardcode 'fast'; we should have per-test dbs
        db = 'fast'
        seriesly[db].append({'query_latency': qtime})

    # log to logs/celery-query.log
    try:
        rc = data.read()[0:200]
    except Exception:
        rc = "exception reading query response"

    logger.error('\n')
    logger.error('url: %s' % url)
    logger.error('latency: %s' % qtime)
    logger.error('data: %s' % rc)
Example #13
def __init__(self, test):
    self.seriesly = Seriesly(CBMONITOR_HOST)
    self.test_config = test.test_config
    self.metric_title = test.test_config.test_case.metric_title
    self.cluster_spec = test.cluster_spec
    self.cluster_names = test.cbagent.clusters.keys()
    self.build = test.build
    self.master_node = test.master_node
Example #14
def plot_all_phases(cluster_name, buckets):

    db_event = Seriesly(cfg.SERIESLY_IP, 3133)['event']

    # Get system test phase info and plot phase by phase
    all_event_docs = db_event.get_all()
    phases_info = {}
    for doc in all_event_docs.itervalues():
        phases_info[int(doc.keys()[0])] = doc.values()[0]
    # sorting the list returned by dict.keys() in place has no effect;
    # keep an explicitly sorted list of the phase keys instead
    phase_keys = sorted(phases_info)

    num_phases = len(phases_info)

    run_id = store_report.get_run_info('name')

    if os.path.exists(run_id):
        shutil.rmtree(run_id)
    os.makedirs(run_id)

    for i in phase_keys:
        start_time = phases_info[i][[name for name in phases_info[i].keys() if (name != 'name' and name != 'desc')][0]]
        start_time = int(start_time[:10])
        end_time = 0
        if i == phase_keys[-1]:
            end_time = int(time.time())
        else:
            end_time = phases_info[i + 1][[name for name in phases_info[i + 1].keys() if (name != 'name' and name != 'desc')][0]]
            end_time = int(end_time[:10])

        start_time_snapshot = datetime.datetime.fromtimestamp(start_time).strftime('%m/%d/%Y %H:%M')
        end_time_snapshot = datetime.datetime.fromtimestamp(end_time).strftime('%m/%d/%Y %H:%M')

        snapshot_name = "phase-%d-%s" % (i, [name for name in phases_info[i].keys() if (name != 'name' and name != 'desc')][0])

        plot_use_cbmonitor(snapshot_name, cluster_name, start_time_snapshot, end_time_snapshot)

        prepare_folder_report(run_id, i)

        store_90th_avg_value(buckets, start_time, end_time, run_id, i)

    storage_folder = os.getcwd() + "/" + run_id + "/"
    print "data stored in %s" % (storage_folder)
    return storage_folder
Example #15
def __init__(self, test):
    self.seriesly = Seriesly(SERIESLY['host'])
    self.test_config = test.test_config
    self.metric_title = test.test_config.test_case.metric_title
    self.cluster_spec = test.cluster_spec
    self.cluster_names = test.cbagent.clusters.keys()
    self.build = test.build
    self.master_node = test.master_node
Example #16
class SerieslyStore(Store):

    def __init__(self, host):
        self.seriesly = Seriesly(host)

    def _build_dbname(self, cluster, server, bucket):
        db_name = cluster
        if bucket:
            db_name += bucket
        if server:
            db_name += server.replace(".", "")
        return db_name

    def append(self, data, cluster=None, server=None, bucket=None):
        db_name = self._build_dbname(cluster, server, bucket)
        if db_name not in self.seriesly.list_dbs():
            self.seriesly.create_db(db_name)
        self.seriesly[db_name].append(data)
Example #17
def add_phase_to_db(phase, phase_key, name, desc):

    if cfg.SERIESLY_IP != '':
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
        seriesly.event.append({
            str(phase_key): {
                str(phase['name']): str(time.time()),
                'run_id': name + '-' + desc
            }
        })
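
For reference, a call such as add_phase_to_db({'name': 'load'}, 1, 'mytest', 'build-1234') appends an event document shaped roughly like this (the timestamp is illustrative); this is the structure that the plot_all_phases() variants above read back from the 'event' db:

{'1': {'load': '1700000000.12', 'run_id': 'mytest-build-1234'}}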
Example #18
class NsToSeriesly(object):
    def __init__(self, in_host, out_host, database):
        self.url = "http://{0}:8091/".format(in_host) + "pools/default/buckets/default/stats?zoom=minute"

        self.database = database
        self.seriesly = Seriesly(host=out_host)
        if database not in self.seriesly.list_dbs():
            self.seriesly.create_db(database)

    def collect(self):
        r = requests.get(self.url)

        # note: r.json as a property works only with requests < 1.0; newer versions use r.json()
        all_stats = r.json["op"]["samples"]
        last_stats = dict((k, v[-1]) for k, v in all_stats.iteritems())

        self.store(last_stats)

    def store(self, data):
        self.seriesly[self.database].append(data)
Example #19
    def __init__(self):
        self.db = Seriesly()

        self.fig = figure()
        self.fig.set_size_inches(4.66, 2.625)

        self.urls = list()
        self.images = list()

        self.eventlet_pool = GreenPool()
        self.mp_pool = Pool(cpu_count())
Example #20
class SerieslyStore(Store):

    def __init__(self, host):
        self.seriesly = Seriesly(host)

    @staticmethod
    def build_dbname(cluster, server, bucket, collector):
        if collector:
            db_name = collector + cluster
        else:
            db_name = cluster
        if bucket:
            db_name += bucket
        if server:
            db_name += server.replace(".", "")
        return db_name

    def append(self, data, cluster=None, server=None, bucket=None,
               collector=None):
        db_name = self.build_dbname(cluster, server, bucket, collector)
        if db_name not in self.seriesly.list_dbs():
            self.seriesly.create_db(db_name)
        self.seriesly[db_name].append(data)
Example #21
class SerieslyStore(object):
    def __init__(self, host, dbslow, dbfast):
        self.slow = {}
        self.fast = {}
        self.dbslow = dbslow
        self.dbfast = dbfast
        self.seriesly = Seriesly(host=host)
        try:
            dbs = self.seriesly.list_dbs()
        except seriesly.exceptions.ConnectionError as e:
            logging.error("unable to connect to seriesly server: %s" % e)
            return

        if dbslow not in dbs:
            self.seriesly.create_db(dbslow)
        if dbfast not in dbs:
            self.seriesly.create_db(dbfast)
Example #22
class SerieslyStore(object):

    def __init__(self, host):
        self.seriesly = Seriesly(host)

    @staticmethod
    def build_dbname(cluster, server, bucket, index, collector):
        db_name = (collector or "") + cluster + (bucket or "") + (index or "") + (server or "")
        for char in "[]/\;.,><&*:%=+@!#^()|?^'\"":
            db_name = db_name.replace(char, "")
        return db_name

    @memoize
    def _get_db(self, db_name):
        try:
            existing_dbs = self.seriesly.list_dbs()
        except ConnectionError as e:
            logger.interrupt("seriesly not available: {}".format(e))
        else:
            if db_name not in existing_dbs:
                logger.info("Creating a new database: {}".format(db_name))
                self.seriesly.create_db(db_name)
            return self.seriesly[db_name]

    def append(self, data, cluster=None, server=None, bucket=None, index=None,
               collector=None, timestamp=None):
        db_name = self.build_dbname(cluster, server, bucket, index, collector)
        db = self._get_db(db_name)
        try:
            db.append(data, timestamp=timestamp)
        except (BadRequest, socket.error):  # Ignore bad requests
            pass

    def drop_db(self, cluster=None, server=None, bucket=None, index=None, collector=None):
        db_name = self.build_dbname(cluster, server, bucket, index, collector)
        try:
            existing_dbs = self.seriesly.list_dbs()
        except ConnectionError as e:
            logger.interrupt("seriesly not available: {}".format(e))
        else:
            if db_name not in existing_dbs:
                logger.info("DB not present: {}".format(db_name))
                return
            logger.info("Dropping DB: {}".format(db_name))
            self.seriesly.drop_db(db_name)
            return
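
A usage sketch for the drop helper above (the host and label values are illustrative assumptions):

store = SerieslyStore("127.0.0.1")
store.drop_db(cluster="test", bucket="default", collector="ns_server")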
Example #23
from seriesly import Seriesly
from optparse import OptionParser
import sys
sys.path.append(".")
import testcfg as cfg
import datetime
import pandas as pd
import pygal
import os
import shutil

conn = Seriesly(cfg.SERIESLY_IP, 3133)
"""
" retrieve timeseries data from seriesly
"""


def getDBData(db):
    db = conn[db]
    data = db.get_all()
    return data or None  # None when the db is empty


"""
" sort data by its timestamp keys
"""


def sortDBData(data):

    sorted_data = []
Example #24
def store_90th_avg_value(buckets, start_time, end_time, run_id, i):
    ips = get_cluster_ips()
    ns_server_stats = None
    atop_stats = None
    latency_stats = ['set_latency', 'get_latency', 'delete_latency', 'query_latency']
    dict_90th = {}
    dict_avg = {}

    dict_90th['ns_server'] = {}
    dict_avg['ns_server'] = {}
    time.sleep(2)
    connection = Seriesly(cfg.SERIESLY_IP, 3133)
    for bucket in buckets:
        toolbar_width = 41
        sys.stdout.write("[%s] indicator\n" % ("*" * toolbar_width))
        sys.stdout.flush()
        dict_90th['ns_server'][bucket] = {}
        dict_avg['ns_server'][bucket] = {}

        # the empty ip string combined with the bucket name forms the ns_server db name for the whole cluster
        cluster_ips = ips
        cluster_ips.append('')
        for ip in cluster_ips:
            ns_server_db = "ns_serverdefault" + bucket + ip
            if ":" in ns_server_db:
                ns_server_db = ns_server_db[0:ns_server_db.find(":")]
            if ip == '':
                ip = 'cluster'
            dict_90th['ns_server'][bucket][ip] = {}
            dict_avg['ns_server'][bucket][ip] = {}
            if ns_server_db not in connection.list_dbs():
                print "db %s was not found" % (ns_server_db)
                continue
            db = connection[ns_server_db]
            if ns_server_stats is None:
                ns_server_stats = db.get_all().values()[0].keys()
            print "Store ns server stats for bucket %s on %s" % (bucket, ip)
            sys.stdout.write("[")
            num = 1
            for metric in ns_server_stats:
                dict_90th['ns_server'][bucket][ip][metric] = store_90th_value(db, metric, start_time, end_time)
                dict_avg['ns_server'][bucket][ip][metric] = store_avg_value(db, metric, start_time, end_time)
                if num % (len(ns_server_stats) / toolbar_width) == 0:
                    sys.stdout.write("=")
                    sys.stdout.flush()
                    time.sleep(0.5)
                num += 1
            sys.stdout.write("]\n")


    dict_90th['atop'] = {}
    dict_avg['atop'] = {}
    for ip in ips:
        atop_db = "atopdefault" + ip
        dict_90th['atop'][ip] = {}
        dict_avg['atop'][ip] = {}
        if ":" in atop_db:
           atop_db = atop_db[0:atop_db.find(":")]
        if  atop_db not in connection.list_dbs():
            print "db %s was not found" % (atop_db)
            continue
        db = connection[atop_db]
        if atop_stats is None:
            atop_stats = db.get_all().values()[0].keys()
        print "Store atop stats for node %s" % (ip)
        for metric in atop_stats:
            dict_90th['atop'][ip][metric] = store_90th_value(db, metric, start_time, end_time)
            dict_avg['atop'][ip][metric] = store_avg_value(db, metric, start_time, end_time)

    dict_90th['latency'] = {}
    dict_avg['latency'] = {}
    for bucket in buckets:
        dict_90th['latency'][bucket] = {}
        dict_avg['latency'][bucket] = {}
        latency_db = "%slatency" % bucket
        if latency_db not in connection.list_dbs():
            print "db %s was not found" % (latency_db)
            continue
        db = connection[latency_db]
        print "Store latency stats for bucket %s" % (bucket)

        for metric in latency_stats:
            dict_90th['latency'][bucket][metric] = store_90th_value(db, metric, start_time, end_time)
            dict_avg['latency'][bucket][metric] = store_avg_value(db, metric, start_time, end_time)


    os.system('rm -f %s/phase%d/*.txt' % (run_id, i))
    with open("%s/phase%d/90percentile.txt" % (run_id, i), 'w') as f:
        json.dump(dict_90th, f)
    with open("%s/phase%d/average.txt" % (run_id, i), 'w') as f:
        json.dump(dict_avg, f)
    del dict_90th
    del dict_avg
Example #25
# when --purge set delete cc_queue's as well
# as seriesly db
if "--purge" in sys.argv:

    queues = set(CacheHelper.queues())

    # cleaning up seriesly database (fast and slow created by cbtop)
    if cfg.SERIESLY_IP != '':
        from seriesly import Seriesly
        os.system("curl -X DELETE http://{0}:3133/fast".format(cfg.SERIESLY_IP))
        os.system("curl -X DELETE http://{0}:3133/slow".format(cfg.SERIESLY_IP))
        os.system("curl -X DELETE http://{0}:3133/event".format(cfg.SERIESLY_IP))
        os.system("curl -X DELETE http://{0}:3133/atop".format(cfg.SERIESLY_IP))

        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
        seriesly.create_db('event')



for q_ in queues:
    try:
        RabbitHelper().delete(q_)
        print "Cleanup Queue: %s" % q_
    except Exception as ex:
        pass

# clean up cache
CacheHelper.cacheClean()

# start sdk server
os.system("python sdkserver.py  &")
Example #26
        return val


metrics = [
    "secondary_scanlatency20M_fdb_nyx",
    "secondary_scanlatency20M_multiple_fdb_nyx",
    "secondary_scanlatency_rebalance20M_fdb_nyx",
    "secondary_scanlatency_stalefalse_20M_fdb_nyx",
    "secondary_doc_indexing_latency_20M_moi_80th_nyx_query",
    "secondary_scanlatency20M_moi_nyx",
    "secondary_scanlatency20M_multiple_moi_nyx",
    "secondary_scanlatency_rebalance20M_moi_nyx",
    "secondary_scanlatency_stalefalse_20M_100Kops_moi_nyx",
]

s = Seriesly(host='cbmonitor.sc.couchbase.com')

b = Bucket("couchbase://cbmonitor.sc.couchbase.com/benchmarks",
           password="******")

for metric in metrics:
    print "********* Metric: " + metric
    q = N1QLQuery(
        'SELECT id,snapshots FROM benchmarks WHERE metric = "{}";'.format(
            metric))

    for row in b.n1ql_query(q):
        doc_id = row['id']
        snapshot = row['snapshots'][0]

        if len(row['snapshots']) > 1:
Example #27
def store_90th_avg_value(buckets, start_time, end_time, run_id, i):
    ips = get_cluster_ips()
    ns_server_stats = None
    atop_stats = None
    latency_stats = [
        'set_latency', 'get_latency', 'delete_latency', 'query_latency'
    ]
    dict_90th = {}
    dict_avg = {}

    dict_90th['ns_server'] = {}
    dict_avg['ns_server'] = {}
    time.sleep(2)
    connection = Seriesly(cfg.SERIESLY_IP, 3133)
    for bucket in buckets:
        toolbar_width = 41
        sys.stdout.write("[%s] indicator\n" % ("*" * toolbar_width))
        sys.stdout.flush()
        dict_90th['ns_server'][bucket] = {}
        dict_avg['ns_server'][bucket] = {}

        # the empty ip string combined with the bucket name forms the ns_server db name for the whole cluster
        cluster_ips = ips
        cluster_ips.append('')
        for ip in cluster_ips:
            ns_server_db = "ns_serverdefault" + bucket + ip
            if ":" in ns_server_db:
                ns_server_db = ns_server_db[0:ns_server_db.find(":")]
            if ip == '':
                ip = 'cluster'
            dict_90th['ns_server'][bucket][ip] = {}
            dict_avg['ns_server'][bucket][ip] = {}
            if ns_server_db not in connection.list_dbs():
                print("db %s was not found" % (ns_server_db))
                continue
            db = connection[ns_server_db]
            if ns_server_stats is None:
                ns_server_stats = list(db.get_all().values())[0].keys()
            print("Store ns server stats for bucket %s on %s" % (bucket, ip))
            sys.stdout.write("[")
            num = 1
            for metric in ns_server_stats:
                dict_90th['ns_server'][bucket][ip][metric] = store_90th_value(
                    db, metric, start_time, end_time)
                dict_avg['ns_server'][bucket][ip][metric] = store_avg_value(
                    db, metric, start_time, end_time)
                if num % (len(ns_server_stats) // toolbar_width) == 0:
                    sys.stdout.write("=")
                    sys.stdout.flush()
                    time.sleep(0.5)
                num += 1
            sys.stdout.write("]\n")

    dict_90th['atop'] = {}
    dict_avg['atop'] = {}
    for ip in ips:
        atop_db = "atopdefault" + ip
        dict_90th['atop'][ip] = {}
        dict_avg['atop'][ip] = {}
        if ":" in atop_db:
            atop_db = atop_db[0:atop_db.find(":")]
        if atop_db not in connection.list_dbs():
            print("db %s was not found" % (atop_db))
            continue
        db = connection[atop_db]
        if atop_stats is None:
            atop_stats = list(db.get_all().values())[0].keys()
        print("Store atop stats for node %s" % (ip))
        for metric in atop_stats:
            dict_90th['atop'][ip][metric] = store_90th_value(
                db, metric, start_time, end_time)
            dict_avg['atop'][ip][metric] = store_avg_value(
                db, metric, start_time, end_time)

    dict_90th['latency'] = {}
    dict_avg['latency'] = {}
    for bucket in buckets:
        dict_90th['latency'][bucket] = {}
        dict_avg['latency'][bucket] = {}
        latency_db = "%slatency" % bucket
        if latency_db not in connection.list_dbs():
            print("db %s was not found" % (latency_db))
            continue
        db = connection[latency_db]
        print("Store latency stats for bucket %s" % (bucket))

        for metric in latency_stats:
            dict_90th['latency'][bucket][metric] = store_90th_value(
                db, metric, start_time, end_time)
            dict_avg['latency'][bucket][metric] = store_avg_value(
                db, metric, start_time, end_time)

    os.system('rm -f %s/phase%d/*.txt' % (run_id, i))
    with open("%s/phase%d/90percentile.txt" % (run_id, i), 'w') as f:
        json.dump(dict_90th, f)
    with open("%s/phase%d/average.txt" % (run_id, i), 'w') as f:
        json.dump(dict_avg, f)
    del dict_90th
    del dict_avg
Example #28
def __init__(self, *args, **kwargs):
    super(SgwMetricHelper, self).__init__(*args, **kwargs)
    self.seriesly = Seriesly(
        self.test_config.gateload_settings.seriesly_host)
Example #29
from seriesly import Seriesly

db = Seriesly()["fast"]
doc = db.get_all()
time = {}
for k, v in doc.iteritems():
    if "mc-curr_items" in v.keys() and "mc-host" in v.keys():
        time[k] = {"item": v["mc-curr_items"], "ip": v["mc-host"]}
    else:
        time[k] = {"item": "No items"}

for k, v in sorted(time.iteritems()):
    print k, v
Example #30
def report_kv_latency(bucket="default"):

    if cfg.SERIESLY_IP == '':
        # seriesly not configured
        return

    rabbitHelper = report_kv_latency.rabbitHelper
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG+"_status") or\
        ClusterStatus()

    host = clusterStatus.get_random_host()
    if host is None: return

    ip, port = host.split(':')

    workloads = CacheHelper.workloads()
    for workload in workloads:
        if workload.active and workload.bucket == bucket:

            # read workload params
            bucket = str(workload.bucket)
            password = str(workload.password)

            # read template from active workload
            template = Template.from_cache(str(workload.template))
            template = template.__dict__
            client.decodeMajgicStrings(template)

            # setup key/val to use for timing
            key = _random_string(12)
            value = json.dumps(template['kv'])
            get_key = key

            # for get op, try to pull from consume_queue
            # so that we can calc impact of dgm
            consume_queue = workload.consume_queue
            if consume_queue is not None:
                keys = rabbitHelper.getJsonMsg(str(consume_queue),
                                               requeue=True)
                if len(keys) > 0:
                    get_key = str(keys['start'])

            # collect op latency
            set_latency = client.mc_op_latency('set', key, value, ip, port,
                                               bucket, password)
            get_latency = client.mc_op_latency('get', get_key, value, ip, port,
                                               bucket, password)
            delete_latency = client.mc_op_latency('delete', key, value, ip,
                                                  port, bucket, password)

            # report to seriessly
            seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
            db = None
            if 'fast' in seriesly.list_dbs():
                db = 'fast'
            else:
                bucketStatus = BucketStatus.from_cache(bucket) or BucketStatus(
                    bucket)
                db = bucketStatus.latency_db
                if db not in seriesly.list_dbs():
                    seriesly.create_db(db)

            if db is not None:
                seriesly[db].append({
                    'set_latency': set_latency,
                    'get_latency': get_latency,
                    'delete_latency': delete_latency
                })
Example #31
for proc in kill_procs:
    os.system("ps aux | grep %s | awk '{print $2}' | xargs kill" % proc)

# delete queues (note using --purge will remove cc_queues)
queues = CacheHelper.task_queues() + CacheHelper.miss_queues()

# when --purge set delete cc_queue's as well
# as seriesly db
if "--purge" in sys.argv:

    queues = set(CacheHelper.queues())

    # cleaning up seriesly database (fast and slow created by cbtop)
    if cfg.SERIESLY_IP != '':
        from seriesly import Seriesly
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
        dbs = seriesly.list_dbs()
        for db in dbs:
            seriesly.drop_db(db)

        seriesly.create_db('event')

for q_ in queues:
    try:
        RabbitHelper().delete(q_)
        print("Cleanup Queue: %s" % q_)
    except Exception as ex:
        pass

# clean up cache
CacheHelper.cacheClean()
Example #32
def main():
    seriesly = Seriesly()
    all_dbs = seriesly.list_dbs()
    for i, db in enumerate(all_dbs, start=1):
        logger.info("{}/{}: {}".format(i, len(all_dbs), db.strip()))
        seriesly[db.strip()].compact()
Example #33
def init_client():
    world.client = Seriesly(host=world.config.get('database', 'host'),
                            port=world.config.get('database', 'port'))
Example #34
def main():
    s = Seriesly(StatsSettings.SERIESLY['host'])
    for db in s.list_dbs():
        logger.info('Compacting {}'.format(db))
        result = s[db].compact()
        logger.info('Compaction finished: {}'.format(result))