Example #1
def multi_query(count, design_doc_name, view_name, params=None, bucket="default", password="", type_="view", batch_size=100, hosts=None):

    if params is not None:
        # urlencode the view query parameters (urllib.urlencode in Python 2)
        params = urllib.urlencode(params)

    pool = eventlet.GreenPool(batch_size)

    api = '%s/_design/%s/_%s/%s?%s' % (bucket,
                                       design_doc_name, type_,
                                       view_name, params)

    qtime = data = url = None

    args = dict(api=api, hosts=hosts)
    for qtime, data, url in pool.imap(send_query, [args for i in xrange(count)]):
        pass

    if cfg.SERIESLY_IP != '' and qtime is not None:
        # store the most recent query response time 'qtime' into seriesly
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
        # TODO: do not hardcode 'fast'; we should have per-test dbs
        db = 'fast'
        seriesly[db].append({'query_latency': qtime})

    # log to logs/celery-query.log
    try:
        rc = data.read()[0:200]
    except Exception:
        rc = "exception reading query response"

    logger.error('\n')
    logger.error('url: %s' % url)
    logger.error('latency: %s' % qtime)
    logger.error('data: %s' % rc)
Example #2
def generate_index_file(storage_folder, test_file):
    db_event = Seriesly(cfg.SERIESLY_IP, 3133)['event']
    all_event_docs = db_event.get_all()
    phases_info = {}
    for doc in all_event_docs.itervalues():
        # each event doc maps a phase number to its info dict
        phases_info[int(doc.keys()[0])] = doc.values()[0]
    num_phases = len(phases_info)
    run_id = phases_info[1]['desc']
    run_id = run_id.replace(" ", "_")
    run_id = run_id.replace(",", "_")
    content = ""

    with open(test_file) as json_data:
        tests = json.load(json_data)

    for i in range(1, num_phases):
        sub_folder = storage_folder + "phase" + str(i) + "/"
        content += "<a style=\"font-family:arial;color:black;font-size:20px;\" href=\"%s\">%s</a><p>" % (
            "phase" + str(i), "phase" + str(i))
        if str(i) in tests["phases"]:
            content += json.dumps(
                tests["phases"][str(i)], indent=10, sort_keys=True) + "<p>"
        files = [
            f for f in os.listdir(sub_folder)
            if os.path.isfile(os.path.join(sub_folder, f))
        ]
        for f in files:
            content += "<a href=\"%s\">&nbsp;&nbsp;&nbsp;&nbsp;%s</a><p>" % (
                "phase" + str(i) + "/" + f, f)

    html_path = storage_folder + "index.html"
    with open(html_path, 'w') as html_file:
        html_file.write(index_html % content)
Example #3
def main():
    # parse database name from cli arguments
    db_name = parse_args()

    # initialize seriesly client
    db = Seriesly()[db_name]

    # get a set of all unique keys
    all_docs = db.get_all()
    all_keys = set(key for doc in all_docs.itervalues()
                   for key in doc.iterkeys())

    # plot all metrics to PNG images
    outdir = mkdtemp()
    for metric in all_keys:
        print metric
        if '/' not in metric:  # views and xdcr stuff
            keys, values = get_metric(db, metric)
            plot_metric(metric, keys, values, outdir)

    try:
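        # 'convert' (presumably ImageMagick) stitches the per-metric PNGs into a
        # single PDF; if it is missing, the OSError fallback just reports the image dir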
        subprocess.call(['convert', '{0}/*'.format(outdir), 'report.pdf'])
        print "PDF report was successfully generated!"
    except OSError:
        print "All images saved to: {0}".format(outdir)
Example #4
def get_run_info(desc):
    db_event = Seriesly(cfg.SERIESLY_IP, 3133)['event']

    all_event_docs = db_event.get_all()
    phases_info = {}
    for doc in all_event_docs.itervalues():
        phases_info[int(doc.keys()[0])] = doc.values()[0]

    run_info = ''
    # take the first name/desc value found; we assume it is the same for all phases
    if desc == 'name':
        for phase in phases_info:
            if 'name' in phases_info[phase]:
                run_info = phases_info[phase]['name']
                break
    if desc == 'build':
        for phase in phases_info:
            if 'desc' in phases_info[phase]:
                run_info = phases_info[phase]['desc']
                break
    run_info = run_info.replace(" ", "_")
    run_info = run_info.replace(",", "_")

    return run_info
Example #5
    def __init__(self, in_host, out_host, database):
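        # Couchbase ns_server REST endpoint: per-minute stats for the default bucket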
        self.url = 'http://{0}:8091/'.format(in_host) + \
            'pools/default/buckets/default/stats?zoom=minute'

        self.database = database
        self.seriesly = Seriesly(host=out_host)
        if database not in self.seriesly.list_dbs():
            self.seriesly.create_db(database)
Example #6
    def __init__(self, test):
        self.seriesly = Seriesly(CBMONITOR_HOST)
        self.test_config = test.test_config
        self.metric_title = test.test_config.test_case.metric_title
        self.cluster_spec = test.cluster_spec
        self.cluster_names = test.cbagent.clusters.keys()
        self.build = test.build
        self.master_node = test.master_node
Example #7
    def __init__(self, test):
        self.seriesly = Seriesly(SERIESLY['host'])
        self.test_config = test.test_config
        self.metric_title = test.test_config.test_case.metric_title
        self.cluster_spec = test.cluster_spec
        self.cluster_names = test.cbagent.clusters.keys()
        self.build = test.build
        self.master_node = test.master_node
Example #8
    def start_samplers(self):
        logger.info('Creating seriesly dbs')
        seriesly = Seriesly(
            host='{}'.format(self.test_config.gateload_settings.seriesly_host))
        for i, _ in enumerate(self.remote.gateways, start=1):
            seriesly.create_db('gateway_{}'.format(i))
            seriesly.create_db('gateload_{}'.format(i))
        self.remote.start_sampling()
Example #9
def plot_all_phases(cluster_name, buckets):

    db_event = Seriesly(cfg.SERIESLY_IP, 3133)['event']

    # Get system test phase info and plot phase by phase
    all_event_docs = db_event.get_all()
    phases_info = {}
    for doc in all_event_docs.values():
        phases_info[int(list(doc.keys())[0])] = list(doc.values())[0]
    # process the phases in numeric order
    phase_ids = sorted(phases_info)

    num_phases = len(phase_ids)

    run_id = store_report.get_run_info('name')

    if os.path.exists(run_id):
        shutil.rmtree(run_id)
    os.makedirs(run_id)

    for i in phase_ids:
        # the phase timestamp is stored under whichever key is not 'name'/'desc'
        ts_key = [name for name in phases_info[i]
                  if name not in ('name', 'desc')][0]
        start_time = int(str(phases_info[i][ts_key])[:10])
        if i == phase_ids[-1]:
            end_time = int(time.time())
        else:
            next_ts_key = [name for name in phases_info[i + 1]
                           if name not in ('name', 'desc')][0]
            end_time = int(str(phases_info[i + 1][next_ts_key])[:10])

        start_time_snapshot = datetime.datetime.fromtimestamp(
            start_time).strftime('%m/%d/%Y %H:%M')
        end_time_snapshot = datetime.datetime.fromtimestamp(end_time).strftime(
            '%m/%d/%Y %H:%M')

        snapshot_name = "phase-%d-%s" % (i, [
            name for name in list(phases_info[i].keys())
            if (name != 'name' and name != 'desc')
        ][0])

        plot_use_cbmonitor(snapshot_name, cluster_name, start_time_snapshot,
                           end_time_snapshot)

        prepare_folder_report(run_id, i)

        store_90th_avg_value(buckets, start_time, end_time, run_id, i)

    storage_folder = os.getcwd() + "/" + run_id + "/"
    print("data stored in %s" % (storage_folder))
    return storage_folder
Example #10
def add_phase_to_db(phase, phase_key, name, desc):

    if cfg.SERIESLY_IP != '':
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
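        # each event doc maps the phase key to {phase name: timestamp, 'run_id': ...}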
        seriesly.event.append({
            str(phase_key): {
                str(phase['name']): str(time.time()),
                'run_id': name + '-' + desc
            }
        })
Example #11
    def __init__(self, host, dbslow, dbfast):
        self.slow = {}
        self.fast = {}
        self.dbslow = dbslow
        self.dbfast = dbfast
        self.seriesly = Seriesly(host=host)
        try:
            dbs = self.seriesly.list_dbs()
        except seriesly.exceptions.ConnectionError as e:
            logging.error("unable to connect to seriesly server: %s" % e)
            return
Example #12
    def __init__(self):
        self.db = Seriesly()

        self.fig = figure()
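        # figure() presumably comes from matplotlib/pylab; shrink the canvas for compact report images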
        self.fig.set_size_inches(4.66, 2.625)

        self.urls = list()
        self.images = list()

        self.eventlet_pool = GreenPool()
        self.mp_pool = Pool(cpu_count())
Example #13
def multi_query(count,
                design_doc_name,
                view_name,
                params=None,
                bucket="default",
                password="",
                type_="view",
                batch_size=100,
                hosts=None):

    if params is not None:
        # urlencode the view query parameters (urllib.urlencode in Python 2)
        params = urllib.urlencode(params)

    pool = eventlet.GreenPool(batch_size)

    api = '%s/_design/%s/_%s/%s?%s' % (bucket, design_doc_name, type_,
                                       view_name, params)

    qtime = data = url = None

    args = dict(api=api, hosts=hosts)
    for qtime, data, url in pool.imap(send_query,
                                      [args for i in xrange(count)]):
        pass

    if cfg.SERIESLY_IP != '' and qtime is not None:
        # store the most recent query response time 'qtime' into seriesly
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)

        db = None
        if 'fast' in seriesly.list_dbs():
            db = 'fast'
        else:
            bucketStatus = app.workload_manager.BucketStatus.from_cache(
                bucket) or app.workload_manager.BucketStatus(bucket)
            db = bucketStatus.latency_db
            if db not in seriesly.list_dbs():
                seriesly.create_db(db)

        if db is not None:
            seriesly[db].append({'query_latency': qtime})

    # log to logs/celery-query.log
    try:
        rc = data.read()[0:200]
    except Exception:
        rc = "exception reading query response"

    logger.error('\n')
    logger.error('url: %s' % url)
    logger.error('latency: %s' % qtime)
    logger.error('data: %s' % rc)
Example #14
    def __init__(self, host):
        self.seriesly = Seriesly(host)
Example #15
        return val


metrics = [
    "secondary_scanlatency20M_fdb_nyx",
    "secondary_scanlatency20M_multiple_fdb_nyx",
    "secondary_scanlatency_rebalance20M_fdb_nyx",
    "secondary_scanlatency_stalefalse_20M_fdb_nyx",
    "secondary_doc_indexing_latency_20M_moi_80th_nyx_query",
    "secondary_scanlatency20M_moi_nyx",
    "secondary_scanlatency20M_multiple_moi_nyx",
    "secondary_scanlatency_rebalance20M_moi_nyx",
    "secondary_scanlatency_stalefalse_20M_100Kops_moi_nyx",
]

s = Seriesly(host='cbmonitor.sc.couchbase.com')

b = Bucket("couchbase://cbmonitor.sc.couchbase.com/benchmarks",
           password="******")

for metric in metrics:
    print "********* Metric: " + metric
    q = N1QLQuery(
        'SELECT id,snapshots FROM benchmarks WHERE metric = "{}";'.format(
            metric))

    for row in b.n1ql_query(q):
        doc_id = row['id']
        snapshot = row['snapshots'][0]

        if len(row['snapshots']) > 1:
Example #16
def report_kv_latency(bucket="default"):

    if cfg.SERIESLY_IP == '':
        # seriesly not configured
        return

    rabbitHelper = report_kv_latency.rabbitHelper
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG+"_status") or\
        ClusterStatus()

    host = clusterStatus.get_random_host()
    if host is None: return

    ip, port = host.split(':')

    workloads = CacheHelper.workloads()
    for workload in workloads:
        if workload.active and workload.bucket == bucket:

            # read workload params
            bucket = str(workload.bucket)
            password = str(workload.password)

            # read template from active workload
            template = Template.from_cache(str(workload.template))
            template = template.__dict__
            client.decodeMajgicStrings(template)

            # setup key/val to use for timing
            key = _random_string(12)
            value = json.dumps(template['kv'])
            get_key = key

            # for get op, try to pull from consume_queue
            # so that we can calc impact of dgm
            consume_queue = workload.consume_queue
            if consume_queue is not None:
                keys = rabbitHelper.getJsonMsg(str(consume_queue),
                                               requeue=True)
                if len(keys) > 0:
                    get_key = str(keys['start'])

            # collect op latency
            set_latency = client.mc_op_latency('set', key, value, ip, port,
                                               bucket, password)
            get_latency = client.mc_op_latency('get', get_key, value, ip, port,
                                               bucket, password)
            delete_latency = client.mc_op_latency('delete', key, value, ip,
                                                  port, bucket, password)

            # report to seriesly
            seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
            db = None
            if 'fast' in seriesly.list_dbs():
                db = 'fast'
            else:
                bucketStatus = BucketStatus.from_cache(bucket) or BucketStatus(
                    bucket)
                db = bucketStatus.latency_db
                if db not in seriesly.list_dbs():
                    seriesly.create_db(db)

            if db is not None:
                seriesly[db].append({
                    'set_latency': set_latency,
                    'get_latency': get_latency,
                    'delete_latency': delete_latency
                })
Example #17
def store_90th_avg_value(buckets, start_time, end_time, run_id, i):
    ips = get_cluster_ips()
    ns_server_stats = None
    atop_stats = None
    latency_stats = [
        'set_latency', 'get_latency', 'delete_latency', 'query_latency'
    ]
    dict_90th = {}
    dict_avg = {}

    dict_90th['ns_server'] = {}
    dict_avg['ns_server'] = {}
    time.sleep(2)
    connection = Seriesly(cfg.SERIESLY_IP, 3133)
    for bucket in buckets:
        toolbar_width = 41
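        # draw a simple ASCII progress bar while the ns_server metrics are stored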
        sys.stdout.write("[%s] indicator\n" % ("*" * toolbar_width))
        sys.stdout.flush()
        dict_90th['ns_server'][bucket] = {}
        dict_avg['ns_server'][bucket] = {}

        # the empty ip string appended to the bucket name gives the ns_server db
        # name for the entire cluster; copy the list so we do not mutate ips
        cluster_ips = ips + ['']
        for ip in cluster_ips:
            ns_server_db = "ns_serverdefault" + bucket + ip
            if ":" in ns_server_db:
                ns_server_db = ns_server_db[0:ns_server_db.find(":")]
            if ip == '':
                ip = 'cluster'
            dict_90th['ns_server'][bucket][ip] = {}
            dict_avg['ns_server'][bucket][ip] = {}
            if ns_server_db not in connection.list_dbs():
                print("db %s was not found" % (ns_server_db))
                continue
            db = connection[ns_server_db]
            if ns_server_stats is None:
                ns_server_stats = list(db.get_all().values())[0].keys()
            print("Store ns server stats for bucket %s on %s" % (bucket, ip))
            sys.stdout.write("[")
            num = 1
            for metric in ns_server_stats:
                dict_90th['ns_server'][bucket][ip][metric] = store_90th_value(
                    db, metric, start_time, end_time)
                dict_avg['ns_server'][bucket][ip][metric] = store_avg_value(
                    db, metric, start_time, end_time)
                if num % (len(ns_server_stats) // toolbar_width) == 0:
                    sys.stdout.write("=")
                    sys.stdout.flush()
                    time.sleep(0.5)
                num += 1
            sys.stdout.write("]\n")

    dict_90th['atop'] = {}
    dict_avg['atop'] = {}
    for ip in ips:
        atop_db = "atopdefault" + ip
        dict_90th['atop'][ip] = {}
        dict_avg['atop'][ip] = {}
        if ":" in atop_db:
            atop_db = atop_db[0:atop_db.find(":")]
        if atop_db not in connection.list_dbs():
            print("db %s was not found" % (atop_db))
            continue
        db = connection[atop_db]
        if atop_stats is None:
            atop_stats = list(db.get_all().values())[0].keys()
        print("Store atop stats for node %s" % (ip))
        for metric in atop_stats:
            dict_90th['atop'][ip][metric] = store_90th_value(
                db, metric, start_time, end_time)
            dict_avg['atop'][ip][metric] = store_avg_value(
                db, metric, start_time, end_time)

    dict_90th['latency'] = {}
    dict_avg['latency'] = {}
    for bucket in buckets:
        dict_90th['latency'][bucket] = {}
        dict_avg['latency'][bucket] = {}
        latency_db = "%slatency" % bucket
        if latency_db not in connection.list_dbs():
            print("db %s was not found" % (latency_db))
            continue
        db = connection[latency_db]
        print("Store latency stats for bucket %s" % (bucket))

        for metric in latency_stats:
            dict_90th['latency'][bucket][metric] = store_90th_value(
                db, metric, start_time, end_time)
            dict_avg['latency'][bucket][metric] = store_avg_value(
                db, metric, start_time, end_time)

    os.system('rm -f %s/phase%d/*.txt' % (run_id, i))
    with open("%s/phase%d/90percentile.txt" % (run_id, i), 'w') as f:
        json.dump(dict_90th, f)
    with open("%s/phase%d/average.txt" % (run_id, i), 'w') as f:
        json.dump(dict_avg, f)
    del dict_90th
    del dict_avg
Example #18
    def __init__(self, *args, **kwargs):
        super(SgwMetricHelper, self).__init__(*args, **kwargs)
        self.seriesly = Seriesly(
            self.test_config.gateload_settings.seriesly_host)
Example #19
def main():
    seriesly = Seriesly()
    all_dbs = seriesly.list_dbs()
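    # compact every db to reclaim disk space taken up by old samples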
    for i, db in enumerate(all_dbs, start=1):
        logger.info("{}/{}: {}".format(i, len(all_dbs), db.strip()))
        seriesly[db.strip()].compact()
Example #20
for proc in kill_procs:
    os.system("ps aux | grep %s | awk '{print $2}' | xargs kill" % proc)

# delete queues (note using --purge will remove cc_queues)
queues = CacheHelper.task_queues() + CacheHelper.miss_queues()

# when --purge is set, delete cc_queues as well
# as the seriesly dbs
if "--purge" in sys.argv:

    queues = set(CacheHelper.queues())

    # cleaning up seriesly database (fast and slow created by cbtop)
    if cfg.SERIESLY_IP != '':
        from seriesly import Seriesly
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
        dbs = seriesly.list_dbs()
        for db in dbs:
            seriesly.drop_db(db)

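        # recreate the 'event' db that the test phases log their transitions to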
        seriesly.create_db('event')

for q_ in queues:
    try:
        RabbitHelper().delete(q_)
        print("Cleanup Queue: %s" % q_)
    except Exception as ex:
        pass

# clean up cache
CacheHelper.cacheClean()
Example #21
def init_client():
    world.client = Seriesly(host=world.config.get('database', 'host'),
                            port=world.config.get('database', 'port'))