# Imports inferred from the calls below; the helper functions (cleanup,
# create_alarm_definition, agent_sim_process, aggregate_sent_metric_count,
# MetricCreator*) and module-level settings (keystone, urls, num_processes,
# num_definitions, num_requests, num_metrics, metric_name, alarm_def_name,
# alarm_def_expression, max_wait_time, cleanup_after_test) are assumed to be
# defined elsewhere in the surrounding module.
import datetime
import multiprocessing
import time

from keystoneauth1 import identity
from keystoneauth1 import session
from monascaclient import client
from monascaclient import ksclient


def alarm_performance_test():
    if num_processes < num_definitions:
        return False, "Number of agents ({0}) must be >= number of definitions ({1})".format(num_processes,
                                                                                             num_definitions)

    try:
        print('Authenticating with keystone on {}'.format(keystone['auth_url']))
        ks_client = ksclient.KSClient(**keystone)
    except Exception as ex:
        return False, 'Failed to authenticate: {}'.format(ex)

    mon_client = client.Client('2_0', urls[0], token=ks_client.token)

    print('Removing old alarm definitions for {}'.format(alarm_def_name))
    cleanup(mon_client, alarm_def_name)

    alarm_def_id_list = []
    print('Creating alarm definitions')
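    # one definition per metric-name suffix, so each metric_name<i> measurement
    # should trip exactly one alarm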
    for i in xrange(num_definitions):
        expression = alarm_def_expression.format(metric_name+str(i))
        alarm_def_id = create_alarm_definition(mon_client, alarm_def_name+str(i), expression)
        if not alarm_def_id:
            return False, "Failed to create alarm definition"
        alarm_def_id_list.append(alarm_def_id)

    sent_q = multiprocessing.Queue()  # collects each agent's sent-metric count

    process_list = []
    for i in xrange(num_processes):
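        # the agent_sim_process instance is built here in the parent; its bound
        # run() method is what each child process executes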
        p = multiprocessing.Process(target=agent_sim_process(i, num_requests, num_metrics, urls[(i % len(urls))],
                                                             keystone, queue=sent_q,
                                                             metric_creator=MetricCreatorAlarmPerf).run)
        process_list.append(p)

    # truncate to whole seconds so start_time queries align on a second boundary
    start_datetime = datetime.datetime.now()
    start_datetime = start_datetime - datetime.timedelta(microseconds=start_datetime.microsecond)
    print("Starting test at: " + start_datetime.isoformat())
    start_time = time.time()

    for p in process_list:
        p.start()

    try:
        for p in process_list:
            try:
                p.join()
            except Exception:
                # a failed join on one agent should not block joining the rest
                pass

    except KeyboardInterrupt:
        return False, "User interrupt"

    final_time = time.time()

    # There is some chance that not all metrics were sent (lost connection, bad status, etc.)
    total_metrics_sent = aggregate_sent_metric_count(sent_q)
    print('Sent {} metrics in {} seconds'.format(total_metrics_sent, final_time - start_time))
    if total_metrics_sent <= 0:
        return False, "Failed to send metrics"

    print('Waiting for alarms to be created')
    alarm_count = 0
    last_count = 0
    last_change = time.time()
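    # poll until one alarm exists per metric sent; last_change marks the last
    # time the alarm count grew, so a stall of max_wait_time seconds aborts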
    while alarm_count < total_metrics_sent:
        alarm_count = 0
        for def_id in alarm_def_id_list:
            num = len(mon_client.alarms.list(alarm_definition_id=def_id))
            alarm_count += num
        if alarm_count > last_count:
            last_change = time.time()
            last_count = alarm_count

        if (last_change + max_wait_time) <= time.time():
            metrics_found = 0
            for i in xrange(num_definitions):
                val = len(mon_client.metrics.list_measurements(start_time=start_datetime.isoformat(),
                                                               name=metric_name+str(i),
                                                               merge_metrics=True)[0]['measurements'])
                metrics_found += val
            return False, "Max wait time exceeded, {0} alarms found for {1} metrics".format(alarm_count, metrics_found)
        time.sleep(1)

    delta = last_change - start_time

    tot_met = 0
    for i in xrange(num_definitions):
        metrics = mon_client.metrics.list_measurements(start_time=start_datetime.isoformat(), name=metric_name+str(i),
                                                       merge_metrics=True)
        tot_met += len(metrics[0]['measurements'])

    print("Metrics from api: {}".format(tot_met))
    print("-----Test Results-----")
    print("{} alarms in {} seconds".format(alarm_count, delta))
    print("{} per second".format(alarm_count/delta))

    if cleanup_after_test:
        cleanup(mon_client, alarm_def_name)
    return True, ""


# Example 2: metric_performance_test, authenticating through a keystoneauth1 session
def metric_performance_test():

    # build a keystoneauth1 password-auth session for the monasca client
    auth = identity.Password(auth_url=keystone['auth_url'],
                             username=keystone['username'],
                             password=keystone['password'],
                             project_name=keystone['project'],
                             user_domain_id='default',
                             project_domain_id='default')
    sess = session.Session(auth=auth)

    mon_client = client.Client('2_0', urls[0], session=sess)

    sent_q = multiprocessing.Queue()

    process_list = []
    for i in xrange(num_processes):
        p = multiprocessing.Process(
            target=agent_sim_process(i,
                                     num_requests,
                                     num_metrics,
                                     urls[(i % len(urls))],
                                     keystone,
                                     queue=sent_q,
                                     metric_creator=MetricCreatorMetricPerf,
                                     token=sess).run)
        process_list.append(p)

    start_datetime = datetime.datetime.now()
    start_datetime = start_datetime - datetime.timedelta(
        microseconds=start_datetime.microsecond)
    print("Starting test at: " + start_datetime.isoformat())
    start_time = time.time()

    for p in process_list:
        p.start()

    try:
        for p in process_list:
            try:
                p.join()
            except Exception:
                pass

    except KeyboardInterrupt:
        return False, "User interrupt"

    total_metrics_sent = aggregate_sent_metric_count(sent_q)

    metrics_found = 0
    last_count = 0
    last_change = time.time()
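    # poll the API's count statistic until it matches the number of metrics
    # sent, bailing out if the count stops growing for max_wait_time seconds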
    while metrics_found < total_metrics_sent:

        metrics_found = 0
        try:
            stats = mon_client.metrics.list_statistics(
                statistics="count",
                start_time=start_datetime.isoformat(),
                name="metric_perf",
                merge_metrics=True)
            metrics_found = stats[0]['statistics'][0][1]  # count column of the first statistics row
        except Exception as ex:
            print("Failed to retrieve metrics from api\n{}".format(ex))

        if metrics_found > last_count:
            last_change = time.time()
            last_count = metrics_found

        if (last_change + max_wait_time) <= time.time():
            return False, "Max wait time exceeded, {0} / {1} metrics found".format(
                metrics_found, total_metrics_sent)
        time.sleep(1)

    final_time = time.time()
    print("-----Test Results-----")
    print("{} metrics in {} seconds".format(metrics_found,
                                            final_time - start_time))
    print("{} per second".format(metrics_found / (final_time - start_time)))

    return True, ""
def metric_performance_test():

    try:
        print('Authenticating with keystone on {}'.format(keystone['auth_url']))
        ks_client = ksclient.KSClient(**keystone)
    except Exception as ex:
        return False, 'Failed to authenticate: {}'.format(ex)

    mon_client = client.Client('2_0', urls[0], token=ks_client.token)

    sent_q = multiprocessing.Queue()

    process_list = []
    for i in xrange(num_processes):
        p = multiprocessing.Process(target=agent_sim_process(i, num_requests, num_metrics, urls[(i % len(urls))],
                                                             keystone, queue=sent_q,
                                                             metric_creator=MetricCreatorMetricPerf).run)
        process_list.append(p)

    start_datetime = datetime.datetime.now()
    start_datetime = start_datetime - datetime.timedelta(microseconds=start_datetime.microsecond)
    print("Starting test at: " + start_datetime.isoformat())
    start_time = time.time()

    for p in process_list:
        p.start()

    try:
        for p in process_list:
            try:
                p.join()
            except Exception:
                pass

    except KeyboardInterrupt:
        return False, "User interrupt"



    total_metrics_sent = aggregate_sent_metric_count(sent_q)

    metrics_found = 0
    last_count = 0
    last_change = time.time()
    while metrics_found < total_metrics_sent:

        metrics_found = 0
        try:
            stats = mon_client.metrics.list_statistics(statistics="count",
                                                       start_time=start_datetime.isoformat(),
                                                       name="metric_perf",
                                                       merge_metrics=True)
            metrics_found = stats[0]['statistics'][0][1]
        except Exception as ex:
            print("Failed to retrieve metrics from api\n{}".format(ex))

        if metrics_found > last_count:
            last_change = time.time()
            last_count = metrics_found

        if (last_change + max_wait_time) <= time.time():
            return False, "Max wait time exceeded, {0} / {1} metrics found".format(metrics_found, total_metrics_sent)
        time.sleep(1)

    final_time = time.time()
    print("-----Test Results-----")
    print("{} metrics in {} seconds".format(metrics_found, final_time-start_time))
    print("{} per second".format(metrics_found / (final_time - start_time)))

    return True, ""