Example #1
def stress_openstack(tests, duration):
    """
    Workload driver. Executes an action function against a nova-cluster.

    """
    # NOTE: 'admin_manager' and 'logger' are assumed to be module-level
    # globals in this variant.
    logfiles = admin_manager.config.stress.target_logfiles
    log_check_interval = int(admin_manager.config.stress.log_check_interval)
    if logfiles:
        controller = admin_manager.config.stress.target_controller
        computes = _get_compute_nodes(controller)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node)
    processes = []
    for test in tests:
        if test.get('use_admin', False):
            manager = admin_manager
        else:
            manager = clients.Manager()
        for _ in xrange(test.get('threads', 1)):
            if test.get('use_isolated_tenants', False):
                username = rand_name("stress_user")
                tenant_name = rand_name("stress_tenant")
                password = "******"
                identity_client = admin_manager.identity_client
                _, tenant = identity_client.create_tenant(name=tenant_name)
                identity_client.create_user(username,
                                            password,
                                            tenant['id'],
                                            "email")
                manager = clients.Manager(username=username,
                                          password="******",
                                          tenant_name=tenant_name)
            target = get_action_function(test['action'])
            p = multiprocessing.Process(target=target,
                                        args=(manager, logger),
                                        kwargs=test.get('kwargs', {}))
            processes.append(p)
            p.start()
    end_time = time.time() + duration
    had_errors = False
    while True:
        remaining = end_time - time.time()
        if remaining <= 0:
            break
        time.sleep(min(remaining, log_check_interval))
        if not logfiles:
            continue
        errors = _error_in_logs(logfiles, computes)
        if errors:
            had_errors = True
            break
    for p in processes:
        p.terminate()
    if not had_errors:
        logger.info("cleaning up")
        cleanup.cleanup()
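
For orientation, a hypothetical shape for the `tests` argument these drivers
consume, inferred from the keys the loops read ('action', 'threads',
'use_admin', 'use_isolated_tenants', 'kwargs'); the dotted action path and the
kwargs below are illustrative placeholders, not real tempest actions:

# A minimal sketch of a tests descriptor, assuming only the key names above.
tests = [
    {
        'action': 'tempest.stress.actions.example_action',  # hypothetical
        'threads': 4,                   # worker processes for this action
        'use_admin': True,              # run under the admin manager
        'use_isolated_tenants': False,  # or make a throwaway tenant per worker
        'kwargs': {'some_option': 1},   # forwarded to the action
    },
]
# stress_openstack(tests, duration=60)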
Example #2
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """
    Workload driver. Executes an action function against a nova-cluster.
    """
    logfiles = admin_manager.config.stress.target_logfiles
    log_check_interval = int(admin_manager.config.stress.log_check_interval)
    if logfiles:
        controller = admin_manager.config.stress.target_controller
        computes = _get_compute_nodes(controller)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node)
    for test in tests:
        if test.get('use_admin', False):
            manager = admin_manager
        else:
            manager = clients.Manager()
        for p_number in xrange(test.get('threads', 1)):
            if test.get('use_isolated_tenants', False):
                username = rand_name("stress_user")
                tenant_name = rand_name("stress_tenant")
                password = "******"
                identity_client = admin_manager.identity_client
                _, tenant = identity_client.create_tenant(name=tenant_name)
                identity_client.create_user(username,
                                            password,
                                            tenant['id'],
                                            "email")
                manager = clients.Manager(username=username,
                                          password="******",
                                          tenant_name=tenant_name)

            test_obj = importutils.import_class(test['action'])
            test_run = test_obj(manager, logger, max_runs, stop_on_error)

            kwargs = test.get('kwargs', {})
            test_run.setUp(**dict(kwargs.iteritems()))

            logger.debug("calling Target Object %s" %
                         test_run.__class__.__name__)

            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic['runs'] = 0
            shared_statistic['fails'] = 0

            p = multiprocessing.Process(target=test_run.execute,
                                        args=(shared_statistic,))

            process = {'process': p,
                       'p_number': p_number,
                       'action': test['action'],
                       'statistic': shared_statistic}

            # NOTE: 'processes' is assumed to be a module-level list, shared
            # with terminate_all_processes() below.
            processes.append(process)
            p.start()
    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    while True:
        if max_runs is None:
            remaining = end_time - time.time()
            if remaining <= 0:
                break
        else:
            remaining = log_check_interval
            all_proc_term = True
            for process in processes:
                if process['process'].is_alive():
                    all_proc_term = False
                    break
            if all_proc_term:
                break

        time.sleep(min(remaining, log_check_interval))
        if stop_on_error:
            # A bare 'break' inside a for loop would only leave that loop;
            # exit the monitoring loop as soon as any worker fails.
            if any(proc['statistic']['fails'] > 0 for proc in processes):
                break

        if not logfiles:
            continue
        errors = _error_in_logs(logfiles, computes)
        if errors:
            had_errors = True
            break

    terminate_all_processes()

    sum_fails = 0
    sum_runs = 0

    logger.info("Statistics (per process):")
    for process in processes:
        if process['statistic']['fails'] > 0:
            had_errors = True
        sum_runs += process['statistic']['runs']
        sum_fails += process['statistic']['fails']
        logger.info(" Process %d (%s): Run %d actions (%d failed)" %
                    (process['p_number'],
                     process['action'],
                     process['statistic']['runs'],
                     process['statistic']['fails']))
    logger.info("Summary:")
    logger.info("Run %d actions (%d failed)" %
                (sum_runs, sum_fails))

    if not had_errors:
        logger.info("cleaning up")
        cleanup.cleanup(logger)
    if had_errors:
        return 1
    else:
        return 0
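
The per-process bookkeeping above hinges on multiprocessing.Manager().dict(),
which gives the parent and a child process a shared mapping. A minimal,
self-contained sketch of that pattern (the worker below is a stand-in for
test_run.execute, not the tempest class):

import multiprocessing


def worker(statistic):
    # Stand-in for test_run.execute: record one successful run.
    statistic['runs'] += 1


if __name__ == '__main__':
    mp_manager = multiprocessing.Manager()
    shared_statistic = mp_manager.dict()
    shared_statistic['runs'] = 0
    shared_statistic['fails'] = 0
    p = multiprocessing.Process(target=worker, args=(shared_statistic,))
    p.start()
    p.join()
    print(shared_statistic['runs'])  # 1: the parent sees the child's update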
Example #3
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """Workload driver. Executes an action function against a nova-cluster."""
    admin_manager = credentials.AdminManager()

    ssh_user = CONF.stress.target_ssh_user
    ssh_key = CONF.stress.target_private_key_path
    logfiles = CONF.stress.target_logfiles
    log_check_interval = int(CONF.stress.log_check_interval)
    default_thread_num = int(CONF.stress.default_thread_number_per_action)
    if logfiles:
        controller = CONF.stress.target_controller
        computes = _get_compute_nodes(controller, ssh_user, ssh_key)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node, ssh_user, ssh_key)
    skip = False
    for test in tests:
        for service in test.get('required_services', []):
            if not CONF.service_available.get(service):
                skip = True
                break
        if skip:
            break
        # TODO(andreaf) This has to be reworked to use the credential
        # provider interface. For now only tests marked as 'use_admin' will
        # work.
        if test.get('use_admin', False):
            manager = admin_manager
        else:
            raise NotImplementedError('Non admin tests are not supported')
        for p_number in moves.xrange(test.get('threads', default_thread_num)):
            if test.get('use_isolated_tenants', False):
                username = data_utils.rand_name("stress_user")
                tenant_name = data_utils.rand_name("stress_tenant")
                password = "******"
                if CONF.identity.auth_version == 'v2':
                    identity_client = admin_manager.identity_client
                    projects_client = admin_manager.tenants_client
                    roles_client = admin_manager.roles_client
                    users_client = admin_manager.users_client
                    domains_client = None
                else:
                    identity_client = admin_manager.identity_v3_client
                    projects_client = admin_manager.projects_client
                    roles_client = admin_manager.roles_v3_client
                    users_client = admin_manager.users_v3_client
                    domains_client = admin_manager.domains_client
                domain = (identity_client.auth_provider.credentials.
                          get('project_domain_name', 'Default'))
                credentials_client = cred_client.get_creds_client(
                    identity_client, projects_client, users_client,
                    roles_client, domains_client, project_domain_name=domain)
                project = credentials_client.create_project(
                    name=tenant_name, description=tenant_name)
                user = credentials_client.create_user(username, password,
                                                      project, "email")
                # Add roles specified in config file
                for conf_role in CONF.auth.tempest_roles:
                    credentials_client.assign_user_role(user, project,
                                                        conf_role)
                creds = credentials_client.get_credentials(user, project,
                                                           password)
                manager = clients.Manager(credentials=creds)

            test_obj = importutils.import_class(test['action'])
            test_run = test_obj(manager, max_runs, stop_on_error)

            kwargs = test.get('kwargs', {})
            test_run.setUp(**dict(six.iteritems(kwargs)))

            LOG.debug("calling Target Object %s" %
                      test_run.__class__.__name__)

            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic['runs'] = 0
            shared_statistic['fails'] = 0

            p = multiprocessing.Process(target=test_run.execute,
                                        args=(shared_statistic,))

            process = {'process': p,
                       'p_number': p_number,
                       'action': test_run.action,
                       'statistic': shared_statistic}

            processes.append(process)
            p.start()
    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    try:
        while True:
            if max_runs is None:
                remaining = end_time - time.time()
                if remaining <= 0:
                    break
            else:
                remaining = log_check_interval
                all_proc_term = True
                for process in processes:
                    if process['process'].is_alive():
                        all_proc_term = False
                        break
                if all_proc_term:
                    break

            time.sleep(min(remaining, log_check_interval))
            if stop_on_error:
                if any([True for proc in processes
                        if proc['statistic']['fails'] > 0]):
                    break

            if not logfiles:
                continue
            if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
                                  stop_on_error):
                had_errors = True
                break
    except KeyboardInterrupt:
        LOG.warning("Interrupted, going to print statistics and exit ...")

    if stop_on_error:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    terminate_all_processes()

    sum_fails = 0
    sum_runs = 0

    LOG.info("Statistics (per process):")
    for process in processes:
        if process['statistic']['fails'] > 0:
            had_errors = True
        sum_runs += process['statistic']['runs']
        sum_fails += process['statistic']['fails']
        print("Process %d (%s): Run %d actions (%d failed)" % (
              process['p_number'],
              process['action'],
              process['statistic']['runs'],
              process['statistic']['fails']))
    print("Summary:")
    print("Run %d actions (%d failed)" % (sum_runs, sum_fails))

    if not had_errors and CONF.stress.full_clean_stack:
        LOG.info("cleaning up")
        cleanup.cleanup()
    if had_errors:
        return 1
    else:
        return 0
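
sigchld_handler itself is not part of these excerpts. A plausible sketch,
assuming its job is to notice a worker that died with a non-zero exit code and
tear the run down; 'processes' and terminate_all_processes() are the same
module-level structures the driver uses, and this is an assumption, not the
tempest implementation:

import signal


def sigchld_handler(signum, frame):
    # Assumption: if any worker exited abnormally, deregister this handler
    # and stop every remaining worker.
    for process in processes:
        if (not process['process'].is_alive()
                and process['process'].exitcode != 0):
            signal.signal(signum, signal.SIG_DFL)
            terminate_all_processes()
            break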
Example #4
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """
    Workload driver. Executes an action function against a nova-cluster.
    """
    admin_manager = clients.AdminManager()

    ssh_user = CONF.stress.target_ssh_user
    ssh_key = CONF.stress.target_private_key_path
    logfiles = CONF.stress.target_logfiles
    log_check_interval = int(CONF.stress.log_check_interval)
    default_thread_num = int(CONF.stress.default_thread_number_per_action)
    if logfiles:
        controller = CONF.stress.target_controller
        computes = _get_compute_nodes(controller, ssh_user, ssh_key)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node, ssh_user, ssh_key)
    for test in tests:
        if test.get("use_admin", False):
            manager = admin_manager
        else:
            manager = clients.Manager()
        for p_number in moves.xrange(test.get("threads", default_thread_num)):
            if test.get("use_isolated_tenants", False):
                username = data_utils.rand_name("stress_user")
                tenant_name = data_utils.rand_name("stress_tenant")
                password = "******"
                identity_client = admin_manager.identity_client
                tenant = identity_client.create_tenant(name=tenant_name)
                identity_client.create_user(username, password, tenant["id"], "email")
                creds = auth.get_credentials(username=username, password=password, tenant_name=tenant_name)
                manager = clients.Manager(credentials=creds)

            test_obj = importutils.import_class(test["action"])
            test_run = test_obj(manager, max_runs, stop_on_error)

            kwargs = test.get("kwargs", {})
            test_run.setUp(**dict(kwargs.iteritems()))

            LOG.debug("calling Target Object %s" % test_run.__class__.__name__)

            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic["runs"] = 0
            shared_statistic["fails"] = 0

            p = multiprocessing.Process(target=test_run.execute, args=(shared_statistic,))

            process = {"process": p, "p_number": p_number, "action": test_run.action, "statistic": shared_statistic}

            processes.append(process)
            p.start()
    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    try:
        while True:
            if max_runs is None:
                remaining = end_time - time.time()
                if remaining <= 0:
                    break
            else:
                remaining = log_check_interval
                all_proc_term = True
                for process in processes:
                    if process["process"].is_alive():
                        all_proc_term = False
                        break
                if all_proc_term:
                    break

            time.sleep(min(remaining, log_check_interval))
            if stop_on_error:
                if any([True for proc in processes if proc["statistic"]["fails"] > 0]):
                    break

            if not logfiles:
                continue
            if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key, stop_on_error):
                had_errors = True
                break
    except KeyboardInterrupt:
        LOG.warning("Interrupted, going to print statistics and exit ...")

    if stop_on_error:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    terminate_all_processes()

    sum_fails = 0
    sum_runs = 0

    LOG.info("Statistics (per process):")
    for process in processes:
        if process["statistic"]["fails"] > 0:
            had_errors = True
        sum_runs += process["statistic"]["runs"]
        sum_fails += process["statistic"]["fails"]
        LOG.info(
            " Process %d (%s): Run %d actions (%d failed)"
            % (process["p_number"], process["action"], process["statistic"]["runs"], process["statistic"]["fails"])
        )
    LOG.info("Summary:")
    LOG.info("Run %d actions (%d failed)" % (sum_runs, sum_fails))

    if not had_errors and CONF.stress.full_clean_stack:
        LOG.info("cleaning up")
        cleanup.cleanup()
    if had_errors:
        return 1
    else:
        return 0
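
terminate_all_processes() is referenced throughout these excerpts but never
defined in them. Given the 'processes' records built above, a plausible
minimal sketch (an assumption, not the tempest implementation):

def terminate_all_processes():
    # 'processes' is the module-level list of records built by
    # stress_openstack; stop each worker and wait for it to exit.
    for process in processes:
        if process['process'].is_alive():
            try:
                process['process'].terminate()
            except OSError:
                pass
        process['process'].join()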
Example #5
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """
    Workload driver. Executes an action function against a nova-cluster.
    """
    admin_manager = clients.AdminManager()

    ssh_user = CONF.stress.target_ssh_user
    ssh_key = CONF.stress.target_private_key_path
    logfiles = CONF.stress.target_logfiles
    log_check_interval = int(CONF.stress.log_check_interval)
    default_thread_num = int(CONF.stress.default_thread_number_per_action)
    if logfiles:
        controller = CONF.stress.target_controller
        computes = _get_compute_nodes(controller, ssh_user, ssh_key)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node, ssh_user, ssh_key)
    for test in tests:
        if test.get('use_admin', False):
            manager = admin_manager
        else:
            manager = clients.Manager()
        for p_number in moves.xrange(test.get('threads', default_thread_num)):
            if test.get('use_isolated_tenants', False):
                username = data_utils.rand_name("stress_user")
                tenant_name = data_utils.rand_name("stress_tenant")
                password = "******"
                identity_client = admin_manager.identity_client
                _, tenant = identity_client.create_tenant(name=tenant_name)
                identity_client.create_user(username,
                                            password,
                                            tenant['id'],
                                            "email")
                manager = clients.Manager(username=username,
                                          password="******",
                                          tenant_name=tenant_name)

            test_obj = importutils.import_class(test['action'])
            test_run = test_obj(manager, max_runs, stop_on_error)

            kwargs = test.get('kwargs', {})
            test_run.setUp(**dict(kwargs.iteritems()))

            LOG.debug("calling Target Object %s" %
                      test_run.__class__.__name__)

            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic['runs'] = 0
            shared_statistic['fails'] = 0

            p = multiprocessing.Process(target=test_run.execute,
                                        args=(shared_statistic,))

            process = {'process': p,
                       'p_number': p_number,
                       'action': test_run.action,
                       'statistic': shared_statistic}

            processes.append(process)
            p.start()
    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    while True:
        if max_runs is None:
            remaining = end_time - time.time()
            if remaining <= 0:
                break
        else:
            remaining = log_check_interval
            all_proc_term = True
            for process in processes:
                if process['process'].is_alive():
                    all_proc_term = False
                    break
            if all_proc_term:
                break

        time.sleep(min(remaining, log_check_interval))
        if stop_on_error:
            # Exit the monitoring loop (not just an inner for loop) as soon
            # as any worker records a failure.
            if any(proc['statistic']['fails'] > 0 for proc in processes):
                break

        if not logfiles:
            continue
        if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
                              stop_on_error):
            had_errors = True
            break

    terminate_all_processes()

    sum_fails = 0
    sum_runs = 0

    LOG.info("Statistics (per process):")
    for process in processes:
        if process['statistic']['fails'] > 0:
            had_errors = True
        sum_runs += process['statistic']['runs']
        sum_fails += process['statistic']['fails']
        LOG.info(" Process %d (%s): Run %d actions (%d failed)" %
                 (process['p_number'],
                  process['action'],
                  process['statistic']['runs'],
                     process['statistic']['fails']))
    LOG.info("Summary:")
    LOG.info("Run %d actions (%d failed)" %
             (sum_runs, sum_fails))

    if not had_errors and CONF.stress.full_clean_stack:
        LOG.info("cleaning up")
        cleanup.cleanup()
    if had_errors:
        return 1
    else:
        return 0
Example #6
#!/usr/bin/env python

# Copyright 2013 Quanta Research Cambridge, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.

from tempest.stress import cleanup

cleanup.cleanup()
Example #7
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """Workload driver. Executes an action function against a nova-cluster."""
    admin_manager = credentials.AdminManager()

    ssh_user = CONF.stress.target_ssh_user
    ssh_key = CONF.stress.target_private_key_path
    logfiles = CONF.stress.target_logfiles
    log_check_interval = int(CONF.stress.log_check_interval)
    default_thread_num = int(CONF.stress.default_thread_number_per_action)
    if logfiles:
        controller = CONF.stress.target_controller
        computes = _get_compute_nodes(controller, ssh_user, ssh_key)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node, ssh_user, ssh_key)
    skip = False
    for test in tests:
        for service in test.get('required_services', []):
            if not CONF.service_available.get(service):
                skip = True
                break
        if skip:
            break
        if test.get('use_admin', False):
            manager = admin_manager
        else:
            manager = credentials.ConfiguredUserManager()
        for p_number in moves.xrange(test.get('threads', default_thread_num)):
            if test.get('use_isolated_tenants', False):
                username = data_utils.rand_name("stress_user")
                tenant_name = data_utils.rand_name("stress_tenant")
                password = "******"
                if CONF.identity.auth_version == 'v2':
                    identity_client = admin_manager.identity_client
                    projects_client = admin_manager.tenants_client
                    roles_client = admin_manager.roles_client
                    users_client = admin_manager.users_client
                else:
                    identity_client = admin_manager.identity_v3_client
                    projects_client = None
                    roles_client = None
                    users_client = None
                domain = (identity_client.auth_provider.credentials.get(
                    'project_domain_name', 'Default'))
                credentials_client = cred_client.get_creds_client(
                    identity_client,
                    projects_client,
                    roles_client,
                    users_client,
                    project_domain_name=domain)
                project = credentials_client.create_project(
                    name=tenant_name, description=tenant_name)
                user = credentials_client.create_user(username, password,
                                                      project, "email")
                # Add roles specified in config file
                for conf_role in CONF.auth.tempest_roles:
                    credentials_client.assign_user_role(
                        user, project, conf_role)
                creds = credentials_client.get_credentials(
                    user, project, password)
                manager = clients.Manager(credentials=creds)

            test_obj = importutils.import_class(test['action'])
            test_run = test_obj(manager, max_runs, stop_on_error)

            kwargs = test.get('kwargs', {})
            test_run.setUp(**dict(six.iteritems(kwargs)))

            LOG.debug("calling Target Object %s" % test_run.__class__.__name__)

            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic['runs'] = 0
            shared_statistic['fails'] = 0

            p = multiprocessing.Process(target=test_run.execute,
                                        args=(shared_statistic, ))

            process = {
                'process': p,
                'p_number': p_number,
                'action': test_run.action,
                'statistic': shared_statistic
            }

            processes.append(process)
            p.start()
    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    try:
        while True:
            if max_runs is None:
                remaining = end_time - time.time()
                if remaining <= 0:
                    break
            else:
                remaining = log_check_interval
                all_proc_term = True
                for process in processes:
                    if process['process'].is_alive():
                        all_proc_term = False
                        break
                if all_proc_term:
                    break

            time.sleep(min(remaining, log_check_interval))
            if stop_on_error:
                if any([
                        True for proc in processes
                        if proc['statistic']['fails'] > 0
                ]):
                    break

            if not logfiles:
                continue
            if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
                                  stop_on_error):
                had_errors = True
                break
    except KeyboardInterrupt:
        LOG.warning("Interrupted, going to print statistics and exit ...")

    if stop_on_error:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    terminate_all_processes()

    sum_fails = 0
    sum_runs = 0

    LOG.info("Statistics (per process):")
    for process in processes:
        if process['statistic']['fails'] > 0:
            had_errors = True
        sum_runs += process['statistic']['runs']
        sum_fails += process['statistic']['fails']
        print("Process %d (%s): Run %d actions (%d failed)" %
              (process['p_number'], process['action'],
               process['statistic']['runs'], process['statistic']['fails']))
    print("Summary:")
    print("Run %d actions (%d failed)" % (sum_runs, sum_fails))

    if not had_errors and CONF.stress.full_clean_stack:
        LOG.info("cleaning up")
        cleanup.cleanup()
    if had_errors:
        return 1
    else:
        return 0
Example #8
#!/usr/bin/env python

# Copyright 2013 Quanta Research Cambridge, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.

import logging

from tempest.stress import cleanup

_console = logging.StreamHandler()
_console.setLevel(logging.DEBUG)
# attach the console handler to the stress-cleanup logger
logger = logging.getLogger('tempest.stress.cleanup')
logger.addHandler(_console)
logger.setLevel(logging.DEBUG)

cleanup.cleanup(logger)