def run(**kwargs):
    config_name = kwargs.get('config_name', None)
    # default to a list so the .extend() below is safe when the key is absent
    groups = kwargs.get('run_groups', [])
    old_groups = kwargs.get('groups', None)
    explain = kwargs.get('explain', None)
    groups_to_run = []
    groups.extend(old_groups or [])

    for g in groups:
        if config_name:
            register_system_test_cases(
                groups=[g],
                configs=[config_name])
            groups_to_run.append("{0}({1})".format(g, config_name))
        else:
            register_system_test_cases(groups=[g])
            groups_to_run.append(g)

    if not set(groups_to_run) < set(get_groups()):
        sys.exit('There are no cases mapped to the current group; '
                 'please make sure you passed the right test group name.')

    if explain:
        print_explain(groups)
    else:
        register(groups=["run_system_test"],
                 depends_on_groups=groups_to_run)
        TestProgram(groups=['run_system_test'],
                    argv=clean_argv()).run_and_exit()

def run(**kwargs):
    config_name = kwargs.get('config_name', None)
    groups = kwargs.get('run_groups', [])
    old_groups = kwargs.get('groups', None)
    explain = kwargs.get('explain', None)
    groups_to_run = []
    groups.extend(old_groups or [])

    # Collect from pytest only once!
    pytest.main(['--collect-only', 'fuel_tests'])
    from fuel_tests.tests.conftest import test_names

    for g in set(groups):
        if g in test_names:
            sys.exit(pytest.main('-m {}'.format(g)))
        if config_name:
            register_system_test_cases(
                groups=[g],
                configs=[config_name])
            groups_to_run.append("{0}({1})".format(g, config_name))
        else:
            register_system_test_cases(groups=[g])
            groups_to_run.append(g)

    if not set([split_group_config(i)[0] if split_group_config(i) else i
                for i in groups_to_run]) < set(get_groups()):
        sys.exit('There are no cases mapped to the current group; '
                 'please make sure you passed the right test group name.')

    if explain:
        print_explain(groups)
    else:
        register(groups=["run_system_test"],
                 depends_on_groups=groups_to_run)
        TestProgram(groups=['run_system_test'],
                    argv=clean_argv_proboscis()).run_and_exit()

def import_tests():
    # TODO(tim.simpson): Import these again once white box test functionality
    # is restored.
    # from tests.dns import check_domain
    # from tests.dns import concurrency
    # from tests.dns import conversion

    # The DNS stuff is problematic. Not loading the other tests allows us to
    # run its functional tests only.
    ADD_DOMAINS = os.environ.get("ADD_DOMAINS", "False") == 'True'
    if not ADD_DOMAINS:
        from tests.api import delete_all
        from tests.api import instances_pagination
        from tests.api import instances_states
        from tests.dns import dns
        from tests import initialize
        from tests.smoke import instance
        from tests.volumes import driver

    # Groups that exist as core int-tests are registered from the
    # trove.tests.int_tests module
    from trove.tests import int_tests

    # Groups defined in trove/integration, or any other externally
    # defined groups, can be registered here
    heavy_black_box_groups = [
        "dbaas.api.instances.pagination",
        "dbaas.api.instances.delete",
        "dbaas.api.instances.status",
        "dbaas.api.instances.down",
        "dbaas.api.mgmt.hosts.update",
        "fake.dbaas.api.mgmt.instances",
        "fake.dbaas.api.mgmt.accounts.broken",
        "fake.dbaas.api.mgmt.allaccounts"
    ]
    proboscis.register(groups=["heavy_blackbox"],
                       depends_on_groups=heavy_black_box_groups)

def define_custom_groups():
    # Should move to system_test.__init__.py after upgrading devops to 2.9.13
    groups_list = [
        {"groups": ["system_test.ceph_ha"],
         "depends": [
             "system_test.deploy_and_check_radosgw."
             "ceph_all_on_neutron_vlan"]},
        {"groups": ["filling_root"],
         "depends": [
             "system_test.failover.filling_root."
             "ceph_all_on_neutron_vlan"]},
        {"groups": ["system_test.strength"],
         "depends": [
             "system_test.failover.destroy_controllers."
             "first.ceph_all_on_neutron_vlan",
             "system_test.failover.destroy_controllers."
             "second.1ctrl_ceph_2ctrl_1comp_1comp_ceph_neutronVLAN"]},
        {"groups": ["fuel_master_migrate"],
         "depends": [
             "system_test.fuel_migration.1ctrl_1comp_neutronVLAN",
             "system_test.fuel_migration.1ctrl_1comp_neutronTUN"]}
    ]

    for new_group in groups_list:
        register(groups=new_group['groups'],
                 depends_on_groups=new_group['depends'])

def run_tests():
    from tests.nodes_tests import NodesTests
    from tests.obm_tests import OBMTests
    from tests.amqp_tests import AMQPTests
    from tests.lookups_tests import LookupsTests
    from tests.profiles_tests import ProfilesTests
    from tests.config_tests import ConfigTests
    from tests.workflowTasks_tests import WorkflowTasksTests
    from tests.workflows_tests import WorkflowsTests

    groups_list = [
        'integration'
    ]
    depends_list = [
        'nodes.tests',
        'obm.tests',
        'amqp.tests',
        'lookups.tests',
        'profiles.tests',
        'config.tests',
        'workflowTasks.tests',
        'workflows.tests'
    ]
    register(groups=groups_list, depends_on_groups=depends_list)
    TestProgram().run_and_exit()

def run(**kwargs):
    config_name = kwargs.get('config_name', None)
    groups = kwargs.get('run_groups', [])
    old_groups = kwargs.get('groups', None)
    explain = kwargs.get('explain', None)
    groups_to_run = []
    groups.extend(old_groups or [])

    for g in set(groups):
        if config_name:
            register_system_test_cases(
                groups=[g],
                configs=[config_name])
            groups_to_run.append("{0}({1})".format(g, config_name))
        else:
            register_system_test_cases(groups=[g])
            groups_to_run.append(g)

    if not set([split_group_config(i)[0] if split_group_config(i) else i
                for i in groups_to_run]) < set(get_groups()):
        sys.exit('There are no cases mapped to the current group; '
                 'please make sure you passed the right test group name.')

    if explain:
        print_explain(groups)
    else:
        register(groups=["run_system_test"],
                 depends_on_groups=groups_to_run)
        TestProgram(groups=['run_system_test'],
                    argv=clean_argv()).run_and_exit()

def run_tests(api_ver, selected):
    if api_ver == '2':
        import benchmark.api_v2_0 as benchmark
    else:
        import benchmark.api_v1_1_tests

    register(groups=['poller'], depends_on_groups=['benchmark.poller'])
    register(groups=['discovery'], depends_on_groups=['benchmark.discovery'])

    if not selected:
        # The test groups need to run sequentially, while proboscis schedules
        # tests from different groups in a mixed manner. Adding dependencies
        # among the groups is not preferred since they can also be executed
        # alone. So TestProgram needs to be called once per group.
        # TestProgram calls sys.exit() when finishing, thus a subprocess is
        # created for each group.
        for case in ['poller', 'discovery']:
            child_pid = os.fork()
            if child_pid == 0:
                TestProgram(groups=[case]).run_and_exit()
            pid, status = os.waitpid(child_pid, 0)
        benchmark.ansible_ctl.dispose()
    else:
        TestProgram().run_and_exit()

def puppet_modules_mapping(modules):
    """Find the fuel-qa system test with the maximum coverage for the puppet
    modules edited in a review, and register that group under the
    "review_in_fuel_library" name.

    modules - dictionary of puppet modules edited in review
    Example: modules = {'horizon': 'fuel-library/deployment/Puppetfile'}
    """
    # open the yaml with covered modules
    with open("gates_tests/helpers/puppet_module_mapping.yaml", "r") as f:
        mapping = yaml.load(f)

    if modules and isinstance(modules, dict):
        all_modules = set([j for i in mapping.values() for j in i])
        logger.debug(
            "List of puppet modules covered by system_tests: {}".format(
                all_modules))
        logger.info(
            "List of modules edited in review: {}".format(modules.keys()))

        # check that each module from the review is covered by a system_test
        for module in modules.keys():
            if module.split('.')[0] not in all_modules:
                logger.warning(
                    "{}:{} module does not exist or is not covered by a"
                    " system_test".format(module, modules[module]))

        # find the test group with the best coverage of the review's modules
        formatted_modules = [module.split('.')[0] for module in modules]
        system_test = "bvt_2"
        max_intersection = 0
        if not ("ceph" in modules and
                {"roles/cinder.pp", "cinder", "openstack-cinder"} &
                set(modules)):
            for test in mapping:
                test_intersection = len(
                    set(mapping[test]).intersection(set(formatted_modules)))
                if test_intersection > max_intersection:
                    max_intersection = test_intersection
                    system_test = test
        # To completely check the ceph module we can't mix ceph and cinder
        # together
        else:
            logger.warning(
                "We cannot check cinder and ceph together: {}"
                .format(modules))
            system_test = "bvt_2"
    else:
        logger.warning("No modules were changed in the review, "
                       "so just run the default system test")
        system_test = "bvt_2"

    logger.info(
        "Puppet modules from review {}"
        " will be checked by the following system test: {}".format(
            modules, system_test))
    register(groups=['review_in_fuel_library'],
             depends_on_groups=[system_test])

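# A hedged sketch of the parsed puppet_module_mapping.yaml structure the
# function above expects, inferred only from its use of mapping.values() and
# set(mapping[test]); the group and module names here are hypothetical.
example_mapping = {
    "bvt_2": ["horizon", "nova", "keystone"],
    "ceph_ha": ["ceph", "osnailyfacter"],
}
# With this mapping, puppet_modules_mapping(
#     {"ceph": "fuel-library/deployment/Puppetfile"})
# would register "review_in_fuel_library" on top of "ceph_ha".
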
def run_tests():
    from tests import service_tests

    # Now create some groups of groups.
    register(groups=["integration"],
             depends_on_groups=["service.initialize",
                                "service.tests",
                                "service.shutdown"])
    register(groups=["slow"],
             depends_on_groups=["fast", "integration"])

    # Run Proboscis and exit.
    TestProgram().run_and_exit()

def openstack_puppet_project_mapping(project):
    """Find the fuel-qa system test with the maximum coverage for the edited
    openstack/puppet-project, and register that group under the
    "review_in_openstack_puppet_project" name.

    project - puppet project edited in review
    Example: project = "openstack/puppet-openstacklib"
    """
    # open the yaml with covered projects
    with open(
            "gates_tests/helpers/openstack_puppet_projects_mapping.yaml",
            "r") as f:
        mapping = yaml.load(f)

    all_projects = set(list(itertools.chain.from_iterable(
        [mapping[test_group]['modules'] for test_group in mapping])))
    logger.debug(
        "List of openstack/puppet-projects "
        "covered by system_tests: {}".format(all_projects))
    logger.info(
        "Edited project in review - '{}'".format(project))

    # check that the project from the review is covered by a system_test
    if project not in all_projects:
        logger.warning(
            "{} project does not exist or is not covered by a system_test"
            .format(project))

    # find the test group which covers the project edited in the review
    system_test = "bvt_2"
    for test in mapping:
        if project in mapping[test]['projects']:
            system_test = test
            break

    devops_template = mapping[system_test]['devops_settings_template']
    import gates_tests
    path_to_template = os.path.join(
        os.path.dirname(os.path.abspath(gates_tests.__file__)),
        devops_template)
    logger.debug("devops template is {}".format(path_to_template))
    os.environ['DEVOPS_SETTINGS_TEMPLATE'] = path_to_template

    logger.info(
        "Edited project in review - '{}'"
        " will be checked by the following system test: {}".format(
            project, system_test))
    register(groups=['review_in_openstack_puppet_project'],
             depends_on_groups=[system_test])

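# A hedged sketch of the parsed openstack_puppet_projects_mapping.yaml
# structure, inferred from the keys read above ('modules', 'projects',
# 'devops_settings_template'); all names are hypothetical placeholders.
example_mapping = {
    "bvt_2": {
        "modules": ["openstack/puppet-openstacklib"],
        "projects": ["openstack/puppet-openstacklib"],
        "devops_settings_template": "devops_configs/default.yaml",
    },
}
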
def define_custom_groups():
    # Should move to system_test.__init__.py after upgrading devops to 2.9.13
    groups_list = [
        {"groups": ["system_test.deploy_env"],
         "depends": [
             "system_test.deploy_env("
             "3_controllers_2compute_neutron_env)"]}
    ]

    for new_group in groups_list:
        register(groups=new_group['groups'],
                 depends_on_groups=new_group['depends'])

def run_tests(group=['smoke-tests']):
    import tests.api.v1_1 as api_1_1
    import tests.api.v2_0 as api_2_0
    import tests.api.redfish_1_0 as api_redfish_1_0

    register(groups=['api-v1.1'], depends_on_groups=api_1_1.tests)
    register(groups=['api-v2.0'], depends_on_groups=api_2_0.tests)
    register(groups=['api-redfish-1.0'],
             depends_on_groups=api_redfish_1_0.tests)
    register(groups=['smoke-tests'],
             depends_on_groups=['api-v1.1', 'api-v2.0', 'api-redfish-1.0'])
    register(groups=['regression-tests'],
             depends_on_groups=['smoke-tests'] +
             [test for test in
              api_1_1.regression_tests + api_2_0.regression_tests])

    TestProgram(groups=group).run_and_exit()

def register(group_names, *test_groups, **kwargs):
    if kwargs:
        register(group_names, kwargs.values())
        for suffix, grp_set in kwargs.items():
            # Recursively call without the kwargs
            register([name + '_' + suffix for name in group_names], *grp_set)
        return

    # Do the actual registration here
    proboscis.register(groups=build_group(group_names),
                       depends_on_groups=build_group(*test_groups))

    # Now register the same groups with '-' instead of '_'
    proboscis.register(
        groups=build_group(
            [name.replace('_', '-') for name in group_names]),
        depends_on_groups=build_group(*test_groups))

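# A hedged usage sketch of the register() wrapper above; the datastore and
# test-group names are hypothetical, and build_group() is assumed to flatten
# nested lists of group names.
register(["mysql"],
         single=["instance_create_group", "backup_group"],
         multi=["replication_group"])
# This registers "mysql" depending on all three listed groups, plus the
# suffixed groups "mysql_single" / "mysql_multi" (each also re-registered
# with '-' in place of '_', i.e. "mysql-single" / "mysql-multi").
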
def add(group, systest_group, config_name, validate_config=True):
    """Add a user-friendly group

    :type group: str
    :type systest_group: str
    :type config_name: str
    :type validate_config: bool
    """
    # from proboscis.decorators import DEFAULT_REGISTRY
    if validate_config and config_name not in yamls:
        raise NameError("Config {} not found".format(config_name))

    register(groups=[group],
             depends_on_groups=[
                 "{systest_group}({config_name})".format(
                     systest_group=systest_group,
                     config_name=config_name)])

def run_tests():
    from tests.nodes_tests import NodesTests
    from tests.obm_tests import OBMTests
    from tests.amqp_tests import AMQPTests
    from tests.lookups_tests import LookupsTests

    groups_list = [
        'integration'
    ]
    depends_list = [
        'nodes.tests',
        'obm.tests',
        'amqp.tests',
        'lookups.tests'
    ]
    register(groups=groups_list, depends_on_groups=depends_list)
    TestProgram().run_and_exit()

def add(group, systest_group, config_name, validate_config=True):
    """Add a user-friendly group

    :type group: str
    :type systest_group: str
    :type config_name: str
    :type validate_config: bool
    """
    # from proboscis.decorators import DEFAULT_REGISTRY
    if validate_config and config_name not in yamls:
        raise NameError("Config {} not found".format(config_name))

    register_system_test_cases(groups=[systest_group],
                               configs=[config_name])
    register(groups=[group],
             depends_on_groups=[
                 "{systest_group}({config_name})".format(
                     systest_group=systest_group,
                     config_name=config_name)
             ])

def add(group, systest_group, config_name, validate_config=True):
    """Add a user-friendly group

    :type group: str
    :type systest_group: str
    :type config_name: str
    :type validate_config: bool
    """
    if validate_config and config_name not in yamls:
        raise NameError("Config {} not found".format(config_name))

    register_system_test_cases(groups=[systest_group],
                               configs=[config_name])
    register(groups=[group],
             depends_on_groups=[
                 "{systest_group}({config_name})".format(
                     systest_group=systest_group,
                     config_name=config_name)
             ])

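# A hedged usage sketch for add() above; the group, system-test, and config
# names are hypothetical, and the config is assumed to be present in `yamls`.
add(group="ceph_ha_radosgw",
    systest_group="system_test.deploy_and_check_radosgw",
    config_name="ceph_all_on_neutron_vlan")
# Running the "ceph_ha_radosgw" group now pulls in the parametrized case
# "system_test.deploy_and_check_radosgw(ceph_all_on_neutron_vlan)".
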
def run(**kwargs):
    config_name = kwargs.get('config_name', None)
    # default to a list so the .extend() below is safe when the key is absent
    groups = kwargs.get('run_groups', [])
    old_groups = kwargs.get('groups', None)
    explain = kwargs.get('explain', None)
    groups_to_run = []
    groups.extend(old_groups or [])

    for g in groups:
        if config_name:
            register_system_test_cases(groups=[g], configs=[config_name])
            groups_to_run.append("{0}({1})".format(g, config_name))
        else:
            register_system_test_cases(groups=[g])
            groups_to_run.append(g)

    if explain:
        print_explain(groups)
    else:
        register(groups=["run_system_test"],
                 depends_on_groups=groups_to_run)
        TestProgram(groups=['run_system_test'],
                    argv=clean_argv()).run_and_exit()

def run_tests():
    from exampleOne import ProboscisClass
    from exampleTwo import ProboscisClassTwo

    register(groups=['3a'], depends_on_groups=['a'])
    register(groups=['3b'], depends_on_groups=['b'])
    register(groups=['3c'], depends_on_groups=['c'])

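# A minimal end-to-end sketch of the proboscis group chaining used above,
# under the assumption that groups 'a', 'b', 'c' are populated by @test
# decorators elsewhere (ProboscisClass and ProboscisClassTwo); everything
# below is illustrative.
from proboscis import test, register, TestProgram


@test(groups=['a'])
def sample_case():
    """Runs whenever group 'a' (and therefore '3a') is selected."""
    pass


register(groups=['3a'], depends_on_groups=['a'])

if __name__ == '__main__':
    TestProgram(groups=['3a']).run_and_exit()
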
def run_tests(api_ver, selected):
    if api_ver == '2':
        # No actual case yet
        import benchmark.tests.api_v2_0_tests
    else:
        import benchmark.tests.api_v1_1_tests

    register(groups=['poller'], depends_on_groups=['benchmark.poller'])
    register(groups=['discovery'], depends_on_groups=['benchmark.discovery'])
    register(groups=['bootstrap'], depends_on_groups=['benchmark.bootstrap'])

    if not selected:
        # The three test groups need to run sequentially, while proboscis
        # schedules tests from different groups in a mixed manner. Adding
        # dependencies among the groups is not preferred since they can also
        # be executed separately. So TestProgram needs to be called three
        # times for the different groups. TestProgram calls sys.exit() when
        # finishing, thus a subprocess is created for each group.
        for case in ['poller', 'discovery', 'bootstrap']:
            child_pid = os.fork()
            if child_pid == 0:
                TestProgram(groups=[case]).run_and_exit()
            pid, status = os.waitpid(child_pid, 0)
            if status != 0:
                break
    else:
        TestProgram().run_and_exit()
    benchmark.tests.ansible_ctl.dispose()

def import_tests():
    # TODO(tim.simpson): Import these again once white box test functionality
    # is restored.
    # from tests.dns import check_domain
    # from tests.dns import concurrency
    # from tests.dns import conversion

    # The DNS stuff is problematic. Not loading the other tests allows us to
    # run its functional tests only.
    ADD_DOMAINS = os.environ.get("ADD_DOMAINS", "False") == 'True'
    if not ADD_DOMAINS:
        from tests.api import delete_all
        from tests.api import instances_pagination
        from tests.api import instances_quotas
        from tests.api import instances_states
        from tests.dns import dns
        from tests import initialize
        from tests.smoke import instance
        from tests.volumes import driver

    # Groups that exist as core int-tests are registered from the
    # trove.tests.int_tests module
    from trove.tests import int_tests

    # Groups defined in trove-integration, or any other externally
    # defined groups, can be registered here
    heavy_black_box_groups = [
        "dbaas.api.instances.pagination",
        "dbaas.api.instances.delete",
        "dbaas.api.instances.status",
        "dbaas.api.instances.down",
        "dbaas.api.mgmt.hosts.update",
        "fake.dbaas.api.mgmt.instances",
        "fake.dbaas.api.mgmt.accounts.broken",
        "fake.dbaas.api.mgmt.allaccounts"
    ]
    proboscis.register(groups=["heavy_blackbox"],
                       depends_on_groups=heavy_black_box_groups)

def run_tests(group=['smoke-tests']):
    import tests.api.v2_0 as api_2_0

    register(groups=['api-v2.0'], depends_on_groups=api_2_0.tests)
    register(groups=['smoke-tests'], depends_on_groups=['api-v2.0'])
    register(groups=['regression-tests'],
             depends_on_groups=['smoke-tests'] +
             [test for test in api_2_0.regression_tests])

    TestProgram(groups=group).run_and_exit()

def run_tests():
    import tests.api.v1_1 as api_1_1
    import tests.api.v2_0 as api_2_0
    import tests.api.redfish_1_0 as api_redfish_1_0

    register(groups=['api-v1.1'], depends_on_groups=api_1_1.tests)
    register(groups=['api-v2.0'], depends_on_groups=api_2_0.tests)
    register(groups=['api-redfish-1.0'],
             depends_on_groups=api_redfish_1_0.tests)

    TestProgram().run_and_exit()

def run_tests(api_ver, selected):
    if api_ver == '2':
        import benchmark.api_v2_0 as benchmark
    else:
        import benchmark.api_v1_1 as benchmark

    register(groups=['poller'],
             depends_on_groups=benchmark.benchmark_poller_tests)
    register(groups=['discovery'],
             depends_on_groups=benchmark.benchmark_discovery_tests)
    register(groups=['bootstrap'],
             depends_on_groups=benchmark.benchmark_bootstrap_tests)

    if not selected:
        TestProgram(
            groups=['poller', 'discovery', 'bootstrap']).run_and_exit()
    else:
        TestProgram().run_and_exit()

"dbaas.guest.initialize", # instances.GROUP_START, "dbaas.preinstance", "dbaas_quotas", "dbaas.api.security_groups", backups.GROUP, "dbaas.api.instances.actions.resize.instance", # TODO(SlickNik): The restart tests fail intermittently so pulling # them out of the blackbox group temporarily. Refer to Trove bug: # https://bugs.launchpad.net/trove/+bug/1204233 # "dbaas.api.instances.actions.restart", "dbaas.api.instances.actions.stop", "dbaas.guest.shutdown", versions.GROUP, "dbaas.guest.start.test", ] proboscis.register(groups=["blackbox"], depends_on_groups=black_box_groups) simple_black_box_groups = [ "services.initialize", flavors.GROUP, versions.GROUP, GROUP_START_SIMPLE, "dbaas.api.mgmt.admin", "dbaas.preinstance" ] proboscis.register(groups=["simple_blackbox"], depends_on_groups=simple_black_box_groups) black_box_mgmt_groups = [ accounts.GROUP, hosts.GROUP,
from tests.scheduler import driver
from tests.scheduler import SCHEDULER_DRIVER_GROUP
from tests.volumes import driver
from tests.volumes import VOLUMES_DRIVER
from tests.compute import guest_initialize_failure
from tests.openvz import compute_reboot_vz as compute_reboot
from tests import util

black_box_groups = [
    # flavors.GROUP,
    "services.initialize",
    "dbaas.guest.initialize",
    # instances.GROUP_START,
    "dbaas.preinstance",
    versions.GROUP,
]
proboscis.register(groups=["blackbox"],
                   depends_on_groups=black_box_groups)

# This is for the old white-box tests...
host_ovz_groups = [
    "dbaas.guest",
    compute_reboot.GROUP,
    "dbaas.guest.dns",
    SCHEDULER_DRIVER_GROUP,
    VOLUMES_DRIVER,
    guest_initialize_failure.GROUP,
    volume_reaping.GROUP
]
if WHITE_BOX and util.should_run_rsdns_tests():
    host_ovz_groups += ["rsdns.conversion", "rsdns.domains",
                        "rsdns.eventlet"]
proboscis.register(groups=["host.ovz"],

    backups.GROUP,
    replication.GROUP,
    configurations.GROUP,
    datastores.GROUP,
    instances_actions.GROUP_RESIZE,
    # TODO(SlickNik): The restart tests fail intermittently so pulling
    # them out of the blackbox group temporarily. Refer to Trove bug:
    # https://bugs.launchpad.net/trove/+bug/1204233
    # instances_actions.GROUP_RESTART,
    instances_actions.GROUP_STOP_MYSQL,
    instances.GROUP_STOP,
    versions.GROUP,
    instances.GROUP_GUEST,
    datastore_versions.GROUP,
]
proboscis.register(groups=["blackbox", "mysql"],
                   depends_on_groups=black_box_groups)

simple_black_box_groups = [
    GROUP_SERVICES_INITIALIZE,
    flavors.GROUP,
    versions.GROUP,
    instances.GROUP_START_SIMPLE,
    admin_required.GROUP,
    datastore_versions.GROUP,
]
proboscis.register(groups=["simple_blackbox"],
                   depends_on_groups=simple_black_box_groups)

black_box_mgmt_groups = [
    accounts.GROUP,
    hosts.GROUP,

def register(datastores, *test_groups):
    proboscis.register(groups=build_group(datastores),
                       depends_on_groups=build_group(*test_groups))

# NOTE(lxkong): Remove the module-related tests (module_groups) for now
# because there is no use case.
common_groups.extend([guest_log_groups, instance_init_groups])

integration_groups = [
    tests.DBAAS_API_VERSIONS,
    tests.DBAAS_API_DATASTORES,
    tests.DBAAS_API_MGMT_DATASTORES,
    tests.DBAAS_API_INSTANCES,
    tests.DBAAS_API_USERS_ROOT,
    tests.DBAAS_API_USERS,
    tests.DBAAS_API_USERS_ACCESS,
    tests.DBAAS_API_DATABASES,
    tests.DBAAS_API_INSTANCE_ACTIONS,
    tests.DBAAS_API_BACKUPS,
    tests.DBAAS_API_CONFIGURATIONS,
    tests.DBAAS_API_REPLICATION,
    tests.DBAAS_API_INSTANCES_DELETE
]
# We intentionally make the functional tests run in series and depend on
# each other, so that a single test case failure will stop the whole run.
proboscis.register(groups=["mysql"], depends_on_groups=integration_groups)

register(["mysql_supported"],
         single=[
             instance_create_group.GROUP,
             backup_group.GROUP,
             configuration_group.GROUP,
             database_actions_group.GROUP,
             guest_log_group.GROUP,
             instance_actions_group.GROUP,
             instance_error_create_group.GROUP,
             instance_force_delete_group.GROUP,
             root_actions_group.GROUP,
             user_actions_group.GROUP,
             instance_delete_group.GROUP
         ],
         multi=[replication_group.GROUP, instance_delete_group.GROUP])

register(["mariadb_supported"],
         single=[
             instance_create_group.GROUP,
             backup_group.GROUP,

    hosts.GROUP,
    storage.GROUP,
    "services.initialize",
    "dbaas.guest.initialize",
    # instances.GROUP_START,
    "dbaas.preinstance",
    "dbaas.api.instances.actions.resize.instance",
    "dbaas.api.instances.actions.restart",
    "dbaas.api.instances.actions.stop",
    "dbaas.api.instances.actions.reboot",
    "dbaas.guest.shutdown",
    versions.GROUP,
    "dbaas.guest.start.test",
    "dbaas.api.mgmt.admin",
    "dbaas.api.mgmt.instances",
]
proboscis.register(groups=["blackbox"],
                   depends_on_groups=black_box_groups)

heavy_black_box_groups = [
    "dbaas.api.instances.pagination",
    "dbaas.api.instances.quotas",
    "dbaas.api.instances.delete",
    "dbaas.api.instances.status",
    "dbaas.api.instances.down",
    "dbaas.api.mgmt.hosts.update",
    "fake.dbaas.api.mgmt.instances",
    "fake.dbaas.api.mgmt.accounts.broken",
    "fake.dbaas.api.mgmt.allaccounts"
]
proboscis.register(groups=["heavy_blackbox"],
                   depends_on_groups=heavy_black_box_groups)

# This is for the old white-box tests...
host_ovz_groups = [
    "dbaas.guest",
    compute_reboot.GROUP,
    "dbaas.guest.dns",

def import_tests():
    # TODO(tim.simpson): Import these again once white box test functionality
    # is restored.
    # from tests.dns import check_domain
    # from tests.dns import concurrency
    # from tests.dns import conversion

    # The DNS stuff is problematic. Not loading the other tests allows us to
    # run its functional tests only.
    ADD_DOMAINS = os.environ.get("ADD_DOMAINS", "False") == 'True'
    if not ADD_DOMAINS:
        from tests import initialize
        from tests.api import delete_all
        from trove.tests.api import flavors
        from trove.tests.api import versions
        from trove.tests.api import instances
        from trove.tests.api import instances_actions
        from trove.tests.api import instances_mysql_down
        from tests.api import instances_pagination
        from trove.tests.api import instances_delete
        from tests.api import instances_quotas
        from tests.api import instances_states
        from trove.tests.api import databases
        from trove.tests.api import root
        from trove.tests.api import users
        from trove.tests.api import user_access
        from trove.tests.api import backups
        from trove.tests.api import configurations
        from trove.tests.api.mgmt import accounts
        from trove.tests.api.mgmt import admin_required
        from trove.tests.api.mgmt import hosts
        from trove.tests.api.mgmt import instances as mgmt_instances
        from trove.tests.api.mgmt import storage
        from tests.dns import dns
        from tests.volumes import driver
        from tests.smoke import instance

    black_box_groups = [
        flavors.GROUP,
        users.GROUP,
        user_access.GROUP,
        databases.GROUP,
        root.GROUP,
        "services.initialize",
        instances.GROUP_START,
        "dbaas_quotas",
        "dbaas.api.security_groups",
        backups.GROUP,
        configurations.GROUP,
        "dbaas.api.instances.actions.resize.instance",
        # TODO(SlickNik): The restart tests fail intermittently so pulling
        # them out of the blackbox group temporarily. Refer to Trove bug:
        # https://bugs.launchpad.net/trove/+bug/1204233
        # "dbaas.api.instances.actions.restart",
        instances_actions.GROUP_STOP_MYSQL,
        instances.GROUP_STOP,
        versions.GROUP,
        "dbaas.guest.start.test",
    ]
    proboscis.register(groups=["blackbox"],
                       depends_on_groups=black_box_groups)

    simple_black_box_groups = [
        "services.initialize",
        flavors.GROUP,
        versions.GROUP,
        instances.GROUP_START_SIMPLE,
        "dbaas.api.mgmt.admin",
    ]
    proboscis.register(groups=["simple_blackbox"],
                       depends_on_groups=simple_black_box_groups)

    black_box_mgmt_groups = [
        accounts.GROUP,
        hosts.GROUP,
        storage.GROUP,
        instances_actions.GROUP_REBOOT,
        admin_required.GROUP,
        mgmt_instances.GROUP,
    ]
    proboscis.register(groups=["blackbox_mgmt"],
                       depends_on_groups=black_box_mgmt_groups)

    heavy_black_box_groups = [
        "dbaas.api.instances.pagination",
        "dbaas.api.instances.delete",
        "dbaas.api.instances.status",
        "dbaas.api.instances.down",
        "dbaas.api.mgmt.hosts.update",
        "fake.dbaas.api.mgmt.instances",
        "fake.dbaas.api.mgmt.accounts.broken",
        "fake.dbaas.api.mgmt.allaccounts"
    ]
    proboscis.register(groups=["heavy_blackbox"],
                       depends_on_groups=heavy_black_box_groups)

    cassandra_groups = [
        "services.initialize",
        flavors.GROUP,
        versions.GROUP,
        instances.GROUP_START_SIMPLE,
    ]
    proboscis.register(groups=["cassandra"],
                       depends_on_groups=cassandra_groups)

def map_test(target):
    assert_is_not_none(settings.PATCHING_BUG_ID,
                       "Bug ID wasn't specified, can't start patching tests!")
    errata = get_errata(path=settings.PATCHING_APPLY_TESTS,
                        bug_id=settings.PATCHING_BUG_ID)
    verify_errata(errata)

    if not any(target == e_target['type'] for e_target in errata['targets']):
        skip_patching_test(target, errata['target'])

    env_distro = settings.OPENSTACK_RELEASE
    master_distro = settings.OPENSTACK_RELEASE_CENTOS
    if 'affected-pkgs' in errata.keys():
        if target == 'master':
            settings.PATCHING_PKGS = set([
                re.split('=|<|>', package)[0]
                for package in errata['affected-pkgs'][master_distro.lower()]
            ])
        else:
            settings.PATCHING_PKGS = set([
                re.split('=|<|>', package)[0]
                for package in errata['affected-pkgs'][env_distro.lower()]
            ])

    available_env_packages = set()
    available_master_packages = set()
    for repo in settings.PATCHING_MIRRORS:
        logger.debug('Checking packages from "{0}" repository'.format(repo))
        available_env_packages.update(get_repository_packages(
            repo, env_distro))
    for repo in settings.PATCHING_MASTER_MIRRORS:
        logger.debug('Checking packages from "{0}" repository'.format(repo))
        available_master_packages.update(
            get_repository_packages(repo, master_distro))
    available_packages = available_env_packages | available_master_packages

    if not settings.PATCHING_PKGS:
        if target == 'master':
            settings.PATCHING_PKGS = available_master_packages
        else:
            settings.PATCHING_PKGS = available_env_packages
    else:
        assert_true(
            settings.PATCHING_PKGS <= available_packages,
            "Patching repositories don't contain all packages needed"
            " for tests. Need: {0}, available: {1}, missing: {2}."
            "".format(settings.PATCHING_PKGS, available_packages,
                      settings.PATCHING_PKGS - available_packages))
    assert_not_equal(
        len(settings.PATCHING_PKGS), 0,
        "No packages found in repository(s) for patching:"
        " '{0} {1}'".format(settings.PATCHING_MIRRORS,
                            settings.PATCHING_MASTER_MIRRORS))

    if target == 'master':
        tests_groups = get_packages_tests(settings.PATCHING_PKGS,
                                          master_distro,
                                          target)
    else:
        tests_groups = get_packages_tests(settings.PATCHING_PKGS,
                                          env_distro,
                                          target)

    if 'rally' in errata.keys():
        if len(errata['rally']) > 0:
            settings.PATCHING_RUN_RALLY = True
            settings.RALLY_TAGS = errata['rally']

    if settings.PATCHING_CUSTOM_TEST:
        deployment_test = settings.PATCHING_CUSTOM_TEST
        settings.PATCHING_SNAPSHOT = \
            'patching_after_{0}'.format(deployment_test)
        register(groups=['prepare_patching_environment'],
                 depends_on_groups=[deployment_test])
        register(groups=['prepare_patching_master_environment'],
                 depends_on_groups=[deployment_test])
    else:
        program = TestProgram(argv=['none'])
        deployment_test = None
        for my_test in program.plan.tests:
            if all(patching_group in my_test.entry.info.groups
                   for patching_group in tests_groups):
                deployment_test = my_test
                break
        if deployment_test:
            settings.PATCHING_SNAPSHOT = 'patching_after_{0}'.format(
                deployment_test.entry.method.im_func.func_name)
            if target == 'master':
                register(groups=['prepare_patching_master_environment'],
                         depends_on=[deployment_test.entry.home])
            else:
                register(groups=['prepare_patching_environment'],
                         depends_on=[deployment_test.entry.home])
        else:
            raise Exception(
                "Test with groups {0} not found.".format(tests_groups))

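# A hedged sketch of the errata structure map_test() reads above, inferred
# only from the keys it accesses ('targets', 'affected-pkgs', 'rally'); the
# distro key and package names are hypothetical placeholders.
example_errata = {
    'targets': [{'type': 'master'}, {'type': 'environment'}],
    'affected-pkgs': {
        'ubuntu': ['python-nova>=2015.1', 'nova-common'],
    },
    'rally': ['scenario_tag'],
}
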
def map_test(target):
    assert_is_not_none(settings.PATCHING_BUG_ID,
                       "Bug ID wasn't specified, can't start patching tests!")
    errata = get_errata(path=settings.PATCHING_APPLY_TESTS,
                        bug_id=settings.PATCHING_BUG_ID)
    verify_errata(errata)

    if not any(target == e_target['type'] for e_target in errata['targets']):
        skip_patching_test(target, errata['target'])

    if target == 'master':
        # On the master node we have only CentOS containers, so always check
        # only the CentOS packages available for update
        distro = settings.OPENSTACK_RELEASE_CENTOS
    else:
        distro = settings.OPENSTACK_RELEASE

    if settings.PATCHING_CUSTOM_TEST:
        deployment_test = settings.PATCHING_CUSTOM_TEST
        settings.PATCHING_SNAPSHOT = \
            'patching_after_{0}'.format(deployment_test)
        register(groups=['prepare_patching_environment'],
                 depends_on_groups=[deployment_test])
        register(groups=['prepare_master_environment'],
                 depends_on_groups=[deployment_test])
    else:
        if 'fixed-pkgs' in errata.keys():
            settings.PATCHING_PKGS = set(
                [re.split('=|<|>', package)[0]
                 for package in errata['fixed-pkgs'][distro.lower()]])
        available_packages = set()
        logger.debug('{0}'.format(settings.PATCHING_MIRRORS))
        for repo in settings.PATCHING_MIRRORS:
            logger.debug(
                'Checking packages from "{0}" repository'.format(repo))
            available_packages.update(get_repository_packages(repo, distro))
        if not settings.PATCHING_PKGS:
            settings.PATCHING_PKGS = available_packages
        else:
            assert_true(settings.PATCHING_PKGS <= available_packages,
                        "Patching repositories don't contain"
                        " all packages needed"
                        " for tests. Need: {0}, but available: {1}.".format(
                            settings.PATCHING_PKGS, available_packages))
        assert_not_equal(len(settings.PATCHING_PKGS), 0,
                         "No packages found in repository(s) for patching:"
                         " '{0}'".format(settings.PATCHING_MIRRORS))
        tests_groups = get_packages_tests(settings.PATCHING_PKGS,
                                          distro,
                                          target)
        program = TestProgram(argv=['none'])
        deployment_test = None
        for my_test in program.plan.tests:
            if all(patching_group in my_test.entry.info.groups
                   for patching_group in tests_groups):
                deployment_test = my_test
                break
        if deployment_test:
            settings.PATCHING_SNAPSHOT = 'patching_after_{0}'.format(
                deployment_test.entry.method.im_func.func_name)
            if target == 'master':
                register(groups=['prepare_master_environment'],
                         depends_on=[deployment_test.entry.home])
            else:
                register(groups=['prepare_patching_environment'],
                         depends_on=[deployment_test.entry.home])
        else:
            raise Exception(
                "Test with groups {0} not found.".format(tests_groups))

def import_tests():
    # TODO(tim.simpson): Import these again once white box test functionality
    # is restored.
    # from tests.dns import check_domain
    # from tests.dns import concurrency
    # from tests.dns import conversion

    # The DNS stuff is problematic. Not loading the other tests allows us to
    # run its functional tests only.
    ADD_DOMAINS = os.environ.get("ADD_DOMAINS", "False") == 'True'
    if not ADD_DOMAINS:
        from tests import initialize
        from tests.api import delete_all
        from trove.tests.api import flavors
        from trove.tests.api import versions
        from trove.tests.api import instances
        from trove.tests.api import instances_actions
        from trove.tests.api import instances_mysql_down
        from tests.api import instances_pagination
        from trove.tests.api import instances_delete
        from tests.api import instances_quotas
        from tests.api import instances_states
        from trove.tests.api import databases
        from trove.tests.api import root
        from trove.tests.api import users
        from trove.tests.api import user_access
        from trove.tests.api import backups
        from trove.tests.api import configurations
        from trove.tests.api import datastores
        from trove.tests.api.mgmt import accounts
        from trove.tests.api.mgmt import admin_required
        from trove.tests.api.mgmt import hosts
        from trove.tests.api.mgmt import instances as mgmt_instances
        from trove.tests.api.mgmt import storage
        from tests.dns import dns
        from tests.volumes import driver
        from tests.smoke import instance

    black_box_groups = [
        flavors.GROUP,
        users.GROUP,
        user_access.GROUP,
        databases.GROUP,
        root.GROUP,
        "services.initialize",
        instances.GROUP_START,
        "dbaas_quotas",
        "dbaas.api.security_groups",
        backups.GROUP,
        configurations.GROUP,
        datastores.GROUP,
        "dbaas.api.instances.actions.resize.instance",
        # TODO(SlickNik): The restart tests fail intermittently so pulling
        # them out of the blackbox group temporarily. Refer to Trove bug:
        # https://bugs.launchpad.net/trove/+bug/1204233
        # "dbaas.api.instances.actions.restart",
        instances_actions.GROUP_STOP_MYSQL,
        instances.GROUP_STOP,
        versions.GROUP,
        "dbaas.guest.start.test",
    ]
    proboscis.register(groups=["blackbox"],
                       depends_on_groups=black_box_groups)

    simple_black_box_groups = [
        "services.initialize",
        flavors.GROUP,
        versions.GROUP,
        instances.GROUP_START_SIMPLE,
        "dbaas.api.mgmt.admin",
    ]
    proboscis.register(groups=["simple_blackbox"],
                       depends_on_groups=simple_black_box_groups)

    black_box_mgmt_groups = [
        accounts.GROUP,
        hosts.GROUP,
        storage.GROUP,
        instances_actions.GROUP_REBOOT,
        admin_required.GROUP,
        mgmt_instances.GROUP,
    ]
    proboscis.register(groups=["blackbox_mgmt"],
                       depends_on_groups=black_box_mgmt_groups)

    heavy_black_box_groups = [
        "dbaas.api.instances.pagination",
        "dbaas.api.instances.delete",
        "dbaas.api.instances.status",
        "dbaas.api.instances.down",
        "dbaas.api.mgmt.hosts.update",
        "fake.dbaas.api.mgmt.instances",
        "fake.dbaas.api.mgmt.accounts.broken",
        "fake.dbaas.api.mgmt.allaccounts"
    ]
    proboscis.register(groups=["heavy_blackbox"],
                       depends_on_groups=heavy_black_box_groups)

    # cassandra, couchbase and mongodb share the same basic smoke groups
    cassandra_groups = [
        "services.initialize",
        flavors.GROUP,
        versions.GROUP,
        instances.GROUP_START_SIMPLE,
    ]
    proboscis.register(groups=["cassandra"],
                       depends_on_groups=cassandra_groups)

    couchbase_groups = [
        "services.initialize",
        flavors.GROUP,
        versions.GROUP,
        instances.GROUP_START_SIMPLE,
    ]
    proboscis.register(groups=["couchbase"],
                       depends_on_groups=couchbase_groups)

    mongodb_groups = [
        "services.initialize",
        flavors.GROUP,
        versions.GROUP,
        instances.GROUP_START_SIMPLE,
    ]
    proboscis.register(groups=["mongodb"],
                       depends_on_groups=mongodb_groups)

    instances.GROUP_SECURITY_GROUPS,
    backups.GROUP,
    replication.GROUP,
    configurations.GROUP,
    datastores.GROUP,
    instances_actions.GROUP_RESIZE,
    # TODO(SlickNik): The restart tests fail intermittently so pulling
    # them out of the blackbox group temporarily. Refer to Trove bug:
    # https://bugs.launchpad.net/trove/+bug/1204233
    # instances_actions.GROUP_RESTART,
    instances_actions.GROUP_STOP_MYSQL,
    instances.GROUP_STOP,
    versions.GROUP,
    instances.GROUP_GUEST,
]
proboscis.register(groups=["blackbox", "mysql"],
                   depends_on_groups=black_box_groups)

simple_black_box_groups = [
    GROUP_SERVICES_INITIALIZE,
    flavors.GROUP,
    versions.GROUP,
    instances.GROUP_START_SIMPLE,
    admin_required.GROUP,
]
proboscis.register(groups=["simple_blackbox"],
                   depends_on_groups=simple_black_box_groups)

black_box_mgmt_groups = [
    accounts.GROUP,
    hosts.GROUP,
    storage.GROUP,
    instances_actions.GROUP_REBOOT,

def fuel_library_modules_mapping(modules):
    """Find the fuel-qa system test with the maximum coverage for the puppet
    modules edited in a review, and register that group under the
    "review_in_fuel_library" name.

    modules - dictionary of puppet modules edited in review
    Example: modules = {'horizon': 'fuel-library/deployment/Puppetfile'}
    """
    # open the yaml with covered modules
    with open("gates_tests/helpers/fuel_library_modules_mapping.yaml",
              "r") as f:
        mapping = yaml.load(f)

    if modules and isinstance(modules, dict):
        all_modules = set(list(itertools.chain.from_iterable(
            [mapping[test_group]['modules'] for test_group in mapping])))
        logger.debug(
            "List of puppet modules covered by system_tests: {}".format(
                all_modules))
        logger.info("List of modules edited in review: {}".format(
            modules.keys()))

        # check that each module from the review is covered by a system_test
        for module in modules.keys():
            if module not in all_modules:
                logger.warning(
                    "{}:{} module does not exist or is not covered by a"
                    " system_test".format(module, modules[module]))

        # find the test group with the best coverage of the review's modules
        system_test = "bvt_2"
        max_intersection = 0
        if not ("ceph" in modules and
                {"roles/cinder.pp", "cinder", "openstack-cinder"} &
                set(modules)):
            for test in mapping:
                test_intersection = len(
                    set(mapping[test]['modules']).intersection(set(modules)))
                if test_intersection > max_intersection:
                    max_intersection = test_intersection
                    system_test = test
            devops_template = mapping[system_test]['devops_settings_template']
            import gates_tests
            path_to_template = os.path.join(
                os.path.dirname(os.path.abspath(gates_tests.__file__)),
                devops_template)
            logger.debug("devops template is {}".format(path_to_template))
            os.environ['DEVOPS_SETTINGS_TEMPLATE'] = path_to_template
        # To completely check the ceph module we can't mix ceph and cinder
        # together
        else:
            logger.warning(
                "We cannot check cinder and ceph together: {}".format(
                    modules))
            system_test = "bvt_2"
    else:
        logger.warning("No modules were changed in the review, "
                       "so just run the default system test")
        system_test = "bvt_2"

    logger.info("Puppet modules from review {}"
                " will be checked by the following system test: {}".format(
                    modules, system_test))
    register(groups=['review_in_fuel_library'],
             depends_on_groups=[system_test])