Example 1
    def skip_checks(cls):
        super(VolumeManageAdminTest, cls).skip_checks()

        if not CONF.volume_feature_enabled.manage_volume:
            raise cls.skipException("Manage volume tests are disabled")

        if len(CONF.volume.manage_volume_ref) != 2:
            msg = ("Manage volume ref is not correctly configured, "
                   "it should be a list of two elements")
            raise exceptions.InvalidConfiguration(msg)
Example 2
 def setup_clients(cls):
     super(FlavorsV2NegativeTest, cls).setup_clients()
     if CONF.image_feature_enabled.api_v1:
         cls.images_client = cls.os.image_client
     elif CONF.image_feature_enabled.api_v2:
         cls.images_client = cls.os.image_client_v2
     else:
         raise lib_exc.InvalidConfiguration(
             'Either api_v1 or api_v2 must be True in '
             '[image-feature-enabled].')
Example 3
 def setup_clients(cls):
     super(ImagesMetadataRbacTest, cls).setup_clients()
     if CONF.image_feature_enabled.api_v2:
         cls.glance_image_client = cls.os_primary.image_client_v2
     elif CONF.image_feature_enabled.api_v1:
         cls.glance_image_client = cls.os_primary.image_client
     else:
         raise lib_exc.InvalidConfiguration(
             'Either api_v1 or api_v2 must be True in '
             '[image-feature-enabled].')
Example 4
def get_configured_admin_credentials(fill_in=True, identity_version=None):
    """Get admin credentials from the config file

    Read credentials from configuration and build a Credentials object based
    on the specified or configured identity version.

    :param fill_in: If True, a request to the Token API is submitted, and the
                    credential object is filled in with all names and IDs from
                    the token API response.
    :param identity_version: The identity version to talk to and the type of
                             credentials object to be created. 'v2' or 'v3'.
    :returns: An object of a sub-type of `auth.Credentials`
    """
    identity_version = identity_version or CONF.identity.auth_version

    if identity_version not in ('v2', 'v3'):
        raise exceptions.InvalidConfiguration('Unsupported auth version: %s' %
                                              identity_version)

    conf_attributes = ['username', 'password', 'project_name']

    if identity_version == 'v3':
        conf_attributes.append('domain_name')
        conf_attributes.append('user_domain_name')
        conf_attributes.append('project_domain_name')
        conf_attributes.append('system')
    # Read the parts of credentials from config
    params = config.service_client_config()
    for attr in conf_attributes:
        params[attr] = getattr(CONF.auth, 'admin_' + attr)
    # Build and validate credentials. We are reading configured credentials,
    # so validate them even if fill_in is False
    credentials = get_credentials(fill_in=fill_in,
                                  identity_version=identity_version,
                                  **params)
    if not fill_in:
        if not credentials.is_valid():
            msg = ("The admin credentials are incorrectly set in the config "
                   "file for identity version %s. Double check that all "
                   "required values are assigned.")
            raise exceptions.InvalidConfiguration(msg % identity_version)
    return credentials
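A brief usage sketch (not part of the original snippet; the identity version and fill_in value are illustrative): building the admin credentials object from configuration only, without contacting the Token API.

    # Hypothetical call using the helper above.
    admin_creds = get_configured_admin_credentials(fill_in=False,
                                                   identity_version='v3')
    # With fill_in=False the credentials are still validated locally, so a
    # missing admin_username/admin_password in tempest.conf raises
    # InvalidConfiguration here rather than later, when the credentials are
    # first used.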
Example 5
    def setUp(self):
        base.verify_test_has_appropriate_tags(self)
        if self.ipv6_enabled and not CONF.share.run_ipv6_tests:
            raise self.skipException("IPv6 tests are disabled")
        if self.protocol not in CONF.share.enable_protocols:
            message = "%s tests are disabled" % self.protocol
            raise self.skipException(message)
        if self.protocol not in CONF.share.enable_ip_rules_for_protocols:
            message = ("%s tests for access rules other than IP are disabled" %
                       self.protocol)
            raise self.skipException(message)
        super(ShareScenarioTest, self).setUp()

        self.image_id = None
        # Set up the image and flavor for the test instance
        # Support both configured and injected values
        self.floating_ips = {}

        if not hasattr(self, 'flavor_ref'):
            self.flavor_ref = CONF.share.client_vm_flavor_ref

        if CONF.share.image_with_share_tools == 'centos':
            self.image_ref = self._create_centos_based_glance_image()
        elif CONF.share.image_with_share_tools:
            images = self.compute_images_client.list_images()["images"]
            for img in images:
                if img["name"] == CONF.share.image_with_share_tools:
                    self.image_id = img['id']
                    break
            if not self.image_id:
                msg = ("Image %s not found. Expecting an image including "
                       "required share tools." %
                       CONF.share.image_with_share_tools)
                raise exceptions.InvalidConfiguration(message=msg)
        self.ssh_user = CONF.share.image_username
        LOG.debug('Starting test for i:{image_id}, f:{flavor}. '
                  'user: {ssh_user}'.format(image_id=self.image_id,
                                            flavor=self.flavor_ref,
                                            ssh_user=self.ssh_user))

        self.security_group = self._create_security_group()
        self.network = self._create_network(namestart="manila-share")
        self.subnet = self._create_subnet(
            network=self.network,
            namestart="manila-share-sub",
            ip_version=self.ip_version,
            use_default_subnetpool=self.ipv6_enabled)
        router = self._get_router()
        self._create_router_interface(subnet_id=self.subnet['id'],
                                      router_id=router['id'])

        if CONF.share.multitenancy_enabled:
            # Skip if DHSS=False
            self.share_network = self.create_share_network()
Example 6
 def verify_unallocated_floating_ip_range(cls, ip_range):
     # Verify that the configured floating IP range is not already allocated.
     body = cls.client.list_floating_ips_bulk()['floating_ip_info']
     allocated_ips_list = [fip['address'] for fip in body]
     for ip_addr in netaddr.IPNetwork(ip_range).iter_hosts():
         if str(ip_addr) in allocated_ips_list:
             msg = ("Configured unallocated floating IP range is already "
                    "allocated. Configure the correct unallocated range "
                    "as 'floating_ip_range'")
             raise exceptions.InvalidConfiguration(msg)
     return
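For reference, a small illustration (not from the source) of the netaddr call used above: iter_hosts() yields only the usable host addresses of an IPv4 network, skipping the network and broadcast addresses.

    import netaddr
    # A /29 contains 8 addresses; only the 6 host addresses are yielded.
    hosts = [str(ip) for ip in netaddr.IPNetwork('192.0.2.0/29').iter_hosts()]
    # hosts == ['192.0.2.1', '192.0.2.2', '192.0.2.3',
    #           '192.0.2.4', '192.0.2.5', '192.0.2.6']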
Example 7
 def setup_clients(cls):
     super(VolumesActionsTest, cls).setup_clients()
     if CONF.service_available.glance:
         # Check if glance v1 is available to determine which client to use.
         if CONF.image_feature_enabled.api_v1:
             cls.image_client = cls.os.image_client
         elif CONF.image_feature_enabled.api_v2:
             cls.image_client = cls.os.image_client_v2
         else:
             raise exceptions.InvalidConfiguration(
                 'Either api_v1 or api_v2 must be True in '
                 '[image-feature-enabled].')
Example 8
def wait_for_bm_node_status(client,
                            node_id,
                            attr,
                            status,
                            timeout=None,
                            interval=None):
    """Waits for a baremetal node attribute to reach given status.

    :param client: an instance of tempest plugin BaremetalClient.
    :param node_id: identifier of the node.
    :param attr: node's API-visible attribute to check status of.
    :param status: desired status.
    :param timeout: the timeout after which the check is considered as failed.
        Defaults to client.build_timeout.
    :param interval: an interval between show_node calls for status check.
        Defaults to client.build_interval.

    The client should have a show_node(node_id) method to get the node.
    """
    if timeout is None:
        timeout = client.build_timeout
    if interval is None:
        interval = client.build_interval
    if timeout < 0 or interval < 0:
        raise lib_exc.InvalidConfiguration(
            'timeout and interval should be >= 0 or None, current values are: '
            '%(timeout)s, %(interval)s respectively.' %
            dict(timeout=timeout, interval=interval))

    start = int(time.time())
    _, node = client.show_node(node_id)

    while node[attr] != status:
        status_curr = node[attr]

        if int(time.time()) - start >= timeout:
            message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
                       'within the required time (%(timeout)s s).' % {
                           'node_id': node_id,
                           'attr': attr,
                           'status': status,
                           'timeout': timeout
                       })
            message += ' Current state of %s: %s.' % (attr, status_curr)
            caller = misc_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise lib_exc.TimeoutException(message)

        time.sleep(interval)
        _, node = client.show_node(node_id)
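A hedged usage sketch of the waiter (the client object, node dict and target state below are placeholders, not taken from the source):

    # Wait for the node to reach provision_state 'available', relying on the
    # client's build_timeout/build_interval defaults.
    wait_for_bm_node_status(baremetal_client, node_id=node['uuid'],
                            attr='provision_state', status='available')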
Example 9
 def resource_setup(cls):
     super(L3AgentSchedulerTestJSON, cls).resource_setup()
     agents = cls.admin_agents_client.list_agents(
         agent_type=AGENT_TYPE)['agents']
     for agent in agents:
         if agent['configurations']['agent_mode'] in AGENT_MODES:
             cls.agent = agent
             break
     else:
         msg = "L3 Agent Scheduler enabled in conf, but L3 Agent not found"
         raise exceptions.InvalidConfiguration(msg)
     cls.router = cls.create_router()
Example 10
    def create_networks(self, networks_client=None,
                        routers_client=None, subnets_client=None,
                        tenant_id=None, dns_nameservers=None,
                        port_security_enabled=True):
        """Create a network with a subnet connected to a router.

        The baremetal driver is a special case since all nodes are
        on the same shared network.

        :param tenant_id: id of tenant to create resources in.
        :param dns_nameservers: list of dns servers to send to subnet.
        :returns: network, subnet, router
        """
        if CONF.network.shared_physical_network:
            # NOTE(Shrews): This exception is for environments where tenant
            # credential isolation is available, but network separation is
            # not (the current baremetal case). Likely can be removed when
            # test account mgmt is reworked:
            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
            if not CONF.compute.fixed_network_name:
                m = 'fixed_network_name must be specified in config'
                raise lib_exc.InvalidConfiguration(m)
            network = self._get_network_by_name(
                CONF.compute.fixed_network_name)
            router = None
            subnet = None
        else:
            network = self._create_network(
                networks_client=networks_client,
                tenant_id=tenant_id,
                port_security_enabled=port_security_enabled)
            router = self._get_router(client=routers_client,
                                      tenant_id=tenant_id)
            subnet_kwargs = dict(network=network,
                                 subnets_client=subnets_client,
                                 routers_client=routers_client)
            # use explicit check because empty list is a valid option
            if dns_nameservers is not None:
                subnet_kwargs['dns_nameservers'] = dns_nameservers
            subnet = self._create_subnet(**subnet_kwargs)
            if not routers_client:
                routers_client = self.routers_client
            router_id = router['id']
            routers_client.add_router_interface(router_id,
                                                subnet_id=subnet['id'])

            # save a cleanup job to remove this association between
            # router and subnet
            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                            routers_client.remove_router_interface, router_id,
                            subnet_id=subnet['id'])
        return network, subnet, router
Example 11
 def allowed(self, rule_name, role):
     if self.roles_dict is None:
         raise exceptions.InvalidConfiguration(
             "Roles dictionary parsed from requirements YAML file is "
             "empty. Ensure the requirements YAML file is correctly "
             "formatted.")
     try:
         _api = self.roles_dict[rule_name]
         return role in _api
     except KeyError:
         raise KeyError("'%s' API is not defined in the requirements YAML "
                        "file" % rule_name)
Example 12
 def resource_setup(cls):
     super(BaremetalStandaloneScenarioTest, cls).resource_setup()
     base.set_baremetal_api_microversion(cls.api_microversion)
     for v in cls.mandatory_attr:
         if getattr(cls, v) is None:
             raise lib_exc.InvalidConfiguration(
                 "Mandatory attribute %s not set." % v)
     image_checksum = None
     if not uuidutils.is_uuid_like(cls.image_ref):
         image_checksum = cls.image_checksum
     cls.node = cls.boot_node(cls.driver, cls.image_ref,
                              image_checksum=image_checksum)
     cls.node_ip = cls.add_floatingip_to_node(cls.node['uuid'])
Example 13
 def setup_clients(cls):
     super(ImagesMetadataTestJSON, cls).setup_clients()
     # Check if glance v1 is available to determine which client to use. We
     # prefer glance v1 for the compute API tests since the compute image
     # API proxy was written for glance v1.
     if CONF.image_feature_enabled.api_v1:
         cls.glance_client = cls.os_primary.image_client
     elif CONF.image_feature_enabled.api_v2:
         cls.glance_client = cls.os_primary.image_client_v2
     else:
         raise exceptions.InvalidConfiguration(
             'Either api_v1 or api_v2 must be True in '
             '[image-feature-enabled].')
     cls.client = cls.compute_images_client
Example 14
 def setup_clients(cls):
     super(BaseV2ComputeTest, cls).setup_clients()
     cls.servers_client = cls.os_primary.servers_client
     cls.server_groups_client = cls.os_primary.server_groups_client
     cls.flavors_client = cls.os_primary.flavors_client
     cls.compute_images_client = cls.os_primary.compute_images_client
     cls.extensions_client = cls.os_primary.extensions_client
     cls.floating_ip_pools_client = cls.os_primary.floating_ip_pools_client
     cls.floating_ips_client = cls.os_primary.compute_floating_ips_client
     cls.keypairs_client = cls.os_primary.keypairs_client
     cls.security_group_rules_client = (
         cls.os_primary.compute_security_group_rules_client)
     cls.security_groups_client =\
         cls.os_primary.compute_security_groups_client
     cls.quotas_client = cls.os_primary.quotas_client
     cls.compute_networks_client = cls.os_primary.compute_networks_client
     cls.limits_client = cls.os_primary.limits_client
     cls.volumes_extensions_client =\
         cls.os_primary.volumes_extensions_client
     cls.snapshots_extensions_client =\
         cls.os_primary.snapshots_extensions_client
     cls.interfaces_client = cls.os_primary.interfaces_client
     cls.fixed_ips_client = cls.os_primary.fixed_ips_client
     cls.availability_zone_client = cls.os_primary.availability_zone_client
     cls.agents_client = cls.os_primary.agents_client
     cls.aggregates_client = cls.os_primary.aggregates_client
     cls.services_client = cls.os_primary.services_client
     cls.instance_usages_audit_log_client = (
         cls.os_primary.instance_usages_audit_log_client)
     cls.hypervisor_client = cls.os_primary.hypervisor_client
     cls.certificates_client = cls.os_primary.certificates_client
     cls.migrations_client = cls.os_primary.migrations_client
     cls.security_group_default_rules_client = (
         cls.os_primary.security_group_default_rules_client)
     cls.versions_client = cls.os_primary.compute_versions_client
     if CONF.service_available.cinder:
         cls.volumes_client = cls.os_primary.volumes_client_latest
         cls.attachments_client = cls.os_primary.attachments_client_latest
         cls.snapshots_client = cls.os_primary.snapshots_client_latest
     if CONF.service_available.glance:
         if CONF.image_feature_enabled.api_v1:
             cls.images_client = cls.os_primary.image_client
         elif CONF.image_feature_enabled.api_v2:
             cls.images_client = cls.os_primary.image_client_v2
         else:
             raise lib_exc.InvalidConfiguration(
                 'Either api_v1 or api_v2 must be True in '
                 '[image-feature-enabled].')
     cls._check_depends_on_nova_network()
Example 15
    def test_schedule_to_all_nodes(self):
        available_zone = \
            self.os_admin.availability_zone_client.list_availability_zones(
                detail=True)['availabilityZoneInfo']
        hosts = []
        for zone in available_zone:
            if zone['zoneState']['available']:
                for host in zone['hosts']:
                    if 'nova-compute' in zone['hosts'][host] and \
                        zone['hosts'][host]['nova-compute']['available']:
                        hosts.append({
                            'zone': zone['zoneName'],
                            'host_name': host
                        })

        # ensure we have at least as many compute hosts as we expect
        if len(hosts) < CONF.compute.min_compute_nodes:
            raise exceptions.InvalidConfiguration(
                "Host list %s is shorter than min_compute_nodes. "
                "Did a compute worker not boot correctly?" % hosts)

        # create 1 compute for each node, up to the min_compute_nodes
        # threshold (so that things don't get crazy if you have 1000
        # compute nodes but set min to 3).
        servers = []

        for host in hosts[:CONF.compute.min_compute_nodes]:
            # by getting to active state here, we know the instance has
            # landed on the host in question.
            # in order to use the availability_zone:host scheduler hint,
            # the admin client is needed here.
            inst = self.create_server(
                clients=self.os_admin,
                availability_zone='%(zone)s:%(host_name)s' % host)
            server = self.os_admin.servers_client.show_server(
                inst['id'])['server']
            # ensure server is located on the requested host
            self.assertEqual(host['host_name'], server['OS-EXT-SRV-ATTR:host'])
            servers.append(server)

        # make sure we really have the number of servers we think we should
        self.assertEqual(len(servers), CONF.compute.min_compute_nodes,
                         "Incorrect number of servers built %s" % servers)

        # ensure that every server ended up on a different host
        host_ids = [x['hostId'] for x in servers]
        self.assertEqual(
            len(set(host_ids)), len(servers),
            "Incorrect number of distinct host_ids scheduled to %s" % servers)
Example 16
def get_container_and_disk_format():
    a_formats = ['ami', 'ari', 'aki']

    container_format = CONF.image.container_formats[0]
    disk_format = CONF.image.disk_formats[0]

    if container_format in a_formats and container_format != disk_format:
        msg = ("The container format and the disk format don't match. "
               "Container format: %(container)s, Disk format: %(disk)s." % {
                   'container': container_format,
                   'disk': disk_format
               })
        raise exceptions.InvalidConfiguration(message=msg)

    return container_format, disk_format
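An illustrative call (the configured format lists below are assumptions, not values from the source):

    # With CONF.image.container_formats = ['bare', 'ami'] and
    # CONF.image.disk_formats = ['qcow2', 'raw'] this returns ('bare', 'qcow2');
    # a mismatched ami/ari/aki pairing would raise InvalidConfiguration instead.
    container_format, disk_format = get_container_and_disk_format()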
Example 17
 def resource_setup(cls):
     super(L3AgentSchedulerTestJSON, cls).resource_setup()
     body = cls.admin_agents_client.list_agents()
     agents = body['agents']
     for agent in agents:
         # TODO(armax): falling back on default _agent_mode can be
         # dropped as soon as Icehouse is dropped.
         agent_mode = (agent['configurations'].get('agent_mode',
                                                   cls._agent_mode))
         if agent['agent_type'] == AGENT_TYPE and agent_mode in AGENT_MODES:
             cls.agent = agent
             break
     else:
         msg = "L3 Agent Scheduler enabled in conf, but L3 Agent not found"
         raise exceptions.InvalidConfiguration(msg)
     cls.router = cls.create_router()
Example 18
    def get_server_ip(cls, server):
        """Get the server fixed or floating IP.

        Based on the configuration we're in, return a correct ip
        address for validating that a guest is up.
        """
        if CONF.validation.connect_method == 'floating':
            return cls.validation_resources['floating_ip']['ip']
        elif CONF.validation.connect_method == 'fixed':
            addresses = server['addresses'][CONF.validation.network_for_ssh]
            for address in addresses:
                if address['version'] == CONF.validation.ip_version_for_ssh:
                    return address['addr']
            raise exceptions.ServerUnreachable(server_id=server['id'])
        else:
            raise lib_exc.InvalidConfiguration()
Example 19
    def resource_setup(cls):
        super(L3AgentSchedulerTestJSON, cls).resource_setup()
        body = cls.admin_agents_client.list_agents()
        agents = body['agents']
        for agent in agents:
            # TODO(armax): falling back on default _agent_mode can be
            # dropped as soon as Icehouse is dropped.
            agent_mode = (agent['configurations'].get('agent_mode',
                                                      cls._agent_mode))
            if agent['agent_type'] == AGENT_TYPE and agent_mode in AGENT_MODES:
                cls.agent = agent
                break
        else:
            msg = "L3 Agent Scheduler enabled in conf, but L3 Agent not found"
            raise exceptions.InvalidConfiguration(msg)
        cls.router = cls.create_router()

        if CONF.network.dvr_extra_resources:
            # NOTE(armax): If DVR is an available extension, and the created
            # router is indeed a distributed one, more resources need to be
            # provisioned in order to bind the router to the L3 agent in the
            # Liberty release or older; they are not required since the Mitaka
            # release.
            if test.is_extension_enabled('dvr', 'network'):
                cls.is_dvr_router = cls.admin_routers_client.show_router(
                    cls.router['id'])['router'].get('distributed', False)
                if cls.is_dvr_router:
                    cls.network = cls.create_network()
                    cls.create_subnet(cls.network)
                    cls.port = cls.create_port(cls.network)
                    cls.routers_client.add_router_interface(
                        cls.router['id'], port_id=cls.port['id'])
                    # NOTE: Sometimes we have seen this test fail with dvr in
                    # multinode tests, since the dhcp port is not created
                    # before the test gets executed and so the router is not
                    # scheduled on the given agent. By adding the external
                    # gateway info to the router, the router should be properly
                    # scheduled in the dvr_snat node. This is a temporary work
                    # around to prevent a race condition.
                    external_gateway_info = {
                        'network_id': CONF.network.public_network_id,
                        'enable_snat': True
                    }
                    cls.admin_routers_client.update_router(
                        cls.router['id'],
                        external_gateway_info=external_gateway_info)
Example 20
    def boot_node_ramdisk(cls, ramdisk_ref, iso=False):
        """Boot ironic using a ramdisk node.

        The following actions are executed:
          * Create/Pick networks to boot node in.
          * Create Neutron port and attach it to node.
          * Update node image_source.
          * Deploy node.
          * Wait until node is deployed.

        :param ramdisk_ref: Reference to user image or ramdisk to boot
                            the node with.
        :param iso: Boolean, default False, to indicate if the image ref
                    is actually an ISO image.
        """
        if ramdisk_ref is None:
            ramdisk_ref = cls.image_ref

        network, subnet, router = cls.create_networks()
        n_port = cls.create_neutron_port(network_id=network['id'])
        cls.vif_attach(node_id=cls.node['uuid'], vif_id=n_port['id'])
        if iso:
            patch_path = '/instance_info/boot_iso'
        else:
            # NOTE(TheJulia): The non ISO ramdisk path supports this
            # and it being here makes it VERY easy for us to add a test
            # of just a kernel/ramdisk loading from glance at some point.
            patch_path = '/instance_info/image_source'
        patch = [{'path': patch_path, 'op': 'add', 'value': ramdisk_ref}]
        cls.update_node(cls.node['uuid'], patch=patch)
        cls.set_node_provision_state(cls.node['uuid'], 'active')
        if CONF.validation.connect_method == 'floating':
            cls.node_ip = cls.add_floatingip_to_node(cls.node['uuid'])
        elif CONF.validation.connect_method == 'fixed':
            cls.node_ip = cls.get_server_ip(cls.node['uuid'])
        else:
            m = ('Configuration option "[validation]/connect_method" '
                 'must be set.')
            raise lib_exc.InvalidConfiguration(m)
        cls.wait_power_state(cls.node['uuid'],
                             bm.BaremetalPowerStates.POWER_ON)
        cls.wait_provisioning_state(cls.node['uuid'],
                                    bm.BaremetalProvisionStates.ACTIVE,
                                    timeout=CONF.baremetal.active_timeout,
                                    interval=30)
Example 21
 def resource_setup(cls):
     """Class level resource setup for test cases."""
     if (CONF.validation.ip_version_for_ssh not in (4, 6)
             and CONF.service_available.neutron):
         msg = "Invalid IP version %s in ip_version_for_ssh. Use 4 or 6"
         raise lib_exc.InvalidConfiguration(
             msg % CONF.validation.ip_version_for_ssh)
     if hasattr(cls, "os_primary"):
         vr = cls.validation_resources
         cls.validation_resources = vresources.create_validation_resources(
             cls.os_primary,
             use_neutron=CONF.service_available.neutron,
             ethertype='IPv' + str(CONF.validation.ip_version_for_ssh),
             floating_network_id=CONF.network.public_network_id,
             floating_network_name=CONF.network.floating_network_name,
             **vr)
     else:
         LOG.warning("Client manager not found, validation resources not"
                     " created")
Example 22
    def allowed(self, rule_name, roles):
        """Checks if a given rule in a policy is allowed with given role.

        :param string rule_name: Rule to be checked using provided requirements
            file specified by ``[patrole].custom_requirements_file``. Must be
            a key present in this file, under the appropriate component.
        :param List[string] roles: Roles to validate against custom
            requirements file.
        :returns: True if ``roles`` are allowed to perform ``rule_name``, else
            False.
        :rtype: bool
        :raises RbacParsingException: If ``rule_name`` does not exist among the
            keyed policy names in the custom requirements file.
        """
        if not self.roles_dict:
            raise lib_exc.InvalidConfiguration(
                "Roles dictionary parsed from requirements YAML file is "
                "empty. Ensure the requirements YAML file is correctly "
                "formatted.")
        try:
            requirement_roles = self.roles_dict[rule_name]
        except KeyError:
            raise rbac_exceptions.RbacParsingException(
                "'%s' rule name is not defined in the requirements YAML file: "
                "%s" % (rule_name, self.filepath))

        for role_reqs in requirement_roles:
            required_roles = [
                role for role in role_reqs if not role.startswith("!")
            ]
            forbidden_roles = [
                role[1:] for role in role_reqs if role.startswith("!")
            ]

            # User must have all required roles
            required_passed = all([r in roles for r in required_roles])
            # User must not have any forbidden roles
            forbidden_passed = all([r not in forbidden_roles for r in roles])

            if required_passed and forbidden_passed:
                return True

        return False
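A small sketch (rule and role names invented) of the structure this method walks: each rule maps to a list of role groups, and a '!' prefix marks a role the caller must not hold.

    # Hypothetical parsed requirements after loading the YAML file.
    roles_dict = {
        'create_share': [['member'], ['admin', '!reader']],
    }
    # allowed('create_share', ['member'])          -> True  (first group matches)
    # allowed('create_share', ['admin', 'reader']) -> False ('reader' is forbidden)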
Example 23
    def setup_clients(cls):
        super(ScenarioTest, cls).setup_clients()
        # Clients (in alphabetical order)
        cls.flavors_client = cls.manager.flavors_client
        cls.compute_floating_ips_client = (
            cls.manager.compute_floating_ips_client)
        if CONF.service_available.glance:
            # Check if glance v1 is available to determine which client to use.
            if CONF.image_feature_enabled.api_v1:
                cls.image_client = cls.manager.image_client
            elif CONF.image_feature_enabled.api_v2:
                cls.image_client = cls.manager.image_client_v2
            else:
                raise lib_exc.InvalidConfiguration(
                    'Either api_v1 or api_v2 must be True in '
                    '[image-feature-enabled].')
        # Compute image client
        cls.compute_images_client = cls.manager.compute_images_client
        cls.keypairs_client = cls.manager.keypairs_client
        # Nova security groups client
        cls.compute_security_groups_client = (
            cls.manager.compute_security_groups_client)
        cls.compute_security_group_rules_client = (
            cls.manager.compute_security_group_rules_client)
        cls.servers_client = cls.manager.servers_client
        cls.interface_client = cls.manager.interfaces_client
        # Neutron network client
        cls.networks_client = cls.manager.networks_client
        cls.ports_client = cls.manager.ports_client
        cls.routers_client = cls.manager.routers_client
        cls.subnets_client = cls.manager.subnets_client
        cls.floating_ips_client = cls.manager.floating_ips_client
        cls.security_groups_client = cls.manager.security_groups_client
        cls.security_group_rules_client = (
            cls.manager.security_group_rules_client)

        if CONF.volume_feature_enabled.api_v2:
            cls.volumes_client = cls.manager.volumes_v2_client
            cls.snapshots_client = cls.manager.snapshots_v2_client
        else:
            cls.volumes_client = cls.manager.volumes_client
            cls.snapshots_client = cls.manager.snapshots_client
Example 24
def is_alt_available(identity_version):
    # If dynamic credentials are enabled, alt credentials will be available
    if CONF.auth.use_dynamic_credentials:
        return True
    # Check whether the test accounts file can provide an alt user or not
    if CONF.auth.test_accounts_file:
        check_accounts = preprov_creds.PreProvisionedCredentialProvider(
            identity_version=identity_version, name='check_alt',
            **get_preprov_provider_params())
    else:
        raise exceptions.InvalidConfiguration(
            'A valid credential provider is needed')

    try:
        if not check_accounts.is_multi_user():
            return False
        else:
            return True
    except exceptions.InvalidConfiguration:
        return False
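A short usage sketch (behaviour summarised from the code above; the identity version is illustrative):

    # True when [auth]/use_dynamic_credentials is set; otherwise reflects
    # whether the preprovisioned accounts file holds more than one user.
    have_alt = is_alt_available(identity_version='v3')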
Example 25
 def setUp(self):
     super(ShareBasicOpsBase, self).setUp()
     # Set up the image and flavor for the test instance
     # Support both configured and injected values
     if not hasattr(self, 'flavor_ref'):
         self.flavor_ref = CONF.share.client_vm_flavor_ref
     if CONF.share.image_with_share_tools:
         images = self.compute_images_client.list_images()["images"]
         for img in images:
             if img["name"] == CONF.share.image_with_share_tools:
                 self.image_ref = img['id']
                 break
         if not self.image_ref:
             msg = ("Image %s not found" %
                    CONF.share.image_with_share_tools)
             raise exceptions.InvalidConfiguration(message=msg)
     self.ssh_user = CONF.share.image_username
     LOG.debug('Starting test for i:{image}, f:{flavor}. '
               'user: {ssh_user}'.format(image=self.image_ref,
                                         flavor=self.flavor_ref,
                                         ssh_user=self.ssh_user))
Example 26
    def setup_clients(cls):
        # Initialize the admin roles_client to perform role switching.
        admin_mgr = clients.Manager(
            credentials.get_configured_admin_credentials())
        if CONF.identity_feature_enabled.api_v3:
            admin_roles_client = admin_mgr.roles_v3_client
        else:
            raise lib_exc.InvalidConfiguration(
                "Patrole role overriding only supports v3 identity API.")

        cls.admin_roles_client = admin_roles_client

        cls._project_id = cls.os_primary.credentials.tenant_id
        cls._user_id = cls.os_primary.credentials.user_id
        cls._role_inferences_mapping = cls._prepare_role_inferences_mapping()

        cls._init_roles()

        # Change default role to admin
        cls._override_role(False)
        super(RbacUtilsMixin, cls).setup_clients()
Example 27
    def skip_checks(cls):
        super(BaseVolumeTest, cls).skip_checks()

        if not CONF.service_available.cinder:
            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
            raise cls.skipException(skip_msg)
        if cls._api_version == 1:
            if not CONF.volume_feature_enabled.api_v1:
                msg = "Volume API v1 is disabled"
                raise cls.skipException(msg)
        elif cls._api_version == 2:
            if not CONF.volume_feature_enabled.api_v2:
                msg = "Volume API v2 is disabled"
                raise cls.skipException(msg)
        elif cls._api_version == 3:
            if not CONF.volume_feature_enabled.api_v3:
                msg = "Volume API v3 is disabled"
                raise cls.skipException(msg)
        else:
            msg = ("Invalid Cinder API version (%s)" % cls._api_version)
            raise exceptions.InvalidConfiguration(msg)
Example 28
    def create_networks(cls):
        """Create a network with a subnet connected to a router.

        Return the existing network specified in the compute/fixed_network_name
        config option.
        TODO(vsaienko): Add network/subnet/router when we setup
        ironic-standalone with multitenancy.

        :returns: network, subnet, router
        """
        network = None
        subnet = None
        router = None
        if CONF.network.shared_physical_network:
            if not CONF.compute.fixed_network_name:
                m = ('Configuration option "[compute]/fixed_network_name" '
                     'must be set.')
                raise lib_exc.InvalidConfiguration(m)
            network = cls.os_admin.networks_client.list_networks(
                name=CONF.compute.fixed_network_name)['networks'][0]
        return network, subnet, router
Example 29
    def get_class_validation_resources(cls, os_clients):
        """Provision validation resources according to configuration

        This is a wrapper around `create_validation_resources` from
        `tempest.common.validation_resources` that passes parameters from
        Tempest configuration. Only one instance of class level
        validation resources is managed by the helper, so If resources
        were already provisioned before, existing ones will be returned.

        Resources are returned as a dictionary. They are also scheduled for
        automatic cleanup during class teardown using
        `addClassResourcesCleanup`.

        If `CONF.validation.run_validation` is False no resource will be
        provisioned at all.

        @param os_clients: Clients to be used to provision the resources.
        """
        if not CONF.validation.run_validation:
            return

        if os_clients in cls._validation_resources:
            return cls._validation_resources[os_clients]

        if (CONF.validation.ip_version_for_ssh not in (4, 6) and
                CONF.service_available.neutron):
            msg = "Invalid IP version %s in ip_version_for_ssh. Use 4 or 6"
            raise lib_exc.InvalidConfiguration(
                msg % CONF.validation.ip_version_for_ssh)

        resources = vr.create_validation_resources(
            os_clients,
            **cls._validation_resources_params_from_conf())

        cls.addClassResourceCleanup(
            vr.clear_validation_resources, os_clients,
            use_neutron=CONF.service_available.neutron,
            **resources)
        cls._validation_resources[os_clients] = resources
        return resources
Example 30
 def test_create_port_in_allowed_allocation_pools(self):
     network = self.create_network()
     net_id = network['id']
     address = self._get_ipaddress_from_tempest_conf()
     if ((address.version == 4 and address.prefixlen >= 30) or
        (address.version == 6 and address.prefixlen >= 126)):
         msg = ("Subnet %s isn't large enough for the test" % address.cidr)
         raise exceptions.InvalidConfiguration(message=msg)
     allocation_pools = {'allocation_pools': [{'start': str(address[2]),
                                               'end': str(address[-2])}]}
     subnet = self.create_subnet(network, cidr=address,
                                 mask_bits=address.prefixlen,
                                 **allocation_pools)
     self.addCleanup(self.subnets_client.delete_subnet, subnet['id'])
     body = self.ports_client.create_port(network_id=net_id)
     self.addCleanup(self.ports_client.delete_port, body['port']['id'])
     port = body['port']
     ip_address = port['fixed_ips'][0]['ip_address']
     start_ip_address = allocation_pools['allocation_pools'][0]['start']
     end_ip_address = allocation_pools['allocation_pools'][0]['end']
     ip_range = netaddr.IPRange(start_ip_address, end_ip_address)
     self.assertIn(ip_address, ip_range)
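For context, a small illustration (CIDR chosen arbitrarily) of the netaddr indexing used to build the allocation pool above:

    import netaddr
    cidr = netaddr.IPNetwork('192.0.2.0/24')
    str(cidr[2])     # '192.0.2.2'   - pool start, the third address in the subnet
    str(cidr[-2])    # '192.0.2.254' - pool end, the last address before broadcast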