Example #1
def baymodel_data(**kwargs):
    """Generates random baymodel data

    Keypair and image id cannot be random: both are validated for existence
    before the baymodel is created, so the caller must supply valid values.

    :param keypair_id: keypair name
    :param image_id: image id or name
    :returns: BayModelEntity with generated data
    """

    data = {
        "name": data_utils.rand_name('bay'),
        "coe": "swarm-mode",
        "tls_disabled": False,
        "network_driver": None,
        "volume_driver": None,
        "labels": {},
        "public": False,
        "dns_nameserver": "8.8.8.8",
        "flavor_id": data_utils.rand_name('bay'),
        "master_flavor_id": data_utils.rand_name('bay'),
        "external_network_id": config.Config.nic_id,
        "keypair_id": data_utils.rand_name('bay'),
        "image_id": data_utils.rand_name('bay')
    }

    data.update(kwargs)
    model = baymodel_model.BayModelEntity.from_dict(data)

    return model
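
A minimal usage sketch (not part of the original example): the keypair and
image names below are hypothetical; in practice they would come from test
configuration, since only existing resources pass the validation mentioned in
the docstring.

# Hypothetical call: override the fields that must reference real resources;
# every other field keeps its randomly generated value.
model = baymodel_data(
    keypair_id='default',             # assumed existing Nova keypair
    image_id='fedora-atomic-latest')  # assumed existing Glance image
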
    def test_create_list_update_show_delete_security_group(self):
        group_create_body, _ = self._create_security_group()

        # List security groups and verify the created group is in the response
        list_body = self.security_groups_client.list_security_groups()
        secgroup_list = list()
        for secgroup in list_body['security_groups']:
            secgroup_list.append(secgroup['id'])
        self.assertIn(group_create_body['security_group']['id'], secgroup_list)
        # Update the security group
        new_name = data_utils.rand_name('security-')
        new_description = data_utils.rand_name('security-description')
        update_body = self.security_groups_client.update_security_group(
            group_create_body['security_group']['id'],
            name=new_name,
            description=new_description)
        # Verify that the security group was updated
        self.assertEqual(update_body['security_group']['name'], new_name)
        self.assertEqual(update_body['security_group']['description'],
                         new_description)
        # Show details of the updated security group
        show_body = self.security_groups_client.show_security_group(
            group_create_body['security_group']['id'])
        self.assertEqual(show_body['security_group']['name'], new_name)
        self.assertEqual(show_body['security_group']['description'],
                         new_description)
    def test_copy_object_in_same_container(self):
        # create source object
        src_object_name = data_utils.rand_name(name='SrcObject')
        src_data = data_utils.random_bytes(size=len(src_object_name) * 2)
        resp, _ = self.object_client.create_object(self.container_name,
                                                   src_object_name,
                                                   src_data)
        # create destination object
        dst_object_name = data_utils.rand_name(name='DstObject')
        dst_data = data_utils.random_bytes(size=len(dst_object_name) * 3)
        resp, _ = self.object_client.create_object(self.container_name,
                                                   dst_object_name,
                                                   dst_data)
        # copy source object to destination
        headers = {}
        headers['X-Copy-From'] = "%s/%s" % (str(self.container_name),
                                            str(src_object_name))
        resp, body = self.object_client.create_object(self.container_name,
                                                      dst_object_name,
                                                      data=None,
                                                      headers=headers)
        self.assertHeaders(resp, 'Object', 'PUT')

        # check data
        resp, body = self.object_client.get_object(self.container_name,
                                                   dst_object_name)
        self.assertEqual(body, src_data)
    def test_create_update_and_delete_domain_config_groups_and_opts(self):
        domain, _ = self._create_domain_and_config(self.custom_config)

        # Check that updating configuration groups works.
        new_driver = data_utils.rand_name('driver')
        new_limit = data_utils.rand_int_id(0, 100)
        new_group_config = {'identity': {'driver': new_driver,
                                         'list_limit': new_limit}}

        updated_config = self.client.update_domain_group_config(
            domain['id'], 'identity', **new_group_config)['config']

        self.assertEqual(new_driver, updated_config['identity']['driver'])
        self.assertEqual(new_limit, updated_config['identity']['list_limit'])

        # Check that updating individual configuration group options works.
        new_driver = data_utils.rand_name('driver')

        updated_config = self.client.update_domain_group_option_config(
            domain['id'], 'identity', 'driver', driver=new_driver)['config']

        self.assertEqual(new_driver, updated_config['identity']['driver'])

        # Check that deleting individual configuration group options works.
        self.client.delete_domain_group_option_config(
            domain['id'], 'identity', 'driver')
        self.assertRaises(lib_exc.NotFound,
                          self.client.show_domain_group_option_config,
                          domain['id'], 'identity', 'driver')

        # Check that deleting configuration groups works.
        self.client.delete_domain_group_config(domain['id'], 'identity')
        self.assertRaises(lib_exc.NotFound,
                          self.client.show_domain_group_config,
                          domain['id'], 'identity')
Example #5
def cluster_template_data(**kwargs):
    """Generates random cluster_template data

    Keypair and image id cannot be random: both are validated for existence
    before the cluster_template is created, so the caller must supply valid
    values.

    :param keypair_id: keypair name
    :param image_id: image id or name
    :returns: ClusterTemplateEntity with generated data
    """

    data = {
        "name": data_utils.rand_name('cluster'),
        "coe": "swarm",
        "tls_disabled": False,
        "network_driver": None,
        "volume_driver": None,
        "docker_volume_size": 3,
        "labels": {},
        "public": False,
        "dns_nameserver": "8.8.8.8",
        "flavor_id": data_utils.rand_name('cluster'),
        "master_flavor_id": data_utils.rand_name('cluster'),
        "external_network_id": config.Config.nic_id,
        "keypair_id": data_utils.rand_name('cluster'),
        "image_id": data_utils.rand_name('cluster')
    }

    data.update(kwargs)
    model = cluster_template_model.ClusterTemplateEntity.from_dict(data)

    return model
Example #6
 def resource_setup(cls):
     super(EndPointsTestJSON, cls).resource_setup()
     cls.service_ids = list()
     s_name = data_utils.rand_name('service')
     s_type = data_utils.rand_name('type')
     s_description = data_utils.rand_name('description')
     service_data = cls.services_client.create_service(
         name=s_name, type=s_type,
         description=s_description)['OS-KSADM:service']
     cls.service_id = service_data['id']
     cls.service_ids.append(cls.service_id)
     # Create endpoints for use in the LIST and GET test cases
     cls.setup_endpoints = list()
     for _ in range(2):
         region = data_utils.rand_name('region')
         url = data_utils.rand_url()
         endpoint = cls.endpoints_client.create_endpoint(
             service_id=cls.service_id,
             region=region,
             publicurl=url,
             adminurl=url,
             internalurl=url)['endpoint']
         # list_endpoints() will return the 'enabled' field
         endpoint['enabled'] = True
         cls.setup_endpoints.append(endpoint)
Example #7
    def _get_updated_quotas(self):
        # Verify that GET shows the updated quota set of project
        project_name = data_utils.rand_name('cpu_quota_project')
        project_desc = project_name + '-desc'
        project = identity.identity_utils(self.os_admin).create_project(
            name=project_name, description=project_desc)
        project_id = project['id']
        self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
                        project_id)

        self.adm_client.update_quota_set(project_id, ram='5120')
        # Call show_quota_set with detail=true to cover the
        # get_quota_set_details response schema for microversion tests
        quota_set = self.adm_client.show_quota_set(
            project_id, detail=True)['quota_set']
        self.assertEqual(5120, quota_set['ram']['limit'])

        # Verify that GET shows the updated quota set of user
        user_name = data_utils.rand_name('cpu_quota_user')
        password = data_utils.rand_password()
        email = user_name + '@testmail.tm'
        user = identity.identity_utils(self.os_admin).create_user(
            username=user_name, password=password, project=project,
            email=email)
        user_id = user['id']
        self.addCleanup(identity.identity_utils(self.os_admin).delete_user,
                        user_id)

        self.adm_client.update_quota_set(project_id,
                                         user_id=user_id,
                                         ram='2048')
        quota_set = self.adm_client.show_quota_set(
            project_id, user_id=user_id)['quota_set']
        self.assertEqual(2048, quota_set['ram'])
 def test_to_verify_communication_between_two_vms_in_diff_network(self):
     net_id = self.network['id']
     name = data_utils.rand_name('server-smoke')
     group_create_body = self._create_custom_security_group()
     serverid = self._create_server_with_sec_group(
         name, net_id, group_create_body['security_group']['id'])
     self.assertTrue(self.verify_portgroup(self.network['id'], serverid))
     deviceport = self.ports_client.list_ports(device_id=serverid)
     body = self._associate_floating_ips(
         port_id=deviceport['ports'][0]['id'])
     fip1 = body['floatingip']['floating_ip_address']
     network2 = self.create_network()
     sub_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr).next()
     subnet2 = self.create_subnet(network2, cidr=sub_cidr)
     router2 = self.create_router(data_utils.rand_name('router2-'),
                                  external_network_id=self.ext_net_id,
                                  admin_state_up="true")
     self.create_router_interface(router2['id'], subnet2['id'])
     serverid2 = self._create_server_with_sec_group(
         name, network2['id'], group_create_body['security_group']['id'])
     deviceport2 = self.ports_client.list_ports(device_id=serverid2)
     body = self._associate_floating_ips(
         port_id=deviceport2['ports'][0]['id'])
     fip2 = body['floatingip']['floating_ip_address']
     self.assertTrue(self.verify_portgroup(self.network['id'], serverid))
     self.assertTrue(self.verify_portgroup(network2['id'], serverid2))
     self.assertTrue(self._check_remote_connectivity(fip1, fip2))
 def test_creation_of_VM_attach_to_user_created_port(self):
     group_create_body = self._create_custom_security_group()
     network = self.create_network()
     subnet = self.create_subnet(network)
     router = self.create_router(data_utils.rand_name('router-'),
                                 external_network_id=self.ext_net_id,
                                 admin_state_up="true")
     self.create_router_interface(router['id'], subnet['id'])
     post_body = {
         "name": data_utils.rand_name('port-'),
         "security_groups": [group_create_body['security_group']['id']],
         "network_id": network['id'],
         "admin_state_up": True}
     port = self.ports_client.create_port(**post_body)
     self.addCleanup(self.ports_client.delete_port, port['port']['id'])
     name = data_utils.rand_name('server-smoke')
     group_create_body, _ = self._create_security_group()
     serverid = self._create_server_user_created_port(
         name, port['port']['id'])
     self.assertTrue(self.verify_portgroup(network['id'], serverid))
     deviceport = self.ports_client.list_ports(device_id=serverid)
     body = self._associate_floating_ips(
         port_id=deviceport['ports'][0]['id'])
     floatingiptoreach = body['floatingip']['floating_ip_address']
     self._check_public_network_connectivity(floatingiptoreach)
Example #10
 def test_create_update_get_delete_region(self):
     # Create region
     r_description = data_utils.rand_name('description')
     region = self.client.create_region(
         description=r_description,
         parent_region_id=self.setup_regions[0]['id'])['region']
     # This test will delete the region as part of the validation
     # procedure, so it needs a different cleanup method that
     # would be useful in case the test fails at any point before
     # reaching the deletion part.
     self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                     self.client.delete_region, region['id'])
     self.assertEqual(r_description, region['description'])
     self.assertEqual(self.setup_regions[0]['id'],
                      region['parent_region_id'])
     # Update region with new description and parent ID
     r_alt_description = data_utils.rand_name('description')
     region = self.client.update_region(
         region['id'],
         description=r_alt_description,
         parent_region_id=self.setup_regions[1]['id'])['region']
     self.assertEqual(r_alt_description, region['description'])
     self.assertEqual(self.setup_regions[1]['id'],
                      region['parent_region_id'])
     # Get the details of region
     region = self.client.show_region(region['id'])['region']
     self.assertEqual(r_alt_description, region['description'])
     self.assertEqual(self.setup_regions[1]['id'],
                      region['parent_region_id'])
     # Delete the region
     self.client.delete_region(region['id'])
     body = self.client.list_regions()['regions']
     regions_list = [r['id'] for r in body]
     self.assertNotIn(region['id'], regions_list)
    def _create_share_and_share_network(self):
        name = data_utils.rand_name('autotest_share_name')
        description = data_utils.rand_name('autotest_share_description')

        common_share_network = self.client.get_share_network(
            self.client.share_network)
        neutron_net_id = (
            common_share_network['neutron_net_id']
            if 'none' not in common_share_network['neutron_net_id'].lower()
            else None)
        neutron_subnet_id = (
            common_share_network['neutron_subnet_id']
            if 'none' not in common_share_network['neutron_subnet_id'].lower()
            else None)
        share_network = self.client.create_share_network(
            neutron_net_id=neutron_net_id,
            neutron_subnet_id=neutron_subnet_id,
        )

        self.share = self.create_share(
            share_protocol=self.protocol,
            size=1,
            name=name,
            description=description,
            share_network=share_network['id'],
            client=self.client,
            wait_for_creation=True
        )
        self.share = self.client.get_share(self.share['id'])
        return self.share, share_network
Example #12
 def create_share(self, share_protocol=None, size=None,
                  name=None, snapshot_id=None, description=None,
                  metadata=None, share_network_id=None,
                  share_type_id=None, is_public=False):
     metadata = metadata or {}
     if name is None:
         name = data_utils.rand_name("tempest-created-share")
     if description is None:
         description = data_utils.rand_name("tempest-created-share-desc")
     if size is None:
         size = self.share_size
     if share_protocol is None:
         share_protocol = self.share_protocol
     if share_protocol is None:
         raise share_exceptions.ShareProtocolNotSpecified()
     post_body = {
         "share": {
             "share_proto": share_protocol,
             "description": description,
             "snapshot_id": snapshot_id,
             "name": name,
             "size": size,
             "metadata": metadata,
             "is_public": is_public,
         }
     }
     if share_network_id:
         post_body["share"]["share_network_id"] = share_network_id
     if share_type_id:
         post_body["share"]["share_type"] = share_type_id
     body = json.dumps(post_body)
     resp, body = self.post("shares", body)
     self.expected_success(200, resp.status)
     return self._parse_resp(body)
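
A hedged usage sketch of the client method above; the shares_client name, the
protocol and the share network id are placeholders rather than values from the
original.

# Hypothetical call against a client object that exposes create_share() as
# defined above; share_protocol and size could be omitted to fall back to the
# client defaults (self.share_protocol / self.share_size).
share = shares_client.create_share(
    share_protocol='NFS',                  # assumed protocol
    size=1,
    share_network_id='fake-share-net-id',  # placeholder id
    metadata={'purpose': 'tempest-demo'})
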
    def resource_setup(cls):
        super(ConsistencyGroupsNegativeTest, cls).resource_setup()
        # Create share_type
        name = data_utils.rand_name("tempest-manila")
        extra_specs = cls.add_required_extra_specs_to_dict()
        share_type = cls.create_share_type(name, extra_specs=extra_specs)
        cls.share_type = share_type['share_type']

        # Create a consistency group
        cls.consistency_group = cls.create_consistency_group(
            share_type_ids=[cls.share_type['id']])

        # Create share inside consistency group
        cls.share_name = data_utils.rand_name("tempest-share-name")
        cls.share_desc = data_utils.rand_name("tempest-share-description")
        cls.share_size = 1
        cls.share = cls.create_share(
            name=cls.share_name,
            description=cls.share_desc,
            size=cls.share_size,
            consistency_group_id=cls.consistency_group['id'],
            share_type_id=cls.share_type['id'],
        )

        # Create a cgsnapshot of the consistency group
        cls.cgsnap_name = data_utils.rand_name("tempest-cgsnap-name")
        cls.cgsnap_desc = data_utils.rand_name("tempest-cgsnap-description")
        cls.cgsnapshot = cls.create_cgsnapshot_wait_for_active(
            cls.consistency_group["id"],
            name=cls.cgsnap_name,
            description=cls.cgsnap_desc)
 def test_service_set(self):
     service_name = self._create_dummy_service()
     # set service
     new_service_name = data_utils.rand_name("NewTestService")
     new_service_description = data_utils.rand_name("description")
     new_service_type = data_utils.rand_name("NewTestType")
     raw_output = self.openstack(
         "service set "
         "--type %(type)s "
         "--name %(name)s "
         "--description %(description)s "
         "--disable "
         "%(service)s"
         % {
             "type": new_service_type,
             "name": new_service_name,
             "description": new_service_description,
             "service": service_name,
         }
     )
     self.assertEqual(0, len(raw_output))
     # get service details
     raw_output = self.openstack("service show %s" % new_service_name)
     # assert service details
     service = self.parse_show_as_object(raw_output)
     self.assertEqual(new_service_type, service["type"])
     self.assertEqual(new_service_name, service["name"])
     self.assertEqual(new_service_description, service["description"])
 def test_create_update_get_delete_record(self):
     # Create Domain
     name = data_utils.rand_name('domain') + '.com.'
     email = data_utils.rand_name('dns') + '@testmail.com'
     _, domain = self.os.domains_client.create_domain(name, email)
     self.addCleanup(self.os.domains_client.delete_domain, domain['id'])
     # Create Record
     r_name = 'www.' + name
     r_data = "192.0.2.4"
     _, record = self.client.create_record(domain['id'],
                                           name=r_name, data=r_data,
                                           type='A')
     self.addCleanup(self._delete_record, domain['id'], record['id'])
     self.assertIsNotNone(record['id'])
     self.assertEqual(domain['id'], record['domain_id'])
     self.assertEqual(r_name, record['name'])
     self.assertEqual(r_data, record['data'])
     self.assertEqual('A', record['type'])
     # Update Record with data and ttl
     r_data1 = "192.0.2.5"
     r_ttl = 3600
     _, update_record = self.client.update_record(domain['id'],
                                                  record['id'],
                                                  name=r_name, type='A',
                                                  data=r_data1, ttl=r_ttl)
     self.assertEqual(r_data1, update_record['data'])
     self.assertEqual(r_ttl, update_record['ttl'])
     # GET record
     _, get_record = self.client.get_record(domain['id'], record['id'])
     self.assertEqual(update_record['data'], get_record['data'])
     self.assertEqual(update_record['name'], get_record['name'])
     self.assertEqual(update_record['type'], get_record['type'])
     self.assertEqual(update_record['ttl'], get_record['ttl'])
     self.assertEqual(update_record['domain_id'], get_record['domain_id'])
Example #16
    def test_check_simple_image_attributes(self):
        name = data_utils.rand_name('image')
        desc = data_utils.rand_name('desc for image')
        image_id, image_clean = self._create_image(name, desc)

        data = self.client.describe_image_attribute(
            ImageId=image_id, Attribute='kernel')
        self.assertIn('KernelId', data)

        data = self.client.describe_image_attribute(
            ImageId=image_id, Attribute='ramdisk')
        self.assertIn('RamdiskId', data)

        # description
        data = self.client.describe_image_attribute(
            ImageId=image_id, Attribute='description')
        self.assertIn('Description', data)
        self.assertIn('Value', data['Description'])
        self.assertEqual(desc, data['Description']['Value'])

        def _modify_description(**kwargs):
            self.client.modify_image_attribute(ImageId=image_id, **kwargs)
            data = self.client.describe_image_attribute(
                ImageId=image_id, Attribute='description')
            self.assertEqual(new_desc, data['Description']['Value'])

        new_desc = data_utils.rand_name('new desc')
        _modify_description(Attribute='description', Value=new_desc)
        _modify_description(Description={'Value': new_desc})

        data = self.client.deregister_image(ImageId=image_id)
        self.cancelResourceCleanUp(image_clean)
    def test_create_update_delete_ike_policy(self):
        # Creates an IKE policy
        name = data_utils.rand_name('ike-policy')
        body = (self.client.create_ikepolicy(
                name=name,
                ike_version="v1",
                encryption_algorithm="aes-128",
                auth_algorithm="sha1"))
        ikepolicy = body['ikepolicy']
        self.assertIsNotNone(ikepolicy['id'])
        self.addCleanup(self._delete_ike_policy, ikepolicy['id'])

        # Update IKE Policy
        new_ike = {'name': data_utils.rand_name("New-IKE"),
                   'description': "Updated ike policy",
                   'encryption_algorithm': "aes-256",
                   'ike_version': "v2",
                   'pfs': "group14",
                   'lifetime': {'units': "seconds", 'value': 2000}}
        self.client.update_ikepolicy(ikepolicy['id'], **new_ike)
        # Confirm the update was successful by fetching the policy with 'show'
        body = self.client.show_ikepolicy(ikepolicy['id'])
        ike_policy = body['ikepolicy']
        for key, value in six.iteritems(new_ike):
            self.assertIn(key, ike_policy)
            self.assertEqual(value, ike_policy[key])

        # Verify the IKE policy was deleted
        self.client.delete_ikepolicy(ikepolicy['id'])
        body = self.client.list_ikepolicies()
        ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
        self.assertNotIn(ike_policy['id'], ikepolicies)
    def test_create_update_delete_tag(self):
        # Create a namespace
        namespace = self.create_namespace()
        self._create_namespace_tags(namespace)
        # Create a tag
        tag_name = data_utils.rand_name('tag_name')
        self.namespace_tags_client.create_namespace_tag(
            namespace=namespace['namespace'], tag_name=tag_name)

        body = self.namespace_tags_client.show_namespace_tag(
            namespace['namespace'], tag_name)
        self.assertEqual(tag_name, body['name'])
        # Update tag definition
        update_tag_definition = data_utils.rand_name('update-tag')
        body = self.namespace_tags_client.update_namespace_tag(
            namespace['namespace'], tag_name=tag_name,
            name=update_tag_definition)
        self.assertEqual(update_tag_definition, body['name'])
        # Delete tag definition
        self.namespace_tags_client.delete_namespace_tag(
            namespace['namespace'], update_tag_definition)
        # List namespace tags and validate deletion
        namespace_tags = [
            namespace_tag['name'] for namespace_tag in
            self.namespace_tags_client.list_namespace_tags(
                namespace['namespace'])['tags']]
        self.assertNotIn(update_tag_definition, namespace_tags)
Example #19
    def test_group_type_create_list_show(self):
        # Create/list/show group type.
        name = data_utils.rand_name(self.__class__.__name__ + '-group-type')
        description = data_utils.rand_name("group-type-description")
        group_specs = {"consistent_group_snapshot_enabled": "<is> False"}
        params = {'name': name,
                  'description': description,
                  'group_specs': group_specs,
                  'is_public': True}
        body = self.create_group_type(**params)
        self.assertIn('name', body)
        err_msg = ("The created group_type %(var)s is not equal to the "
                   "requested %(var)s")
        self.assertEqual(name, body['name'], err_msg % {"var": "name"})
        self.assertEqual(description, body['description'],
                         err_msg % {"var": "description"})

        group_list = (
            self.admin_group_types_client.list_group_types()['group_types'])
        self.assertIsInstance(group_list, list)
        self.assertNotEmpty(group_list)

        fetched_group_type = self.admin_group_types_client.show_group_type(
            body['id'])['group_type']
        for key in params.keys():
            self.assertEqual(params[key], fetched_group_type[key],
                             '%s of the fetched group_type is different '
                             'from the created group_type' % key)
Example #20
    def test_get_share_with_share_type(self):

        # Data
        share_name = data_utils.rand_name("share")
        shr_type_name = data_utils.rand_name("share-type")
        extra_specs = self.add_required_extra_specs_to_dict({
            "storage_protocol": CONF.share.capability_storage_protocol,
        })

        # Create share type
        st_create = self.create_share_type(
            shr_type_name, extra_specs=extra_specs)

        # Create share with share type
        share = self.create_share(
            name=share_name, share_type_id=st_create["share_type"]["id"])
        self.assertEqual(share["name"], share_name)
        self.shares_client.wait_for_share_status(share["id"], "available")

        # Verify share info
        get = self.shares_v2_client.get_share(share["id"], version="2.5")
        self.assertEqual(share_name, get["name"])
        self.assertEqual(share["id"], get["id"])
        self.assertEqual(shr_type_name, get["share_type"])

        get = self.shares_v2_client.get_share(share["id"], version="2.6")
        self.assertEqual(st_create["share_type"]["id"], get["share_type"])
        self.assertEqual(shr_type_name, get["share_type_name"])
    def setUpClass(cls):
        # prepare v3 env
        os.environ['OS_IDENTITY_API_VERSION'] = '3'
        auth_url = os.environ.get('OS_AUTH_URL')
        auth_url = auth_url.replace('v2.0', 'v3')
        os.environ['OS_AUTH_URL'] = auth_url

        # create dummy domain
        cls.domain_name = data_utils.rand_name('TestDomain')
        cls.domain_description = data_utils.rand_name('description')
        cls.openstack(
            'domain create '
            '--description %(description)s '
            '--enable '
            '%(name)s' % {'description': cls.domain_description,
                          'name': cls.domain_name})

        # create dummy project
        cls.project_name = data_utils.rand_name('TestProject')
        cls.project_description = data_utils.rand_name('description')
        cls.openstack(
            'project create '
            '--domain %(domain)s '
            '--description %(description)s '
            '--enable '
            '%(name)s' % {'domain': cls.domain_name,
                          'description': cls.project_description,
                          'name': cls.project_name})
    def _check_create_cluster_template(self):
        ng_template_name = data_utils.rand_name('sahara-ng-template')
        ng_template = self.create_node_group_template(ng_template_name,
                                                      **self.worker_template)

        full_cluster_template = self.cluster_template.copy()
        full_cluster_template['node_groups'] = [
            {
                'name': 'master-node',
                'flavor_id': TEMPEST_CONF.compute.flavor_ref,
                'node_processes': ['namenode'],
                'count': 1
            },
            {
                'name': 'worker-node',
                'node_group_template_id': ng_template.id,
                'count': 3
            }
        ]

        template_name = data_utils.rand_name('sahara-cluster-template')

        # create cluster template
        resp_body = self.create_cluster_template(template_name,
                                                 **full_cluster_template)

        # check that template created successfully
        self.assertEqual(template_name, resp_body.name)
        self.assertDictContainsSubset(self.cluster_template,
                                      resp_body.__dict__)

        return resp_body.id, template_name
 def _create_dummy_user(self, add_clean_up=True):
     username = data_utils.rand_name('TestUser')
     password = data_utils.rand_name('password')
     email = data_utils.rand_name() + '@example.com'
     description = data_utils.rand_name('description')
     raw_output = self.openstack(
         'user create '
         '--domain %(domain)s '
         '--project %(project)s '
         '--project-domain %(project_domain)s '
         '--password %(password)s '
         '--email %(email)s '
         '--description %(description)s '
         '--enable '
         '%(name)s' % {'domain': self.domain_name,
                       'project': self.project_name,
                       'project_domain': self.domain_name,
                       'email': email,
                       'password': password,
                       'description': description,
                       'name': username})
     if add_clean_up:
         self.addCleanup(
             self.openstack,
             'user delete %s' % self.parse_show_as_object(raw_output)['id'])
     items = self.parse_show(raw_output)
     self.assert_show_fields(items, self.USER_FIELDS)
     return username
 def test_application_credential_create_with_options(self):
     name = data_utils.rand_name('name')
     secret = data_utils.rand_name('secret')
     description = data_utils.rand_name('description')
     tomorrow = (datetime.datetime.utcnow() +
                 datetime.timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S%z')
     role1, role2 = self._create_role_assignments()
     raw_output = self.openstack('application credential create %(name)s'
                                 ' --secret %(secret)s'
                                 ' --description %(description)s'
                                 ' --expiration %(tomorrow)s'
                                 ' --role %(role1)s'
                                 ' --role %(role2)s'
                                 ' --unrestricted'
                                 % {'name': name,
                                    'secret': secret,
                                    'description': description,
                                    'tomorrow': tomorrow,
                                    'role1': role1,
                                    'role2': role2})
     self.addCleanup(
         self.openstack,
         'application credential delete %(name)s' % {'name': name})
     items = self.parse_show(raw_output)
     self.assert_show_fields(items, self.APPLICATION_CREDENTIAL_FIELDS)
Example #25
    def create_trustor_and_roles(self):
        # create a project that trusts will be granted on
        trustor_project_name = data_utils.rand_name(name='project')
        project = self.projects_client.create_project(
            trustor_project_name,
            domain_id=CONF.identity.default_domain_id)['project']
        self.trustor_project_id = project['id']
        self.assertIsNotNone(self.trustor_project_id)

        # Create a trustor User
        trustor_username = data_utils.rand_name('user')
        u_desc = trustor_username + 'description'
        u_email = trustor_username + '@testmail.xx'
        trustor_password = data_utils.rand_password()
        user = self.users_client.create_user(
            name=trustor_username,
            description=u_desc,
            password=trustor_password,
            email=u_email,
            project_id=self.trustor_project_id,
            domain_id=CONF.identity.default_domain_id)['user']
        self.trustor_user_id = user['id']

        # And two roles, one we'll delegate and one we won't
        self.delegated_role = data_utils.rand_name('DelegatedRole')
        self.not_delegated_role = data_utils.rand_name('NotDelegatedRole')

        role = self.roles_client.create_role(name=self.delegated_role)['role']
        self.delegated_role_id = role['id']

        role = self.roles_client.create_role(
            name=self.not_delegated_role)['role']
        self.not_delegated_role_id = role['id']

        # Assign roles to trustor
        self.roles_client.create_user_role_on_project(
            self.trustor_project_id,
            self.trustor_user_id,
            self.delegated_role_id)
        self.roles_client.create_user_role_on_project(
            self.trustor_project_id,
            self.trustor_user_id,
            self.not_delegated_role_id)

        # Get trustee user ID, use the demo user
        trustee_username = self.non_admin_client.user
        self.trustee_user_id = self.get_user_by_name(trustee_username)['id']
        self.assertIsNotNone(self.trustee_user_id)

        # Initialize a new client with the trustor credentials
        creds = common_creds.get_credentials(
            identity_version='v3',
            username=trustor_username,
            password=trustor_password,
            user_domain_id=CONF.identity.default_domain_id,
            tenant_name=trustor_project_name,
            project_domain_id=CONF.identity.default_domain_id,
            domain_id=CONF.identity.default_domain_id)
        os = clients.Manager(credentials=creds)
        self.trustor_client = os.trusts_client
Example #26
    def test_tag_security_group(self):
        cidr = '10.1.0.0/16'
        data = self.client.create_vpc(CidrBlock=cidr)
        vpc_id = data['Vpc']['VpcId']
        dv_clean = self.addResourceCleanUp(
            self.client.delete_vpc, VpcId=vpc_id)

        name = data_utils.rand_name('sgName')
        desc = data_utils.rand_name('sgDesc')
        data = self.client.create_security_group(VpcId=vpc_id,
                                                 GroupName=name,
                                                 Description=desc)
        group_id = data['GroupId']
        res_clean = self.addResourceCleanUp(self.client.delete_security_group,
                                            GroupId=group_id)
        time.sleep(2)

        def describe_func(*args, **kwargs):
            data = self.client.describe_security_groups(*args, **kwargs)
            self.assertEqual(1, len(data['SecurityGroups']))
            self.assertEqual(group_id,
                             data['SecurityGroups'][0]['GroupId'])

        self._test_tag_resource(group_id, 'security-group', describe_func)

        self.client.delete_security_group(GroupId=group_id)
        self.cancelResourceCleanUp(res_clean)

        self.client.delete_vpc(VpcId=vpc_id)
        self.cancelResourceCleanUp(dv_clean)
        self.get_vpc_waiter().wait_delete(vpc_id)
 def test_project_create(self):
     project_name = data_utils.rand_name('TestProject')
     description = data_utils.rand_name('description')
     raw_output = self.openstack(
         'project create '
         '--domain %(domain)s '
         '--description %(description)s '
         '--enable '
         '--property k1=v1 '
         '--property k2=v2 '
         '%(name)s' % {'domain': self.domain_name,
                       'description': description,
                       'name': project_name})
     self.addCleanup(
         self.openstack,
         'project delete '
         '--domain %(domain)s '
         '%(name)s' % {'domain': self.domain_name,
                       'name': project_name}
     )
     items = self.parse_show(raw_output)
     show_fields = list(self.PROJECT_FIELDS)
     show_fields.extend(['k1', 'k2'])
     self.assert_show_fields(items, show_fields)
     project = self.parse_show_as_object(raw_output)
     self.assertEqual('v1', project['k1'])
     self.assertEqual('v2', project['k2'])
Example #28
 def test_add_multiple_router_interfaces(self):
     network_name = data_utils.rand_name(self.__class__.__name__)
     network01 = self.networks_client.create_network(
         name=network_name)['network']
     self.addCleanup(self.networks_client.delete_network,
                     network01['id'])
     network_name = data_utils.rand_name(self.__class__.__name__)
     network02 = self.networks_client.create_network(
         name=network_name)['network']
     self.addCleanup(self.networks_client.delete_network,
                     network02['id'])
     subnet01 = self.create_subnet(network01)
     self.addCleanup(self.subnets_client.delete_subnet, subnet01['id'])
     sub02_cidr = self.cidr.next()
     subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
     self.addCleanup(self.subnets_client.delete_subnet, subnet02['id'])
     router = self.create_router()
     self.addCleanup(self.delete_router, router)
     interface01 = self._add_router_interface_with_subnet_id(router['id'],
                                                             subnet01['id'])
     self._verify_router_interface(router['id'], subnet01['id'],
                                   interface01['port_id'])
     interface02 = self._add_router_interface_with_subnet_id(router['id'],
                                                             subnet02['id'])
     self._verify_router_interface(router['id'], subnet02['id'],
                                   interface02['port_id'])
    def test_update_consistency_group_v2_4(self):

        # Get consistency_group
        consistency_group = self.shares_v2_client.get_consistency_group(
            self.consistency_group['id'], version='2.4')
        self.assertEqual(self.cg_name, consistency_group["name"])
        self.assertEqual(self.cg_desc, consistency_group["description"])

        # Update consistency_group
        new_name = data_utils.rand_name("tempest-new-name")
        new_desc = data_utils.rand_name("tempest-new-description")
        updated = self.shares_v2_client.update_consistency_group(
            consistency_group["id"],
            name=new_name,
            description=new_desc,
            version='2.4'
        )
        self.assertEqual(new_name, updated["name"])
        self.assertEqual(new_desc, updated["description"])

        # Get consistency_group
        consistency_group = self.shares_v2_client.get_consistency_group(
            self.consistency_group['id'], version='2.4')
        self.assertEqual(new_name, consistency_group["name"])
        self.assertEqual(new_desc, consistency_group["description"])
Example #30
 def resource_setup(cls):
     super(InheritsV3TestJSON, cls).resource_setup()
     u_name = data_utils.rand_name('user-')
     u_desc = '%s description' % u_name
     u_email = '%s@testmail.tm' % u_name
     u_password = data_utils.rand_password()
     cls.domain = cls.create_domain()
     cls.project = cls.projects_client.create_project(
         data_utils.rand_name('project-'),
         description=data_utils.rand_name('project-desc-'),
         domain_id=cls.domain['id'])['project']
     cls.addClassResourceCleanup(cls.projects_client.delete_project,
                                 cls.project['id'])
     cls.group = cls.groups_client.create_group(
         name=data_utils.rand_name('group-'), project_id=cls.project['id'],
         domain_id=cls.domain['id'])['group']
     cls.addClassResourceCleanup(cls.groups_client.delete_group,
                                 cls.group['id'])
     if not CONF.identity_feature_enabled.immutable_user_source:
         cls.user = cls.users_client.create_user(
             name=u_name,
             description=u_desc,
             password=u_password,
             email=u_email,
             project_id=cls.project['id'],
             domain_id=cls.domain['id']
         )['user']
         cls.addClassResourceCleanup(cls.users_client.delete_user,
                                     cls.user['id'])
 def test_update_non_existent_subnetpool(self):
     non_exist_id = data_utils.rand_name('subnetpool')
     self.assertRaises(lib_exc.NotFound, self.client.update_subnetpool,
                       non_exist_id, name='foo-name')
 def test_delete_non_existent_subnetpool(self):
     non_exist_id = data_utils.rand_name('subnetpool')
     self.assertRaises(lib_exc.NotFound, self.client.delete_subnetpool,
                       non_exist_id)
 def test_tenant_create_subnetpool_associate_shared_address_scope(self):
     address_scope = self.create_address_scope(
         name=data_utils.rand_name('smoke-address-scope'), is_admin=True,
         shared=True, ip_version=4)
     self.assertRaises(lib_exc.BadRequest, self._create_subnetpool,
                       address_scope_id=address_scope['id'])
 def test_create_subnetpool_associate_address_scope_of_other_owner(self):
     address_scope = self.create_address_scope(
         name=data_utils.rand_name('smoke-address-scope'), is_admin=True,
         ip_version=4)
     self.assertRaises(lib_exc.NotFound, self._create_subnetpool,
                       address_scope_id=address_scope['id'])
def create_ssh_security_group(clients,
                              add_rule=False,
                              ethertype='IPv4',
                              use_neutron=True):
    """Create a security group for ping/ssh testing

    Create a security group to be attached to a VM using the nova or neutron
    clients. If rules are added, the group can be attached to a VM to enable
    connectivity validation over ICMP and further testing over SSH.

    :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
        or of a subclass of it. Resources are provisioned using clients from
        `clients`.
    :param add_rule: Whether security group rules are provisioned or not.
        Defaults to `False`.
    :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
    :param use_neutron: When True resources are provisioned via neutron, when
        False resources are provisioned via nova.
    :returns: A dictionary with the security group as returned by the API.

    Examples::

        from tempest.common import validation_resources as vr
        from tempest.lib import auth
        from tempest.lib.services import clients

        creds = auth.get_credentials('http://mycloud/identity/v3',
                                     username='******', project_name='me',
                                     password='******', domain_name='Default')
        osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
        # Security group for IPv4 tests
        sg4 = vr.create_ssh_security_group(osclients, add_rule=True)
        # Security group for IPv6 tests
        sg6 = vr.create_ssh_security_group(osclients, ethertype='IPv6',
                                           add_rule=True)
    """
    network_service = _network_service(clients, use_neutron)
    security_groups_client = network_service.SecurityGroupsClient()
    security_group_rules_client = network_service.SecurityGroupRulesClient()
    # Security Group clients for nova and neutron behave the same
    sg_name = data_utils.rand_name('securitygroup-')
    sg_description = data_utils.rand_name('description-')
    security_group = security_groups_client.create_security_group(
        name=sg_name, description=sg_description)['security_group']
    # Security Group Rules clients require different parameters depending on
    # the network service in use
    if add_rule:
        try:
            if use_neutron:
                security_group_rules_client.create_security_group_rule(
                    security_group_id=security_group['id'],
                    protocol='tcp',
                    ethertype=ethertype,
                    port_range_min=22,
                    port_range_max=22,
                    direction='ingress')
                security_group_rules_client.create_security_group_rule(
                    security_group_id=security_group['id'],
                    protocol='icmp',
                    ethertype=ethertype,
                    direction='ingress')
            else:
                security_group_rules_client.create_security_group_rule(
                    parent_group_id=security_group['id'],
                    ip_protocol='tcp',
                    from_port=22,
                    to_port=22)
                security_group_rules_client.create_security_group_rule(
                    parent_group_id=security_group['id'],
                    ip_protocol='icmp',
                    from_port=-1,
                    to_port=-1)
        except Exception as sgc_exc:
            # If adding security group rules fails, we cleanup the SG before
            # re-raising the failure up
            with excutils.save_and_reraise_exception():
                try:
                    msg = ('Error while provisioning security group rules in '
                           'security group %s. Trying to cleanup.')
                    # The exceptions logging is already handled, so using
                    # debug here just to provide more context
                    LOG.debug(msg, sgc_exc)
                    clear_validation_resources(clients,
                                               keypair=None,
                                               floating_ip=None,
                                               security_group=security_group,
                                               use_neutron=use_neutron)
                except Exception as cleanup_exc:
                    msg = ('Error during cleanup of a security group. '
                           'The cleanup was triggered by an exception during '
                           'the provisioning of security group rules.\n'
                           'Provisioning exception: %s\n'
                           'First cleanup exception: %s')
                    LOG.exception(msg, sgc_exc, cleanup_exc)
    LOG.debug(
        "SSH Validation resource security group with tcp and icmp "
        "rules %s created", sg_name)
    return security_group
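
As a follow-up to the docstring example above (which exercises the default
neutron path), a sketch of the nova-network variant; osclients is the
hypothetical ServiceClients instance from that docstring.

# With use_neutron=False the rules are created through the nova-style
# parameters shown above (parent_group_id, ip_protocol, from_port/to_port).
sg_nova = create_ssh_security_group(
    osclients, add_rule=True, use_neutron=False)
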
 def create_share_network(self):
     share_network = self._create_share_network(
         neutron_net_id=self.network['id'],
         neutron_subnet_id=self.subnet['id'],
         name=data_utils.rand_name("sn-name"))
     return share_network
 def test_update_non_existent_router_returns_404(self):
     router = data_utils.rand_name('non_exist_router')
     self.assertRaises(lib_exc.NotFound, self.routers_client.update_router,
                       router, name="new_name")
Example #38
 def test_try_delete_share_type_by_nonexistent_id(self):
     self.assertRaises(lib_exc.NotFound,
                       self.shares_client.delete_share_type,
                       data_utils.rand_name("fake"))
Example #39
 def test_create_share_with_nonexistent_share_type(self):
     self.assertRaises(lib_exc.NotFound,
                       self.create_share,
                       share_type_id=data_utils.rand_name("fake"))
Example #40
 def _create_share_type(self):
     name = data_utils.rand_name("unique_st_name")
     extra_specs = self.add_required_extra_specs_to_dict({"key": "value"})
     return self.create_share_type(name, extra_specs=extra_specs)
 def _create_volume_from_image(self):
     """Create a cinder volume from the default image."""
     image_id = CONF.compute.image_ref
     vol_name = data_utils.rand_name(self.__class__.__name__ +
                                     '-volume-origin')
     return self.create_volume(name=vol_name, image_id=image_id)
Example #42
    def test_resize_server_revert_deleted_flavor(self):
        """Test reverting resized server with original flavor deleted

        Tests that we can revert the resize on an instance whose original
        flavor has been deleted.
        """

        # First we have to create a flavor that we can delete, so make a copy
        # of the normal flavor from which we'd create a server.
        flavor = self.admin_flavors_client.show_flavor(
            self.flavor_ref)['flavor']
        flavor = self.admin_flavors_client.create_flavor(
            name=data_utils.rand_name('test_resize_flavor_'),
            ram=flavor['ram'],
            disk=flavor['disk'],
            vcpus=flavor['vcpus'])['flavor']
        self.addCleanup(self._flavor_clean_up, flavor['id'])

        # Set extra specs same as self.flavor_ref for the created flavor,
        # because the environment may need some special extra specs to
        # create server which should have been contained in
        # self.flavor_ref.
        extra_spec_keys = self.admin_flavors_client.list_flavor_extra_specs(
            self.flavor_ref)['extra_specs']
        if extra_spec_keys:
            self.admin_flavors_client.set_flavor_extra_spec(
                flavor['id'], **extra_spec_keys)

        # Now boot a server with the copied flavor.
        server = self.create_test_server(wait_until='ACTIVE',
                                         flavor=flavor['id'])
        server = self.servers_client.show_server(server['id'])['server']

        # If 'id' is not in server['flavor'], we can only compare the flavor
        # details, so save the to-be-deleted flavor's details here for the
        # comparison after the server is resized.
        if not server['flavor'].get('id'):
            pre_flavor = {}
            body = self.flavors_client.show_flavor(flavor['id'])['flavor']
            for key in ['name', 'ram', 'vcpus', 'disk']:
                pre_flavor[key] = body[key]

        # Delete the flavor we used to boot the instance.
        self._flavor_clean_up(flavor['id'])

        # Now resize the server and wait for it to go into verify state.
        self.servers_client.resize_server(server['id'], self.flavor_ref_alt)
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'VERIFY_RESIZE')

        # Now revert the resize, it should be OK even though the original
        # flavor used to boot the server was deleted.
        self.servers_client.revert_resize_server(server['id'])
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'ACTIVE')

        server = self.servers_client.show_server(server['id'])['server']
        if server['flavor'].get('id'):
            msg = ('server flavor is not same as flavor!')
            self.assertEqual(flavor['id'], server['flavor']['id'], msg)
        else:
            self.assertEqual(
                pre_flavor['name'], server['flavor']['original_name'],
                "original_name in server flavor is not same as "
                "flavor name!")
            for key in ['ram', 'vcpus', 'disk']:
                msg = ('attribute %s in server flavor is not same as '
                       'flavor!' % key)
                self.assertEqual(pre_flavor[key], server['flavor'][key], msg)
Example #43
    def test_full_migration(self, test_type):
        # We are testing with DHSS=True only because it allows us to specify
        # new_share_network.

        share = self.create_share(
            share_protocol='nfs',
            size=1,
            name=data_utils.rand_name('autotest_share_name'),
            client=self.get_user_client(),
            share_type=self.old_type['ID'],
            share_network=self.old_share_net['id'],
            wait_for_creation=True)
        share = self.admin_client.get_share(share['id'])

        pools = self.admin_client.pool_list(detail=True)

        dest_pool = utils.choose_matching_backend(
            share, pools, self.new_type)

        self.assertIsNotNone(dest_pool)

        source_pool = share['host']

        new_type = self.new_type
        if test_type == 'error':
            statuses = constants.TASK_STATE_MIGRATION_ERROR
            new_type = self.error_type
        else:
            statuses = (constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE,
                        constants.TASK_STATE_DATA_COPYING_COMPLETED)

        self.admin_client.migration_start(
            share['id'], dest_pool, writable=True, nondisruptive=False,
            preserve_metadata=True, preserve_snapshots=True,
            force_host_assisted_migration=False,
            new_share_network=self.new_share_net['id'],
            new_share_type=new_type['ID'])

        share = self.admin_client.wait_for_migration_task_state(
            share['id'], dest_pool, statuses)

        progress = self.admin_client.migration_get_progress(share['id'])
        self.assertEqual('100', progress['total_progress'])

        self.assertEqual(source_pool, share['host'])
        self.assertEqual(self.old_type['ID'], share['share_type'])
        self.assertEqual(self.old_share_net['id'], share['share_network_id'])

        if test_type == 'error':
            self.assertEqual(statuses, progress['task_state'])
        else:
            if test_type == 'success':
                self.admin_client.migration_complete(share['id'])
                statuses = constants.TASK_STATE_MIGRATION_SUCCESS
            elif test_type == 'cancel':
                self.admin_client.migration_cancel(share['id'])
                statuses = constants.TASK_STATE_MIGRATION_CANCELLED

            share = self.admin_client.wait_for_migration_task_state(
                share['id'], dest_pool, statuses)
            progress = self.admin_client.migration_get_progress(share['id'])
            self.assertEqual(statuses, progress['task_state'])
            if test_type == 'success':
                self.assertEqual(dest_pool, share['host'])
                self.assertEqual(new_type['ID'], share['share_type'])
                self.assertEqual(self.new_share_net['id'],
                                 share['share_network_id'])
            else:
                self.assertEqual(source_pool, share['host'])
                self.assertEqual(self.old_type['ID'], share['share_type'])
                self.assertEqual(self.old_share_net['id'],
                                 share['share_network_id'])
 def setUp(self):
     super(ObjectACLsNegativeTest, self).setUp()
     self.container_name = data_utils.rand_name(name='TestContainer')
     self.container_client.create_container(self.container_name)
 def test_update_all_tags(self):
     new_tag_name = data_utils.rand_name(self.__class__.__name__ + '-tag')
     with self.rbac_utils.override_role(self):
         self.servers_client.update_all_tags(self.server['id'],
                                             [new_tag_name])
Example #46
 def create_keypair(cls, client=None):
     client = client or cls.manager.keypairs_client
     name = data_utils.rand_name('keypair-test')
     body = client.create_keypair(name=name)
     cls.keypairs.append(body['keypair'])
     return body['keypair']
 def _update_project(self, project_uuid):
     put_body = {'display_name': data_utils.rand_name('project')}
     self.project_client.update_project(project_uuid, **put_body)
Example #48
 def resource_setup(cls):
     super(UsersTestJSON, cls).resource_setup()
     cls.alt_user = data_utils.rand_name('test_user')
     cls.alt_email = cls.alt_user + '@testmail.tm'
Example #49
0
 def resource_setup(cls):
     super(TestSubscriptions, cls).resource_setup()
     cls.queue_name = data_utils.rand_name('Queues-Test')
     # Create Queue
     cls.client.create_queue(cls.queue_name)
 def _add_tag_to_server(self):
     tag_name = data_utils.rand_name(self.__class__.__name__ + '-tag')
     self.servers_client.update_tag(self.server['id'], tag_name)
     self.addCleanup(self.servers_client.delete_all_tags, self.server['id'])
     return tag_name
def create_ssh_security_group(clients,
                              add_rule=False,
                              ethertype='IPv4',
                              use_neutron=True):
    """Create a security group for ping/ssh testing

    Create a security group to be attached to a VM using the nova or neutron
    clients. If rules are added, the group can be attached to a VM to enable
    connectivity validation over ICMP and further testing over SSH.

    :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
        or of a subclass of it. Resources are provisioned using clients from
        `clients`.
    :param add_rule: Whether security group rules are provisioned or not.
        Defaults to `False`.
    :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
    :param use_neutron: When True resources are provisioned via neutron, when
        False resources are provisioned via nova.
    :returns: A dictionary with the security group as returned by the API.

    Examples::

        from tempest.common import validation_resources as vr
        from tempest.lib import auth
        from tempest.lib.services import clients

        creds = auth.get_credentials('http://mycloud/identity/v3',
                                     username='******', project_name='me',
                                     password='******', domain_name='Default')
        osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
        # Security group for IPv4 tests
        sg4 = vr.create_ssh_security_group(osclients, add_rule=True)
        # Security group for IPv6 tests
        sg6 = vr.create_ssh_security_group(osclients, ethertype='IPv6',
                                           add_rule=True)
    """
    network_service = _network_service(clients, use_neutron)
    security_groups_client = network_service.SecurityGroupsClient()
    security_group_rules_client = network_service.SecurityGroupRulesClient()
    # Security Group clients for nova and neutron behave the same
    sg_name = data_utils.rand_name('securitygroup-')
    sg_description = data_utils.rand_name('description-')
    security_group = security_groups_client.create_security_group(
        name=sg_name, description=sg_description)['security_group']
    # Security Group Rules clients require different parameters depending on
    # the network service in use
    if add_rule:
        if use_neutron:
            security_group_rules_client.create_security_group_rule(
                security_group_id=security_group['id'],
                protocol='tcp',
                ethertype=ethertype,
                port_range_min=22,
                port_range_max=22,
                direction='ingress')
            security_group_rules_client.create_security_group_rule(
                security_group_id=security_group['id'],
                protocol='icmp',
                ethertype=ethertype,
                direction='ingress')
        else:
            security_group_rules_client.create_security_group_rule(
                parent_group_id=security_group['id'],
                ip_protocol='tcp',
                from_port=22,
                to_port=22)
            security_group_rules_client.create_security_group_rule(
                parent_group_id=security_group['id'],
                ip_protocol='icmp',
                from_port=-1,
                to_port=-1)
    LOG.debug(
        "SSH Validation resource security group with tcp and icmp "
        "rules %s created", sg_name)
    return security_group
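
The helper creates the group but leaves deletion to the caller. Below is a hypothetical sketch of how a test could register that cleanup, assuming the _network_service helper referenced above and a test case object that exposes addCleanup; it is a usage sketch, not part of the original module.

from tempest.lib.common.utils import test_utils


def provision_ssh_security_group(test_case, clients):
    # Hypothetical helper: ``test_case`` is any test exposing addCleanup()
    # and ``clients`` is the same ServiceClients instance expected above.
    sg = create_ssh_security_group(clients, add_rule=True)
    sg_client = _network_service(clients, True).SecurityGroupsClient()
    test_case.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         sg_client.delete_security_group, sg['id'])
    return sg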
 def _create_subnetpool(self, is_admin=False, **kwargs):
     name = data_utils.rand_name('subnetpool-')
     subnetpool_data = copy.deepcopy(self._subnetpool_data)
     kwargs.update(subnetpool_data)
     return self.create_subnetpool(name=name, is_admin=is_admin, **kwargs)
Example #53
0
    def create_server(self, name=None, image_id=None, flavor=None,
                      validatable=False, wait_until='ACTIVE',
                      clients=None, **kwargs):
        """Wrapper utility that returns a test server.

        This wrapper utility calls the common create test server and
        returns a test server. The purpose of this wrapper is to minimize
        the impact on the code of the tests already using this
        function.
        """

        # NOTE(jlanoux): As a first step, ssh checks in the scenario
        # tests need to be run regardless of the run_validation and
        # validatable parameters, at least until the ssh validation job
        # becomes voting in CI. The test resources management and IP
        # association are taken care of in the scenario tests.
        # Therefore, the validatable parameter is set to false in all
        # those tests. In this way create_server just returns a standard
        # server and the scenario tests always perform ssh checks.

        # Needed for the cross_tenant_traffic test:
        if clients is None:
            clients = self.manager

        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + "-server")

        vnic_type = CONF.network.port_vnic_type

        # If vnic_type is configured, create a port for
        # every network
        if vnic_type:
            ports = []

            create_port_body = {'binding:vnic_type': vnic_type,
                                'namestart': 'port-smoke'}
            if kwargs:
                # Convert security group names to security group ids
                # to pass to create_port
                if 'security_groups' in kwargs:
                    security_groups = \
                        clients.security_groups_client.list_security_groups(
                        ).get('security_groups')
                    sec_dict = dict([(s['name'], s['id'])
                                    for s in security_groups])

                    sec_groups_names = [s['name'] for s in kwargs.pop(
                        'security_groups')]
                    security_groups_ids = [sec_dict[s]
                                           for s in sec_groups_names]

                    if security_groups_ids:
                        create_port_body[
                            'security_groups'] = security_groups_ids
                networks = kwargs.pop('networks', [])
            else:
                networks = []

            # If no networks are passed to us, look up the project's
            # private networks and create a port on each one, which is
            # the same behaviour we would get by passing the call to
            # the clients with no networks.
            if not networks:
                networks = clients.networks_client.list_networks(
                    **{'router:external': False, 'fields': 'id'})['networks']

            # It's net['uuid'] if networks come from kwargs
            # and net['id'] if they come from
            # clients.networks_client.list_networks
            for net in networks:
                net_id = net.get('uuid', net.get('id'))
                if 'port' not in net:
                    port = self._create_port(network_id=net_id,
                                             client=clients.ports_client,
                                             **create_port_body)
                    ports.append({'port': port['id']})
                else:
                    ports.append({'port': net['port']})
            if ports:
                kwargs['networks'] = ports
            self.ports = ports

        tenant_network = self.get_tenant_network()

        body, servers = compute.create_test_server(
            clients,
            tenant_network=tenant_network,
            wait_until=wait_until,
            name=name, flavor=flavor,
            image_id=image_id, **kwargs)

        self.addCleanup(waiters.wait_for_server_termination,
                        clients.servers_client, body['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        clients.servers_client.delete_server, body['id'])
        server = clients.servers_client.show_server(body['id'])['server']
        return server
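
A typical scenario test passes the keypair and security group through **kwargs and lets the wrapper handle ports and networks. The method below is a hypothetical usage sketch built on the wrapper above; self.keypair and self.secgroup are assumed to have been created by earlier setup steps.

    def test_boot_server_with_validation_resources(self):
        # Hypothetical usage of the create_server wrapper above; the keypair
        # and security group are assumed to exist already.
        server = self.create_server(
            name=data_utils.rand_name('smoke-server'),
            key_name=self.keypair['name'],
            security_groups=[{'name': self.secgroup['name']}],
            wait_until='ACTIVE')
        self.assertEqual('ACTIVE', server['status'])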
Example #54
0
    def _create_network_resources(self, tenant_id):
        """The function creates network resources in the given tenant.

        The function checks whether the network_resources class member is
        empty. If it is, it creates a network, a subnet and a router for the
        tenant identified by the given tenant id parameter. Otherwise it
        creates only the network resources enabled in the network_resources
        dict.

        :param tenant_id: The tenant id to create resources for.
        :type tenant_id: str
        :raises: InvalidConfiguration, Exception
        :returns: network resources (network, subnet, router)
        :rtype: tuple
        """
        network = None
        subnet = None
        router = None
        # Make sure the requested network resource settings are consistent
        if self.network_resources:
            if self.network_resources['router']:
                if (not self.network_resources['subnet'] or
                    not self.network_resources['network']):
                    raise lib_exc.InvalidConfiguration(
                        'A router requires a subnet and network')
            elif self.network_resources['subnet']:
                if not self.network_resources['network']:
                    raise lib_exc.InvalidConfiguration(
                        'A subnet requires a network')
            elif self.network_resources['dhcp']:
                raise lib_exc.InvalidConfiguration('DHCP requires a subnet')

        rand_name_root = data_utils.rand_name(
            self.name, prefix=self.resource_prefix)
        if not self.network_resources or self.network_resources['network']:
            network_name = rand_name_root + "-network"
            network = self._create_network(network_name, tenant_id)
        try:
            if not self.network_resources or self.network_resources['subnet']:
                subnet_name = rand_name_root + "-subnet"
                subnet = self._create_subnet(subnet_name, tenant_id,
                                             network['id'])
            if not self.network_resources or self.network_resources['router']:
                router_name = rand_name_root + "-router"
                router = self._create_router(router_name, tenant_id)
                self._add_router_interface(router['id'], subnet['id'])
        except Exception:
            try:
                if router:
                    self._clear_isolated_router(router['id'], router['name'])
                if subnet:
                    self._clear_isolated_subnet(subnet['id'], subnet['name'])
                if network:
                    self._clear_isolated_network(network['id'],
                                                 network['name'])
            except Exception as cleanup_exception:
                msg = "There was an exception trying to setup network " \
                      "resources for tenant %s, and this error happened " \
                      "trying to clean them up: %s"
                LOG.warning(msg, tenant_id, cleanup_exception)
            raise
        return network, subnet, router
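
The configuration checks at the top of the helper encode a small dependency chain: a router needs a subnet and a network, a subnet needs a network, and DHCP needs a subnet. The dictionaries below are hypothetical examples of network_resources values that those checks accept or reject.

# Hypothetical network_resources values, illustrating the dependency checks
# performed above (router -> subnet + network, subnet -> network,
# dhcp -> subnet).
full_topology = {'network': True, 'subnet': True, 'router': True,
                 'dhcp': True}
network_only = {'network': True, 'subnet': False, 'router': False,
                'dhcp': False}
# Rejected with InvalidConfiguration: a router without its subnet/network.
router_without_subnet = {'network': False, 'subnet': False, 'router': True,
                         'dhcp': False}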
 def resource_setup(cls):
     super(DvrRoutersNegativeTest, cls).resource_setup()
     cls.router = cls.create_router(data_utils.rand_name('router'))
     cls.network = cls.create_network()
     cls.subnet = cls.create_subnet(cls.network)
    def create_topology_subnet(
            self, subnet_name, network, routers_client=None,
            subnets_client=None, router_id=None, ip_version=4, cidr=None,
            mask_bits=None, **kwargs):
        subnet_name_ = constants.APPLIANCE_NAME_STARTS_WITH + subnet_name
        if not subnets_client:
            subnets_client = self.subnets_client
        if not routers_client:
            routers_client = self.routers_client

        def cidr_in_use(cidr, tenant_id):
            """Check cidr existence

            :returns: True if a subnet with the cidr already exists in the
                tenant, False otherwise

            """
            cidr_in_use = \
                self.os_admin.subnets_client.list_subnets(
                    tenant_id=tenant_id, cidr=cidr)['subnets']
            return len(cidr_in_use) != 0

        if ip_version == 6:
            tenant_cidr = (cidr or netaddr.IPNetwork(
                CONF.network.project_network_v6_cidr))
            mask_bits = mask_bits or CONF.network.project_network_v6_mask_bits
        else:
            tenant_cidr = cidr or netaddr.IPNetwork(
                CONF.network.project_network_cidr)
            mask_bits = mask_bits or CONF.network.project_network_mask_bits
        str_cidr = str(tenant_cidr)
        if not cidr:
            # Repeatedly attempt subnet creation with sequential cidr
            # blocks until an unallocated block is found.
            for subnet_cidr in tenant_cidr.subnet(mask_bits):
                str_cidr = str(subnet_cidr)
                if not cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
                    break
        else:
            if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
                msg = "Specified subnet %r is in use" % str_cidr
                LOG.error(msg)
                raise lib_exc.TempestException(msg)
        subnet = dict(name=data_utils.rand_name(subnet_name_),
                      network_id=network['id'], tenant_id=network['tenant_id'],
                      cidr=str_cidr, ip_version=ip_version, **kwargs)
        try:
            result = None
            result = subnets_client.create_subnet(**subnet)
        except lib_exc.Conflict as e:
            is_overlapping_cidr = 'overlaps with another subnet' in str(e)
            if not is_overlapping_cidr:
                raise
        self.assertIsNotNone(result, 'Unable to allocate tenant network')
        subnet = result['subnet']
        self.assertEqual(subnet['cidr'], str_cidr)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        subnets_client.delete_subnet, subnet['id'])
        self.topology_subnets[subnet_name] = subnet
        if router_id:
            if not routers_client:
                routers_client = self.routers_client
            routers_client.add_router_interface(
                router_id, subnet_id=subnet["id"])
            self.addCleanup(
                test_utils.call_and_ignore_notfound_exc,
                routers_client.remove_router_interface, router_id,
                subnet_id=subnet["id"])
        return subnet
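
The loop above walks sequential CIDR blocks until it finds one that is not already allocated in the tenant. The standalone sketch below isolates that selection step with netaddr; the pick_free_cidr name and the used_cidrs set are illustrative stand-ins for the list_subnets lookup.

import netaddr


def pick_free_cidr(base_cidr, mask_bits, used_cidrs):
    # Walk sequential /mask_bits blocks of base_cidr, skipping any that the
    # caller reports as already in use (stand-in for the subnet listing).
    for candidate in netaddr.IPNetwork(base_cidr).subnet(mask_bits):
        if str(candidate) not in used_cidrs:
            return str(candidate)
    raise ValueError('No free /%s block left in %s' % (mask_bits, base_cidr))


# Example: the first two /24 blocks are allocated, so the third is returned.
print(pick_free_cidr('10.100.0.0/16', 24,
                     {'10.100.0.0/24', '10.100.1.0/24'}))
# -> 10.100.2.0/24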
 def test_router_create_tenant_distributed_returns_forbidden(self):
     with testtools.ExpectedException(lib_exc.Forbidden):
         self.create_router(data_utils.rand_name('router'),
                            distributed=True)
def create_validation_resources(clients,
                                keypair=False,
                                floating_ip=False,
                                security_group=False,
                                security_group_rules=False,
                                ethertype='IPv4',
                                use_neutron=True,
                                floating_network_id=None,
                                floating_network_name=None):
    """Provision resources for VM ping/ssh testing

    Create resources required to be able to ping / ssh a virtual machine:
    keypair, security group, security group rules and a floating IP.
    Which of those resources are required may depend on the cloud setup and on
    the specific test, and it can be controlled via the corresponding
    arguments.

    Provisioned resources are returned in a dictionary.

    :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
        or of a subclass of it. Resources are provisioned using clients from
        `clients`.
    :param keypair: Whether to provision a keypair. Defaults to False.
    :param floating_ip: Whether to provision a floating IP. Defaults to False.
    :param security_group: Whether to provision a security group. Defaults to
        False.
    :param security_group_rules: Whether to provision security group rules.
        Defaults to False.
    :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
    :param use_neutron: When True resources are provisioned via neutron, when
        False resources are provisioned via nova.
    :param floating_network_id: The id of the network used to provision a
        floating IP. Only used if a floating IP is requested and with neutron.
    :param floating_network_name: The name of the floating IP pool used to
        provision the floating IP. Only used if a floating IP is requested and
        with nova-net.
    :returns: A dictionary with the resources in the format they are returned
        by the API. Valid keys are 'keypair', 'floating_ip' and
        'security_group'.

    Examples::

        from tempest.common import validation_resources as vr
        from tempest.lib import auth
        from tempest.lib.services import clients

        creds = auth.get_credentials('http://mycloud/identity/v3',
                                     username='******', project_name='me',
                                     password='******', domain_name='Default')
        osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
        # Request keypair and floating IP
        resources = dict(keypair=True, security_group=False,
                         security_group_rules=False, floating_ip=True)
        resources = vr.create_validation_resources(
            osclients, use_neutron=True,
            floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C',
            **resources)

        # The floating IP to be attached to the VM
        floating_ip = resources['floating_ip']['ip']
    """
    # Create and Return the validation resources required to validate a VM
    validation_data = {}
    try:
        if keypair:
            keypair_name = data_utils.rand_name('keypair')
            validation_data.update(
                clients.compute.KeyPairsClient().create_keypair(
                    name=keypair_name))
            LOG.debug("Validation resource key %s created", keypair_name)
        if security_group:
            validation_data['security_group'] = create_ssh_security_group(
                clients,
                add_rule=security_group_rules,
                use_neutron=use_neutron,
                ethertype=ethertype)
        if floating_ip:
            floating_ip_client = _network_service(
                clients, use_neutron).FloatingIPsClient()
            if use_neutron:
                floatingip = floating_ip_client.create_floatingip(
                    floating_network_id=floating_network_id)
                # validation_resources['floating_ip'] has historically looked
                # like a compute API POST /os-floating-ips response, so we need
                # to mangle it a bit for a Neutron response with different
                # fields.
                validation_data['floating_ip'] = floatingip['floatingip']
                validation_data['floating_ip']['ip'] = (
                    floatingip['floatingip']['floating_ip_address'])
            else:
                # NOTE(mriedem): The os-floating-ips compute API was deprecated
                # in the 2.36 microversion. Any tests for CRUD operations on
                # floating IPs using the compute API should be capped at 2.35.
                validation_data.update(
                    floating_ip_client.create_floating_ip(
                        pool=floating_network_name))
            LOG.debug("Validation resource floating IP %s created",
                      validation_data['floating_ip'])
    except Exception as prov_exc:
        # If something goes wrong, cleanup as much as possible before we
        # re-raise the exception
        with excutils.save_and_reraise_exception():
            if validation_data:
                # Cleanup may fail as well
                try:
                    msg = ('Error while provisioning validation resources %s. '
                           'Trying to cleanup what we provisioned so far: %s')
                    # The exceptions logging is already handled, so using
                    # debug here just to provide more context
                    LOG.debug(msg, prov_exc, str(validation_data))
                    clear_validation_resources(
                        clients,
                        keypair=validation_data.get('keypair', None),
                        floating_ip=validation_data.get('floating_ip', None),
                        security_group=validation_data.get(
                            'security_group', None),
                        use_neutron=use_neutron)
                except Exception as cleanup_exc:
                    msg = ('Error during cleanup of validation resources. '
                           'The cleanup was triggered by an exception during '
                           'the provisioning step.\n'
                           'Provisioning exception: %s\n'
                           'First cleanup exception: %s')
                    LOG.exception(msg, prov_exc, cleanup_exc)
    return validation_data
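
The clear_validation_resources call in the error path mirrors how callers are expected to dispose of the resources once a test finishes. The snippet below is a hypothetical usage sketch; osclients stands for the ServiceClients instance built in the docstring example above, and the try/finally placement is illustrative only.

# Hypothetical usage, following the docstring example above; ``osclients``
# is the ServiceClients instance constructed there.
resources = create_validation_resources(
    osclients, keypair=True, security_group=True,
    security_group_rules=True, floating_ip=True, use_neutron=True,
    floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C')
try:
    # ... boot a server and ssh to it via resources['floating_ip']['ip'] ...
    pass
finally:
    # Tear everything down with the same helper used in the error path above.
    clear_validation_resources(
        osclients,
        keypair=resources.get('keypair'),
        floating_ip=resources.get('floating_ip'),
        security_group=resources.get('security_group'),
        use_neutron=True)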
Example #59
0
 def test_remove_share_type_by_nonexistent_id(self):
     self.assertRaises(lib_exc.NotFound,
                       self.shares_client.remove_access_from_share_type,
                       data_utils.rand_name("fake"),
                       self.shares_client.tenant_id)
def create_validation_resources(clients,
                                keypair=False,
                                floating_ip=False,
                                security_group=False,
                                security_group_rules=False,
                                ethertype='IPv4',
                                use_neutron=True,
                                floating_network_id=None,
                                floating_network_name=None):
    """Provision resources for VM ping/ssh testing

    Create resources required to be able to ping / ssh a virtual machine:
    keypair, security group, security group rules and a floating IP.
    Which of those resources are required may depend on the cloud setup and on
    the specific test, and it can be controlled via the corresponding
    arguments.

    Provisioned resources are returned in a dictionary.

    :param clients: Instance of `tempest.lib.services.clients.ServiceClients`
        or of a subclass of it. Resources are provisioned using clients from
        `clients`.
    :param keypair: Whether to provision a keypair. Defaults to False.
    :param floating_ip: Whether to provision a floating IP. Defaults to False.
    :param security_group: Whether to provision a security group. Defaults to
        False.
    :param security_group_rules: Whether to provision security group rules.
        Defaults to False.
    :param ethertype: 'IPv4' or 'IPv6'. Honoured only in case neutron is used.
    :param use_neutron: When True resources are provisioned via neutron, when
        False resources are provisioned via nova.
    :param floating_network_id: The id of the network used to provision a
        floating IP. Only used if a floating IP is requested and with neutron.
    :param floating_network_name: The name of the floating IP pool used to
        provision the floating IP. Only used if a floating IP is requested and
        with nova-net.
    :returns: A dictionary with the provisioned resources in the format they
        are returned by the API. Valid keys are 'keypair', 'floating_ip' and
        'security_group'.

    Examples::

        from tempest.common import validation_resources as vr
        from tempest.lib import auth
        from tempest.lib.services import clients

        creds = auth.get_credentials('http://mycloud/identity/v3',
                                     username='******', project_name='me',
                                     password='******', domain_name='Default')
        osclients = clients.ServiceClients(creds, 'http://mycloud/identity/v3')
        # Request keypair and floating IP
        resources = dict(keypair=True, security_group=False,
                         security_group_rules=False, floating_ip=True)
        resources = vr.create_validation_resources(
            osclients, use_neutron=True,
            floating_network_id='4240E68E-23DA-4C82-AC34-9FEFAA24521C',
            **resources)

        # The floating IP to be attached to the VM
        floating_ip = resources['floating_ip']['ip']
    """
    # Create and Return the validation resources required to validate a VM
    validation_data = {}
    if keypair:
        keypair_name = data_utils.rand_name('keypair')
        validation_data.update(
            clients.compute.KeyPairsClient().create_keypair(name=keypair_name))
        LOG.debug("Validation resource key %s created", keypair_name)
    if security_group:
        validation_data['security_group'] = create_ssh_security_group(
            clients,
            add_rule=security_group_rules,
            use_neutron=use_neutron,
            ethertype=ethertype)
    if floating_ip:
        floating_ip_client = _network_service(clients,
                                              use_neutron).FloatingIPsClient()
        if use_neutron:
            floatingip = floating_ip_client.create_floatingip(
                floating_network_id=floating_network_id)
            # validation_resources['floating_ip'] has historically looked
            # like a compute API POST /os-floating-ips response, so we need
            # to mangle it a bit for a Neutron response with different
            # fields.
            validation_data['floating_ip'] = floatingip['floatingip']
            validation_data['floating_ip']['ip'] = (
                floatingip['floatingip']['floating_ip_address'])
        else:
            # NOTE(mriedem): The os-floating-ips compute API was deprecated
            # in the 2.36 microversion. Any tests for CRUD operations on
            # floating IPs using the compute API should be capped at 2.35.
            validation_data.update(
                floating_ip_client.create_floating_ip(
                    pool=floating_network_name))
    return validation_data