def test_list_container_contents_json(self):
        """Verify a JSON-format container listing includes a created object."""
        # Create a container and register it for teardown.
        container_name = rand_name(name='TestContainer')
        resp, _ = self.container_client.create_container(container_name)
        self.containers.append(container_name)

        # Upload an object with arbitrary payload data.
        object_name = rand_name(name='TestObject')
        data = arbitrary_string()
        resp, _ = self.object_client.create_object(
            container_name, object_name, data)

        # Attach a piece of metadata to the object.
        meta_key = rand_name(name='Meta-Test-')
        meta_value = rand_name(name='MetaValue-')
        orig_metadata = {meta_key: meta_value}
        resp, _ = self.object_client.update_object_metadata(
            container_name, object_name, orig_metadata)

        # Fetch the container contents as JSON and check the object is listed.
        resp, object_list = self.container_client.list_container_contents(
            container_name, params={'format': 'json'})
        self.assertEqual(resp['status'], '200')
        self.assertIsNotNone(object_list)
        self.assertIn(object_name, [entry['name'] for entry in object_list])
Example #2
0
 def test_create_get_delete_token(self):
     """Create a token for a fresh user, GET its details, then delete it."""
     # get a token by username and password
     user_name = data_utils.rand_name(name='user-')
     user_password = data_utils.rand_name(name='pass-')
     # first:create a tenant
     tenant_name = data_utils.rand_name(name='tenant-')
     resp, tenant = self.client.create_tenant(tenant_name)
     self.assertEqual(200, resp.status)
     self.data.tenants.append(tenant)
     # second:create a user
     resp, user = self.client.create_user(user_name, user_password,
                                          tenant['id'], '')
     self.assertEqual(200, resp.status)
     self.data.users.append(user)
     # then get a token for the user
     rsp, body = self.token_client.auth(user_name,
                                        user_password,
                                        tenant['name'])
     self.assertEqual(rsp['status'], '200')
     self.assertEqual(body['token']['tenant']['name'],
                      tenant['name'])
     # Perform GET Token and verify it echoes the user and tenant
     # it was issued for
     token_id = body['token']['id']
     resp, token_details = self.client.get_token(token_id)
     self.assertEqual(resp['status'], '200')
     self.assertEqual(token_id, token_details['token']['id'])
     self.assertEqual(user['id'], token_details['user']['id'])
     self.assertEqual(user_name, token_details['user']['name'])
     self.assertEqual(tenant['name'],
                      token_details['token']['tenant']['name'])
     # then delete the token; 204 indicates successful revocation
     resp, body = self.client.delete_token(token_id)
     self.assertEqual(resp['status'], '204')
Example #3
0
    def test_tenant_update_desc(self):
        """Update a tenant's description and verify the change persists."""
        t_name = data_utils.rand_name(name='tenant-')
        t_desc = data_utils.rand_name(name='desc-')
        tenant = self.client.create_tenant(t_name, description=t_desc)
        self.data.tenants.append(tenant)

        t_id = tenant['id']
        resp1_desc = tenant['description']

        # Change the description and confirm the response reflects it.
        t_desc2 = data_utils.rand_name(name='desc2-')
        updated = self.client.update_tenant(t_id, description=t_desc2)
        resp2_desc = updated['description']
        self.assertNotEqual(resp1_desc, resp2_desc)

        # Re-fetch the tenant; the stored description must match the update.
        fetched = self.client.get_tenant(t_id)
        resp3_desc = fetched['description']

        self.assertNotEqual(resp1_desc, resp3_desc)
        self.assertEqual(t_desc, resp1_desc)
        self.assertEqual(resp2_desc, resp3_desc)

        # Tenant lifecycle is handled within this test, so clean up directly.
        self.client.delete_tenant(t_id)
        self.data.tenants.remove(tenant)
    def test_copy_object_across_containers(self):
        """Copy an object between containers and verify data and metadata."""
        # create a container to use as a source container
        src_container_name = data_utils.rand_name(name="TestSourceContainer")
        self.container_client.create_container(src_container_name)
        self.containers.append(src_container_name)
        # create a container to use as a destination container
        dst_container_name = data_utils.rand_name(name="TestDestinationContainer")
        self.container_client.create_container(dst_container_name)
        self.containers.append(dst_container_name)
        # create object in source container
        object_name = data_utils.rand_name(name="Object")
        data = data_utils.arbitrary_string(size=len(object_name) * 2, base_text=object_name)
        resp, _ = self.object_client.create_object(src_container_name, object_name, data)
        # set object metadata on the source object so we can check that the
        # copy carries it across
        meta_key = data_utils.rand_name(name="test-")
        meta_value = data_utils.rand_name(name="MetaValue-")
        orig_metadata = {meta_key: meta_value}
        resp, _ = self.object_client.update_object_metadata(src_container_name, object_name, orig_metadata)
        self.assertIn(int(resp["status"]), HTTP_SUCCESS)
        self.assertHeaders(resp, "Object", "POST")

        # copy object from source container to destination container
        resp, _ = self.object_client.copy_object_across_containers(
            src_container_name, object_name, dst_container_name, object_name
        )
        self.assertEqual(resp["status"], "201")
        self.assertHeaders(resp, "Object", "PUT")

        # check if object is present in destination container; both the
        # payload and the custom metadata header must have been copied
        resp, body = self.object_client.get_object(dst_container_name, object_name)
        self.assertEqual(body, data)
        actual_meta_key = "x-object-meta-" + meta_key
        self.assertIn(actual_meta_key, resp)
        self.assertEqual(resp[actual_meta_key], meta_value)
Example #5
0
    def test_create_update_delete_domain(self):
        """Create a domain, rename/re-describe it, and verify via GET."""
        d_name = data_utils.rand_name('domain-')
        d_desc = data_utils.rand_name('domain-desc-')
        domain = self.client.create_domain(
            d_name, description=d_desc)
        self.addCleanup(self._delete_domain, domain['id'])
        # The create response must expose the standard attribute set.
        for attr in ('id', 'description', 'name', 'enabled', 'links'):
            self.assertIn(attr, domain)
        self.assertIsNotNone(domain['id'])
        self.assertEqual(d_name, domain['name'])
        self.assertEqual(d_desc, domain['description'])
        self.assertEqual(True, domain['enabled'])

        # Rename and re-describe the domain.
        new_desc = data_utils.rand_name('new-desc-')
        new_name = data_utils.rand_name('new-name-')
        updated_domain = self.client.update_domain(
            domain['id'], name=new_name, description=new_desc)
        for attr in ('id', 'description', 'name', 'enabled', 'links'):
            self.assertIn(attr, updated_domain)
        self.assertIsNotNone(updated_domain['id'])
        self.assertEqual(new_name, updated_domain['name'])
        self.assertEqual(new_desc, updated_domain['description'])
        self.assertEqual('true', str(updated_domain['enabled']).lower())

        # A fresh GET must reflect the updated values as well.
        fetched_domain = self.client.get_domain(domain['id'])
        self.assertEqual(new_name, fetched_domain['name'])
        self.assertEqual(new_desc, fetched_domain['description'])
        self.assertEqual('true', str(fetched_domain['enabled']).lower())
    def test_create_update_get_service(self):
        """Create a service, update its description, and verify via show."""
        # Creating a Service
        name = data_utils.rand_name('service')
        serv_type = data_utils.rand_name('type')
        desc = data_utils.rand_name('description')
        create_service = self.services_client.create_service(
            type=serv_type, name=name, description=desc)['service']
        self.addCleanup(self._del_service, create_service['id'])
        self.assertIsNotNone(create_service['id'])

        # Verifying response body of create service
        expected_data = {'name': name, 'type': serv_type, 'description': desc}
        self.assertDictContainsSubset(expected_data, create_service)

        # Update description only; other attributes should remain untouched
        s_id = create_service['id']
        resp1_desc = create_service['description']
        s_desc2 = data_utils.rand_name('desc2')
        update_service = self.services_client.update_service(
            s_id, description=s_desc2)['service']
        resp2_desc = update_service['description']

        self.assertNotEqual(resp1_desc, resp2_desc)

        # Get service and check the stored description matches the update
        fetched_service = self.services_client.show_service(s_id)['service']
        resp3_desc = fetched_service['description']

        self.assertEqual(resp2_desc, resp3_desc)
        self.assertDictContainsSubset(update_service, fetched_service)
Example #7
0
    def test_aggregate_create_update_with_az(self):
        """Create an aggregate with an AZ, update both names, and verify."""
        self.useFixture(fixtures.LockFixture('availability_zone'))
        aggregate_name = rand_name(self.aggregate_name_prefix)
        az_name = rand_name(self.az_name_prefix)
        resp, aggregate = self.client.create_aggregate(aggregate_name, az_name)
        self.addCleanup(self.client.delete_aggregate, aggregate['id'])

        # The create response must echo the requested name and AZ.
        self.assertEqual(200, resp.status)
        self.assertEqual(aggregate_name, aggregate['name'])
        self.assertEqual(az_name, aggregate['availability_zone'])
        self.assertIsNotNone(aggregate['id'])

        # Rename both the aggregate and its availability zone.
        aggregate_id = aggregate['id']
        new_aggregate_name = aggregate_name + '_new'
        new_az_name = az_name + '_new'
        resp, resp_aggregate = self.client.update_aggregate(aggregate_id,
                                                            new_aggregate_name,
                                                            new_az_name)
        self.assertEqual(200, resp.status)
        self.assertEqual(new_aggregate_name, resp_aggregate['name'])
        self.assertEqual(new_az_name, resp_aggregate['availability_zone'])

        # The updated triple must appear in the aggregate listing.
        resp, aggregates = self.client.list_aggregates()
        self.assertEqual(200, resp.status)
        listed = [(agg['id'], agg['name'], agg['availability_zone'])
                  for agg in aggregates]
        self.assertIn((aggregate_id, new_aggregate_name, new_az_name), listed)
Example #8
0
 def test_update_security_groups(self):
     """Update a security group's name/description and verify via GET."""
     # Update security group name and description
     # Create a security group
     s_name = data_utils.rand_name('sg-')
     s_description = data_utils.rand_name('description-')
     resp, securitygroup = \
         self.client.create_security_group(s_name, s_description)
     self.assertEqual(200, resp.status)
     self.assertIn('id', securitygroup)
     securitygroup_id = securitygroup['id']
     self.addCleanup(self._delete_security_group,
                     securitygroup_id)
     # Update the name and description
     s_new_name = data_utils.rand_name('sg-hth-')
     s_new_des = data_utils.rand_name('description-hth-')
     resp, sg_new = \
         self.client.update_security_group(securitygroup_id,
                                           name=s_new_name,
                                           description=s_new_des)
     self.assertEqual(200, resp.status)
     # get the security group and confirm the update was persisted
     resp, fetched_group = \
         self.client.get_security_group(securitygroup_id)
     self.assertEqual(s_new_name, fetched_group['name'])
     self.assertEqual(s_new_des, fetched_group['description'])
Example #9
0
 def test_create_delete_token(self):
     """Create a token for a fresh user and then delete it."""
     # get a token by username and password
     user_name = data_utils.rand_name(name='user-')
     user_password = data_utils.rand_name(name='pass-')
     # first:create a tenant
     tenant_name = data_utils.rand_name(name='tenant-')
     resp, tenant = self.client.create_tenant(tenant_name)
     self.assertEqual(200, resp.status)
     self.data.tenants.append(tenant)
     # second:create a user
     resp, user = self.client.create_user(user_name, user_password,
                                          tenant['id'], '')
     self.assertEqual(200, resp.status)
     self.data.users.append(user)
     # then get a token for the user
     rsp, body = self.token_client.auth(user_name,
                                        user_password,
                                        tenant['name'])
     # the auth response body is raw JSON here; unwrap the 'access' blob
     access_data = json.loads(body)['access']
     self.assertEqual(rsp['status'], '200')
     self.assertEqual(access_data['token']['tenant']['name'],
                      tenant['name'])
     # then delete the token; 204 indicates successful revocation
     token_id = access_data['token']['id']
     resp, body = self.client.delete_token(token_id)
     self.assertEqual(resp['status'], '204')
Example #10
0
    def test_associate_user_to_project(self):
        """Create a user bound to a new project and verify it via GET."""
        # Set up the target project.
        p_name = data_utils.rand_name('project')
        project = self.projects_client.create_project(p_name)['project']
        self.data.projects.append(project)

        # Build the user attributes and create the user in that project.
        u_name = data_utils.rand_name('user')
        u_desc = u_name + 'description'
        u_email = u_name + '@testmail.tm'
        u_password = data_utils.rand_password()
        user = self.users_client.create_user(
            u_name, description=u_desc, password=u_password,
            email=u_email, project_id=project['id'])['user']
        # Remove the user when the test finishes.
        self.addCleanup(self.users_client.delete_user, user['id'])

        # Fetch the user back and confirm each attribute round-tripped,
        # including the project association.
        fetched_user = self.users_client.show_user(user['id'])['user']
        self.assertEqual(u_name, fetched_user['name'])
        self.assertEqual(u_desc, fetched_user['description'])
        self.assertEqual(project['id'], fetched_user['project_id'])
        self.assertEqual(u_email, fetched_user['email'])
Example #11
0
    def test_project_create_with_parent(self):
        """Verify parent_id defaults to the domain and can be set explicitly."""
        # Create root project without providing a parent_id
        self.data.setup_test_domain()
        domain_id = self.data.domain['id']

        root_project_name = data_utils.rand_name('root_project')
        root_project = self.projects_client.create_project(
            root_project_name, domain_id=domain_id)['project']
        self.addCleanup(
            self.projects_client.delete_project, root_project['id'])

        root_project_id = root_project['id']
        parent_id = root_project['parent_id']
        self.assertEqual(root_project_name, root_project['name'])
        # If not provided, the parent_id must point to the top level
        # project in the hierarchy, i.e. its domain
        self.assertEqual(domain_id, parent_id)

        # Create a project using root_project_id as parent_id
        project_name = data_utils.rand_name('project')
        project = self.projects_client.create_project(
            project_name, domain_id=domain_id,
            parent_id=root_project_id)['project']
        self.addCleanup(self.projects_client.delete_project, project['id'])
        parent_id = project['parent_id']
        self.assertEqual(project_name, project['name'])
        # An explicit parent must be honored verbatim
        self.assertEqual(root_project_id, parent_id)
 def test_create_vip_as_admin_for_another_tenant(self):
     """Admin creates a pool and VIP for another tenant; tenant can view."""
     name = data_utils.rand_name('vip-')
     # Admin creates the backing pool in the target tenant.
     resp, body = self.admin_client.create_pool(
         name=data_utils.rand_name('pool-'), lb_method="ROUND_ROBIN",
         protocol="HTTP", subnet_id=self.subnet['id'],
         tenant_id=self.tenant_id)
     self.assertEqual('201', resp['status'])
     pool = body['pool']
     self.addCleanup(self.admin_client.delete_pool, pool['id'])
     # Admin creates the VIP on top of that pool, still in the tenant.
     resp, body = self.admin_client.create_vip(name=name,
                                               protocol="HTTP",
                                               protocol_port=80,
                                               subnet_id=self.subnet['id'],
                                               pool_id=pool['id'],
                                               tenant_id=self.tenant_id)
     self.assertEqual('201', resp['status'])
     vip = body['vip']
     self.addCleanup(self.admin_client.delete_vip, vip['id'])
     self.assertIsNotNone(vip['id'])
     self.assertEqual(self.tenant_id, vip['tenant_id'])
     # The non-admin tenant client must be able to show the VIP.
     resp, body = self.client.show_vip(vip['id'])
     self.assertEqual('200', resp['status'])
     show_vip = body['vip']
     self.assertEqual(vip['id'], show_vip['id'])
     self.assertEqual(vip['name'], show_vip['name'])
Example #13
0
    def test_get_updated_quotas(self):
        """GET must reflect quota updates at both project and user scope."""
        # Verify that GET shows the updated quota set of project
        project_name = data_utils.rand_name('cpu_quota_project')
        project_desc = project_name + '-desc'
        project = self.identity_utils.create_project(name=project_name,
                                                     description=project_desc)
        project_id = project['id']
        self.addCleanup(self.identity_utils.delete_project, project_id)

        self.adm_client.update_quota_set(project_id, ram='5120')
        quota_set = self.adm_client.show_quota_set(project_id)['quota_set']
        self.assertEqual(5120, quota_set['ram'])

        # Verify that GET shows the updated quota set of user
        user_name = data_utils.rand_name('cpu_quota_user')
        password = data_utils.rand_name('password')
        email = user_name + '@testmail.tm'
        user = self.identity_utils.create_user(username=user_name,
                                               password=password,
                                               project=project,
                                               email=email)
        # some identity clients wrap the result in a 'user' key; unwrap it
        if 'user' in user:
            user = user['user']
        user_id = user['id']
        self.addCleanup(self.identity_utils.delete_user, user_id)

        # Per-user quota within the project overrides the project-wide one.
        self.adm_client.update_quota_set(project_id,
                                         user_id=user_id,
                                         ram='2048')
        quota_set = self.adm_client.show_quota_set(
            project_id, user_id=user_id)['quota_set']
        self.assertEqual(2048, quota_set['ram'])
    def setUpClass(cls):
        """Build the fixtures shared by the image filter tests.

        Boots two servers and snapshots three images from them (image1 and
        image2 from server1, image3 from server2), waiting for each image to
        become ACTIVE before continuing.
        """
        super(ListImageFiltersTestJSON, cls).setUpClass()
        cls.client = cls.images_client

        resp, cls.server1 = cls.create_server()
        resp, cls.server2 = cls.create_server(wait_until='ACTIVE')
        # NOTE(sdague) this is faster than doing the sync wait_util on both
        cls.servers_client.wait_for_server_status(cls.server1['id'], 'ACTIVE')

        # Create images to be used in the filter tests
        image1_name = rand_name('image')
        resp, body = cls.client.create_image(cls.server1['id'], image1_name)
        cls.image1_id = parse_image_id(resp['location'])
        cls.client.wait_for_image_resp_code(cls.image1_id, 200)
        cls.client.wait_for_image_status(cls.image1_id, 'ACTIVE')
        resp, cls.image1 = cls.client.get_image(cls.image1_id)

        # Servers have a hidden property for when they are being imaged
        # Performing back-to-back create image calls on a single
        # server will sometimes cause failures, hence image3 (from server2)
        # is snapshotted before image2 (from server1) below
        image3_name = rand_name('image')
        resp, body = cls.client.create_image(cls.server2['id'], image3_name)
        cls.image3_id = parse_image_id(resp['location'])
        cls.client.wait_for_image_resp_code(cls.image3_id, 200)
        cls.client.wait_for_image_status(cls.image3_id, 'ACTIVE')
        resp, cls.image3 = cls.client.get_image(cls.image3_id)

        image2_name = rand_name('image')
        resp, body = cls.client.create_image(cls.server1['id'], image2_name)
        cls.image2_id = parse_image_id(resp['location'])
        cls.client.wait_for_image_resp_code(cls.image2_id, 200)
        cls.client.wait_for_image_status(cls.image2_id, 'ACTIVE')
        resp, cls.image2 = cls.client.get_image(cls.image2_id)
Example #15
0
 def test_create_update_get_delete_record(self):
     """Create a DNS A record, update its data/ttl, and verify via GET."""
     # Create Domain
     name = data_utils.rand_name('domain') + '.com.'
     email = data_utils.rand_name('dns') + '@testmail.com'
     _, domain = self.dns_domains_client.create_domain(name, email)
     self.addCleanup(self.dns_domains_client.delete_domain, domain['id'])
     # Create Record
     r_name = 'www.' + name
     r_data = "192.0.2.4"
     _, record = self.client.create_record(domain_id=domain['id'],
                                           name=r_name, data=r_data,
                                           type='A')
     self.addCleanup(self._delete_record, domain['id'], record['id'])
     self.assertIsNotNone(record['id'])
     self.assertEqual(domain['id'], record['domain_id'])
     self.assertEqual(r_name, record['name'])
     self.assertEqual(r_data, record['data'])
     self.assertEqual('A', record['type'])
     # Update Record with data and ttl
     r_data1 = "192.0.2.5"
     r_ttl = 3600
     _, update_record = self.client.update_record(domain_id=domain['id'],
                                                  record_id=record['id'],
                                                  name=r_name, type='A',
                                                  data=r_data1, ttl=r_ttl)
     self.assertEqual(r_data1, update_record['data'])
     self.assertEqual(r_ttl, update_record['ttl'])
     # GET record and confirm it matches the updated version field-by-field
     _, get_record = self.client.get_record(domain_id=domain['id'],
                                            record_id=record['id'])
     self.assertEqual(update_record['data'], get_record['data'])
     self.assertEqual(update_record['name'], get_record['name'])
     self.assertEqual(update_record['type'], get_record['type'])
     self.assertEqual(update_record['ttl'], get_record['ttl'])
     self.assertEqual(update_record['domain_id'], get_record['domain_id'])
Example #16
0
    def test_update_service(self):
        """Create a service, update its description, and verify via GET."""
        name = data_utils.rand_name('service-')
        serv_type = data_utils.rand_name('type--')
        desc = data_utils.rand_name('description-')
        resp, body = self.service_client.create_service(name, serv_type,
                                                        description=desc)
        self.assertEqual('201', resp['status'])
        # Remove the service once the test finishes.
        self.addCleanup(self.service_client.delete_service, body['id'])

        s_id = body['id']
        original_desc = body['description']

        # Push a new description to the service.
        s_desc2 = data_utils.rand_name('desc2-')
        resp, body = self.service_client.update_service(
            s_id, description=s_desc2)
        updated_desc = body['description']
        self.assertEqual('200', resp['status'])
        self.assertNotEqual(original_desc, updated_desc)

        # A GET must return the updated description, not the original one.
        resp, body = self.service_client.get_service(s_id)
        fetched_desc = body['description']

        self.assertNotEqual(original_desc, fetched_desc)
        self.assertEqual(updated_desc, fetched_desc)
Example #17
0
    def _create_type_and_volume(self, backend_name_key, with_prefix):
        """Create a volume type pinned to a backend, then a volume of it.

        :param backend_name_key: backend name stored in the type's
            volume_backend_name extra spec
        :param with_prefix: if True, use the scoped
            'capabilities:volume_backend_name' spec key; otherwise the
            bare 'volume_backend_name' key
        """
        # Volume/Type creation
        type_name = data_utils.rand_name('Type')
        vol_name = data_utils.rand_name('Volume')
        spec_key_with_prefix = "capabilities:volume_backend_name"
        spec_key_without_prefix = "volume_backend_name"
        if with_prefix:
            extra_specs = {spec_key_with_prefix: backend_name_key}
        else:
            extra_specs = {spec_key_without_prefix: backend_name_key}
        self.type = self.volume_types_client.create_volume_type(
            type_name, extra_specs=extra_specs)
        self.volume_type_id_list.append(self.type['id'])

        params = {self.name_field: vol_name, 'volume_type': type_name}

        # Create the volume with the new type and track its id in the
        # cleanup list matching the spec-key flavor used above
        self.volume = self.admin_volume_client.create_volume(size=1,
                                                             **params)
        if with_prefix:
            self.volume_id_list_with_prefix.append(self.volume['id'])
        else:
            self.volume_id_list_without_prefix.append(
                self.volume['id'])
        self.admin_volume_client.wait_for_volume_status(
            self.volume['id'], 'available')
Example #18
0
    def test_update_image(self):
        """Create an image, rename it via PATCH, and verify the new name."""
        # Register a queued image using the first supported formats.
        image_name = data_utils.rand_name('image')
        container_format = CONF.image.container_formats[0]
        disk_format = CONF.image.disk_formats[0]
        body = self.client.create_image(
            name=image_name,
            container_format=container_format,
            disk_format=disk_format,
            visibility='private')
        self.addCleanup(self.client.delete_image, body['id'])
        self.assertEqual('queued', body['status'])
        image_id = body['id']

        # Upload file data so the image becomes usable.
        image_file = moves.cStringIO(data_utils.random_bytes())
        self.client.store_image_file(image_id, image_file)

        # Rename via a JSON-patch style replace operation.
        new_image_name = data_utils.rand_name('new-image')
        body = self.client.update_image(
            image_id, [{'replace': '/name', 'value': new_image_name}])

        # Re-read the image; the name must reflect the update.
        body = self.client.show_image(image_id)
        self.assertEqual(image_id, body['id'])
        self.assertEqual(new_image_name, body['name'])
Example #19
0
    def test_list_statistics_with_no_merge_metrics(self):
        """Statistics without merge_metrics must 409 on multi-metric names.

        Creates a third metric under the shared test name, polls the
        measurement listing until all three metrics are visible, then
        asserts that a statistics query lacking ``merge_metrics`` raises
        Conflict. Times out via self._check_timeout after MAX_RETRIES.
        """
        key = data_utils.rand_name('key')
        value = data_utils.rand_name('value')
        metric3 = helpers.create_metric(
            name=self._test_name,
            dimensions={key: value},
            timestamp=self._start_timestamp + 2000)
        self.monasca_client.create_metrics(metric3)
        # The window end is the same in both queries; compute it once.
        end_time_iso = helpers.timestamp_to_iso(
            self._start_timestamp + 1000 * 4)
        query_param = ('?name=' + str(self._test_name) +
                       '&start_time=' + self._start_time_iso +
                       '&end_time=' + end_time_iso +
                       '&merge_metrics=True')

        # BUG FIX: xrange does not exist on Python 3 -- use range.
        # Guard i/elements so the timeout check cannot hit a NameError
        # if MAX_RETRIES is ever configured as 0.
        i = 0
        elements = None
        for i in range(constants.MAX_RETRIES):
            resp, response_body = self.monasca_client.list_measurements(
                query_param)
            elements = response_body['elements']
            for element in elements:
                if (str(element['name']) == self._test_name and
                        len(element['measurements']) == 3):
                    # All three metrics are visible: a statistics query
                    # without merge_metrics must now conflict.
                    query_parms = ('?name=' + str(self._test_name) +
                                   '&statistics=avg' +
                                   '&start_time=' + str(self._start_time_iso) +
                                   '&end_time=' + str(end_time_iso) +
                                   '&period=100000')
                    self.assertRaises(exceptions.Conflict,
                                      self.monasca_client.list_statistics,
                                      query_parms)
                    return
            time.sleep(constants.RETRY_WAIT_SECS)
        self._check_timeout(i, constants.MAX_RETRIES, elements, 3)
Example #20
0
    def test_create_list_update_show_delete_security_group(self):
        """Exercise list, update, and show on a freshly created group."""
        group_create_body, name = self._create_security_group()
        created_id = group_create_body['security_group']['id']

        # The new group must appear in the listing.
        resp, list_body = self.client.list_security_groups()
        self.assertEqual('200', resp['status'])
        secgroup_list = [sg['id'] for sg in list_body['security_groups']]
        self.assertIn(created_id, secgroup_list)

        # Rename and re-describe the group.
        new_name = data_utils.rand_name('security-')
        new_description = data_utils.rand_name('security-description')
        resp, update_body = self.client.update_security_group(
            created_id,
            name=new_name,
            description=new_description)
        # The update response must carry the new attributes.
        self.assertEqual('200', resp['status'])
        self.assertEqual(update_body['security_group']['name'], new_name)
        self.assertEqual(update_body['security_group']['description'],
                         new_description)

        # Show must also return the updated attributes.
        resp, show_body = self.client.show_security_group(created_id)
        self.assertEqual(show_body['security_group']['name'], new_name)
        self.assertEqual(show_body['security_group']['description'],
                         new_description)
Example #21
0
    def test_tenant_update_name(self):
        """Update a tenant's name and verify the change persists."""
        # Update name attribute of a tenant
        t_name1 = data_utils.rand_name(name='tenant-')
        resp, body = self.client.create_tenant(t_name1)
        self.assertEqual(200, resp.status)
        tenant = body
        self.data.tenants.append(tenant)

        t_id = body['id']
        resp1_name = body['name']

        t_name2 = data_utils.rand_name(name='tenant2-')
        resp, body = self.client.update_tenant(t_id, name=t_name2)
        resp2_name = body['name']
        self.assertEqual(200, resp.status)
        self.assertNotEqual(resp1_name, resp2_name)

        # A fresh GET must return the updated name
        resp, body = self.client.get_tenant(t_id)
        resp3_name = body['name']

        self.assertNotEqual(resp1_name, resp3_name)
        self.assertEqual(t_name1, resp1_name)
        self.assertEqual(resp2_name, resp3_name)

        # This test manages its own tenant lifecycle, so clean up directly
        self.client.delete_tenant(t_id)
        self.data.tenants.remove(tenant)
    def test_patch_actions_in_alarm_definition(self):
        """Patch an alarm definition's name and actions, then verify via GET."""
        # Create a notification method to wire into the alarm actions.
        notification_name = data_utils.rand_name('notification-')
        notification_type = 'EMAIL'
        address = 'root@localhost'

        resp, response_body = self.monasca_client.create_notification_method(
            notification_name, type=notification_type, address=address)
        notification_id = self._verify_create_notification_method(
            resp, response_body, notification_name, notification_type, address)

        # Create an alarm definition
        response_body_list = self._create_alarm_definitions(
            expression=None, number_of_definitions=1)
        # Patch alarm definition: new name plus alarm/ok/undetermined
        # actions all pointing at the notification method created above
        patch_alarm_def_name = data_utils.rand_name('monitoring_alarm_update')
        resp, body = self.monasca_client.patch_alarm_definition(
            response_body_list[0]['id'],
            name=patch_alarm_def_name,
            expression=response_body_list[0]['expression'],
            actions_enabled='true',
            alarm_actions=[notification_id],
            ok_actions=[notification_id],
            undetermined_actions=[notification_id]
        )
        self.assertEqual(200, resp.status)
        self._verify_update_patch_alarm_definition(body, patch_alarm_def_name,
                                                   None, None, notification_id)
        # Get and verify details of an alarm after update
        resp, response_body = self.monasca_client.get_alarm_definition(
            response_body_list[0]['id'])
        self._verify_update_patch_alarm_definition(response_body,
                                                   patch_alarm_def_name, None,
                                                   None, notification_id)
        self._delete_notification(notification_id)
    def test_list_alarm_definitions_with_severity(self):
        """Filtering alarm definitions by severity returns only matches."""
        # Create a LOW-severity definition that must NOT match the filter.
        name = data_utils.rand_name('alarm_definition')
        expression = 'avg(cpu_utilization) >= 1000'
        alarm_definition = helpers.create_alarm_definition(
            name=name,
            description="description",
            expression=expression,
            severity="LOW")
        resp, res_body_create_alarm_def = (
            self.monasca_client.create_alarm_definitions(alarm_definition))
        self.assertEqual(201, resp.status)

        # Create a MEDIUM-severity definition that must match the filter.
        name = data_utils.rand_name('alarm_definition')
        expression = 'avg(cpu_utilization) >= 1000'
        alarm_definition = helpers.create_alarm_definition(
            name=name,
            description="description",
            expression=expression,
            severity="MEDIUM")
        resp, res_body_create_alarm_def = (
            self.monasca_client.create_alarm_definitions(alarm_definition))
        self.assertEqual(201, resp.status)

        # List with the severity filter; only the MEDIUM definition should
        # come back, and the response body/links must be well-formed.
        query_param = '?severity=MEDIUM'
        resp, response_body = (
            self.monasca_client.list_alarm_definitions(query_param))
        self._verify_list_alarm_definitions_response_body(resp, response_body)
        self._verify_list_get_alarm_definitions_elements(
            response_body['elements'], 1, res_body_create_alarm_def)
        self._verify_list_alarm_definitions_links(response_body['links'])
Example #24
0
 def test_create_get_delete_service(self):
     """Create a service, validate the create response, then fetch it
     back and verify every stored field matches what was created.

     The service is removed via the ``_del_service`` cleanup.
     """
     # Creating a Service with random name/type/description so repeated
     # runs cannot collide with pre-existing services.
     name = data_utils.rand_name('service')
     s_type = data_utils.rand_name('type')
     description = data_utils.rand_name('description')
     service_data = self.services_client.create_service(
         name=name, type=s_type,
         description=description)['OS-KSADM:service']
     # assertIsNotNone replaces the non-idiomatic
     # assertFalse(service_data['id'] is None) and gives a clearer
     # failure message.
     self.assertIsNotNone(service_data['id'])
     self.addCleanup(self._del_service, service_data['id'])
     # Verifying response body of create service
     self.assertIn('id', service_data)
     self.assertIn('name', service_data)
     self.assertEqual(name, service_data['name'])
     self.assertIn('type', service_data)
     self.assertEqual(s_type, service_data['type'])
     self.assertIn('description', service_data)
     self.assertEqual(description, service_data['description'])
     # Get service
     fetched_service = (
         self.services_client.show_service(service_data['id'])
         ['OS-KSADM:service'])
     # verifying the existence of service created
     self.assertIn('id', fetched_service)
     self.assertEqual(fetched_service['id'], service_data['id'])
     self.assertIn('name', fetched_service)
     self.assertEqual(fetched_service['name'], service_data['name'])
     self.assertIn('type', fetched_service)
     self.assertEqual(fetched_service['type'], service_data['type'])
     self.assertIn('description', fetched_service)
     self.assertEqual(fetched_service['description'],
                      service_data['description'])
    def test_list_alarm_definitions_with_multiple_dimensions(self):
        """An alarm definition created with two random dimensions can be
        listed back via a matching dimensions filter."""
        # Build an expression of the form
        # avg(cpu_utilization{k1=v1,k2=v2}) >= 1000 from random pairs.
        dims = {data_utils.rand_name('key-1'): data_utils.rand_name('value-1'),
                data_utils.rand_name('key-2'): data_utils.rand_name('value-2')}
        dim_expr = ','.join(k + '=' + v for k, v in dims.items())

        definition = helpers.create_alarm_definition(
            name=data_utils.rand_name('alarm_definition'),
            description="description",
            expression='avg(cpu_utilization{' + dim_expr + '}) >= 1000')
        resp, created_def = self.monasca_client.create_alarm_definitions(
            definition)
        self.assertEqual(201, resp.status)

        # List alarm definitions filtered on the same dimensions and
        # verify exactly the created definition is returned.
        dim_filter = ','.join(k + ':' + v for k, v in dims.items())
        resp, response_body = self.monasca_client.list_alarm_definitions(
            '?dimensions=' + dim_filter)
        self._verify_list_alarm_definitions_response_body(resp,
                                                          response_body)
        self._verify_list_get_alarm_definitions_elements(
            response_body['elements'], 1, created_def)
        self._verify_list_alarm_definitions_links(response_body['links'])
    def test_get_updated_quotas(self):
        """GET must reflect quota-set updates at both tenant and user
        scope."""
        # A throwaway tenant scopes the quota updates for this test.
        tenant_name = data_utils.rand_name('cpu_quota_tenant_')
        identity_client = self.os_adm.identity_client
        _, tenant = identity_client.create_tenant(
            name=tenant_name, description=tenant_name + '-desc')
        tenant_id = tenant['id']
        self.addCleanup(identity_client.delete_tenant, tenant_id)

        # Tenant-level: bump the RAM quota, then read it back.
        self.adm_client.update_quota_set(tenant_id, ram='5120')
        resp, quota_set = self.adm_client.get_quota_set(tenant_id)
        self.assertEqual(200, resp.status)
        self.assertEqual(5120, quota_set['ram'])

        # User-level: create a user inside the tenant, set a different
        # RAM quota for just that user, then read it back.
        user_name = data_utils.rand_name('cpu_quota_user_')
        _, user = identity_client.create_user(
            name=user_name,
            password=data_utils.rand_name('password-'),
            tenant_id=tenant_id,
            email=user_name + '@testmail.tm')
        user_id = user['id']
        self.addCleanup(identity_client.delete_user, user_id)

        self.adm_client.update_quota_set(tenant_id,
                                         user_id=user_id,
                                         ram='2048')
        resp, quota_set = self.adm_client.get_quota_set(tenant_id,
                                                        user_id=user_id)
        self.assertEqual(200, resp.status)
        self.assertEqual(2048, quota_set['ram'])
    def test_aggregate_basic_ops(self):
        """Exercise the aggregate lifecycle: create, set metadata, add a
        host, verify details, rename, replace metadata, verify again."""
        # Serialize with other tests that touch availability zones.
        self.useFixture(fixtures.LockFixture('availability_zone'))
        az = 'foo_zone'
        initial_name = data_utils.rand_name('aggregate-scenario')
        aggregate = self._create_aggregate(name=initial_name,
                                           availability_zone=az)

        first_metadata = {'meta_key': 'meta_value'}
        self._set_aggregate_metadata(aggregate, first_metadata)

        host = self._get_host_name()
        self._add_host(aggregate['id'], host)
        self._check_aggregate_details(aggregate, initial_name, az, [host],
                                      first_metadata)

        # Updating the name alone. The az must be specified again
        # otherwise the tempest client would send None in the put body.
        new_name = data_utils.rand_name('renamed-aggregate-scenario')
        aggregate = self._update_aggregate(aggregate, new_name, az)

        replacement_metadata = {'foo': 'bar'}
        self._set_aggregate_metadata(aggregate, replacement_metadata)

        self._check_aggregate_details(aggregate, aggregate['name'], az,
                                      [host], replacement_metadata)
    def test_create_alarm_definition_with_notification(self):
        """An alarm definition wired to a notification method stores the
        notification id in all three action lists."""
        # First create the notification method the definition refers to.
        notification_name = data_utils.rand_name('notification-')
        notification_type = 'EMAIL'
        notification_address = 'root@localhost'
        resp, response_body = self.monasca_client.create_notification_method(
            name=notification_name, type=notification_type,
            address=notification_address)
        notification_id = self._verify_create_notification_method(
            resp, response_body, notification_name, notification_type,
            notification_address)

        # Create an alarm definition that uses the notification for the
        # alarm, ok and undetermined transitions alike.
        definition = helpers.create_alarm_definition(
            name=data_utils.rand_name('monitoring_alarm_definition'),
            expression="mem_total_mb > 0",
            alarm_actions=[notification_id],
            ok_actions=[notification_id],
            undetermined_actions=[notification_id],
            severity="LOW")
        resp, response_body = self.monasca_client.create_alarm_definitions(
            definition)
        self._verify_create_alarm_definitions(resp, response_body,
                                              definition)
        # Every action list should carry exactly the notification id.
        for action_field in ('ok_actions', 'alarm_actions',
                             'undetermined_actions'):
            self.assertEqual(notification_id,
                             response_body[action_field][0])

        self._delete_notification(notification_id)
Example #29
0
 def _create_creds(self, suffix=None, admin=False):
     """Create a tenant and a user in it, optionally granting admin.

     Names are randomized from ``self.name`` with *suffix* appended
     when given. Returns the ``(user, tenant)`` pair.

     :raises exceptions.NotFound: if *admin* is requested but no role
         named 'admin' exists.
     """
     def _name_root():
         # Fresh random root per call; the suffix, when present, is
         # appended to each root independently.
         root = rand_name(self.name)
         return root + suffix if suffix else root

     tenant_name = _name_root() + "-tenant"
     tenant = self._create_tenant(name=tenant_name,
                                  description=tenant_name + "-desc")

     # Second random root keeps the user name independent of the
     # tenant name.
     user_root = _name_root()
     user = self._create_user(user_root + "-user", self.password,
                              tenant, user_root + "@example.com")
     if admin:
         roles = self._list_roles()
         if self.tempest_client:
             admin_roles = [r for r in roles if r['name'] == 'admin']
         else:
             admin_roles = [r for r in roles if r.name == 'admin']
         if not admin_roles:
             msg = "No admin role found"
             raise exceptions.NotFound(msg)
         role = admin_roles[0]
         if self.tempest_client:
             self._assign_user_role(tenant['id'], user['id'], role['id'])
         else:
             self._assign_user_role(tenant.id, user.id, role.id)
     return user, tenant
Example #30
0
    def populate_spam_table(self, table_name, usercount, itemcount):
        """Fill *table_name* with synthetic spam-email items.

        Creates ``usercount`` random users and, for each one,
        ``itemcount`` items keyed by a random date plus message id.

        :param table_name: target table to put items into.
        :param usercount: number of distinct user e-mails to generate.
        :param itemcount: items generated per user.
        :returns: the list of item dicts that were put.
        """
        # NOTE(review): the original built e-mails via
        # "<literal>" % rand_name() with no %s conversion specifier,
        # which raises TypeError at runtime. The literals now carry an
        # explicit placeholder; the domain is a placeholder — confirm
        # against the original fixture data.
        dates = ['2013-12-0%sT16:00:00.000001' % i for i in range(1, 8)]
        from_headers = ['%s@example.com' % rand_name() for _ in range(10)]
        to_headers = ['%s@example.com' % rand_name() for _ in range(10)]

        new_items = []
        for _ in range(usercount):
            email = '%s@example.com' % rand_name()

            # The loop counter previously shadowed the 'item' dict
            # below; it is unused, so name it '_'.
            for _ in range(itemcount):
                message_id = rand_uuid()
                date = random.choice(dates)
                # put item
                item = {
                    "user_id": {"S": email},
                    "date_message_id": {"S": date + "#" + message_id},
                    "message_id": {"S": message_id},
                    "from_header": {"S": random.choice(from_headers)},
                    "to_header": {"S": random.choice(to_headers)},
                }
                new_items.append(item)
                self.conn.put_item(table_name, item)
        return new_items
Example #31
0
 def test_create_keypair_when_public_key_bits_exceeds_maximum(self):
     """An oversized public key must be rejected with BadRequest."""
     oversized_key = 'ssh-rsa ' + 2048 * 'A' + ' openstack@ubuntu'
     self.assertRaises(exceptions.BadRequest,
                       self.client.create_keypair,
                       rand_name("keypair-"), oversized_key)
    def test_batch_operations(self):
        """End-to-end check of batch_write_item / batch_get_item.

        Creates five LSI-indexed tables, then cycles through batched
        puts, batched gets, a mixed put+delete batch and a final put of
        a key-only item, asserting after every batch call that nothing
        was left unprocessed and that reads return what was written.
        """
        # Create five tables with random dash-free names and wait until
        # each becomes ACTIVE before writing to it.
        tables = []
        for i in range(0, 5):
            tname = rand_name(self.table_prefix).replace('-', '')
            headers, body = self.client.create_table(ATTRIBUTE_DEFINITIONS,
                                                     tname, KEY_SCHEMA,
                                                     LSI_INDEXES)
            self.wait_for_table_active(tname)
            tables.append(tname)

        # Batch-put ITEM into every table in a single request.
        request_items = {
            "request_items":
            {tname: [{
                "put_request": {
                    "item": ITEM
                }
            }]
             for tname in tables}
        }
        headers, body = self.client.batch_write_item(request_items)
        self.assertEqual({}, body['unprocessed_items'])

        # Batch-get the item back from every table and verify it.
        request_items = {
            "request_items":
            {tname: {
                "keys": [ITEM_PRIMARY_KEY]
            }
             for tname in tables}
        }
        headers, body = self.client.batch_get_item(request_items)
        self.assertEqual({}, body['unprocessed_keys'])
        for tname in tables:
            self.assertEqual(ITEM, body['responses'][tname][0])

        # Mixed batch: put ITEM_ALT and delete the original key within
        # the same request for every table.
        request_items = {
            "request_items": {
                tname: [{
                    "put_request": {
                        "item": ITEM_ALT
                    }
                }, {
                    "delete_request": {
                        "key": ITEM_PRIMARY_KEY
                    }
                }]
                for tname in tables
            }
        }
        headers, body = self.client.batch_write_item(request_items)
        self.assertEqual({}, body['unprocessed_items'])
        # After the mixed batch, only ITEM_ALT should remain readable.
        request_items = {
            "request_items":
            {tname: {
                "keys": [ITEM_PRIMARY_KEY_ALT]
            }
             for tname in tables}
        }
        headers, body = self.client.batch_get_item(request_items)
        self.assertEqual({}, body['unprocessed_keys'])
        for tname in tables:
            self.assertEqual(1, len(body['responses'][tname]))
            self.assertEqual(ITEM_ALT, body['responses'][tname][0])

        # Overwrite with a key-only item (ITEM_PRIMARY_KEY_ALT used as
        # the full item) and verify the read returns exactly that.
        request_items = {
            "request_items": {
                tname: [{
                    "put_request": {
                        "item": ITEM_PRIMARY_KEY_ALT
                    }
                }]
                for tname in tables
            }
        }
        headers, body = self.client.batch_write_item(request_items)
        self.assertEqual({}, body['unprocessed_items'])
        request_items = {
            "request_items":
            {tname: {
                "keys": [ITEM_PRIMARY_KEY_ALT]
            }
             for tname in tables}
        }
        headers, body = self.client.batch_get_item(request_items)
        self.assertEqual({}, body['unprocessed_keys'])
        for tname in tables:
            self.assertEqual(ITEM_PRIMARY_KEY_ALT, body['responses'][tname][0])

        # Clean up: drop every table and wait for the deletion to land.
        for tname in tables:
            self.client.delete_table(tname)
            self.wait_for_table_deleted(tname)
    def test_items_indexed_table(self):
        """CRUD lifecycle of a single item on an LSI-indexed table.

        Checks that a missing item reads as empty through every path
        (get, query by key, query by index, scan), that a put item is
        visible exactly once through each path, that update_item changes
        it in place, and that delete removes it (and is idempotent).
        """
        tname = rand_name(self.table_prefix).replace('-', '')

        headers, body = self.client.create_table(ATTRIBUTE_DEFINITIONS, tname,
                                                 KEY_SCHEMA, LSI_INDEXES)

        self.wait_for_table_active(tname)

        # Retrieve a non-existing item: every read path returns empty.
        headers, body = self.client.get_item(tname, ITEM_PRIMARY_KEY)
        self.assertEqual({}, body)
        headers, body = self.client.query(tname, key_conditions=KEY_CONDITIONS)
        self.assertEqual(0, body['count'])
        headers, body = self.client.query(tname,
                                          key_conditions=KEY_CONDITIONS_INDEX,
                                          index_name=INDEX_NAME_N)
        self.assertEqual(0, body['count'])

        headers, body = self.client.scan(tname, scan_filter=SCAN_FILTER)
        self.assertEqual(0, body['count'])

        # Put the same item twice: the second put overwrites the first,
        # so each read path must still see exactly one item.
        self.client.put_item(tname, ITEM)
        self.client.put_item(tname, ITEM)

        headers, body = self.client.get_item(tname, ITEM_PRIMARY_KEY)
        self.assertEqual(ITEM, body['item'])
        headers, body = self.client.query(tname, key_conditions=KEY_CONDITIONS)
        self.assertEqual(1, len(body['items']))
        self.assertEqual(ITEM, body['items'][0])
        headers, body = self.client.query(tname,
                                          key_conditions=KEY_CONDITIONS_INDEX,
                                          index_name=INDEX_NAME_N)
        self.assertEqual(1, len(body['items']))
        self.assertEqual(ITEM, body['items'][0])
        headers, body = self.client.scan(tname, scan_filter=SCAN_FILTER)
        self.assertEqual(1, len(body['items']))
        self.assertEqual(ITEM, body['items'][0])

        # extend this put cases after fixing bug #1348336

        # Update the item and verify the stored copy matches the
        # locally computed expectation from _local_update.
        self.client.update_item(tname, ITEM_PRIMARY_KEY, ATTRIBUTES_UPDATE)
        updated_item = _local_update(ITEM, ATTRIBUTES_UPDATE)

        headers, body = self.client.get_item(tname, ITEM_PRIMARY_KEY)
        self.assertEqual(updated_item, body['item'])

        # Delete the item: every read path goes back to empty.
        self.client.delete_item(tname, ITEM_PRIMARY_KEY)

        headers, body = self.client.get_item(tname, ITEM_PRIMARY_KEY)
        self.assertEqual({}, body)
        headers, body = self.client.query(tname, key_conditions=KEY_CONDITIONS)
        self.assertEqual(0, body['count'])
        headers, body = self.client.query(tname,
                                          key_conditions=KEY_CONDITIONS_INDEX,
                                          index_name=INDEX_NAME_N)
        self.assertEqual(0, body['count'])
        headers, body = self.client.scan(tname, scan_filter=SCAN_FILTER)
        self.assertEqual(0, body['count'])

        # Deleting an already-deleted item must not raise.
        self.client.delete_item(tname, ITEM_PRIMARY_KEY)

        self.client.delete_table(tname)
        self.wait_for_table_deleted(tname)
    def test_table_operations(self):
        """Full table lifecycle plus the error behaviour around it.

        Verifies that every data operation raises NotFound before the
        table exists, that duplicate creation raises BadRequest, that
        all operations succeed while the table is ACTIVE, that deletion
        removes it from the listing, and that data does not survive a
        delete/recreate cycle.
        """
        tname = rand_name(self.table_prefix).replace('-', '')
        url = '{url}/tables/{table}'.format(url=self.client.base_url,
                                            table=tname)

        # The fresh random table must not be in the listing yet.
        resp, body = self.client.list_tables()
        tables = [table['href'] for table in body['tables']]
        self.assertNotIn(url, tables)

        # Every operation against the missing table raises NotFound
        # with the same message.
        not_found_msg = "'%s' does not exist" % tname
        self._check_exception(exceptions.NotFound, not_found_msg,
                              self.client.delete_table, tname)
        self._check_exception(exceptions.NotFound, not_found_msg,
                              self.client.get_item, tname, ITEM_PRIMARY_KEY)
        self._check_exception(exceptions.NotFound,
                              not_found_msg,
                              self.client.query,
                              tname,
                              key_conditions=KEY_CONDITIONS)
        self._check_exception(exceptions.NotFound,
                              not_found_msg,
                              self.client.query,
                              tname,
                              index_name=INDEX_NAME_N,
                              key_conditions=KEY_CONDITIONS_INDEX)
        self._check_exception(exceptions.NotFound,
                              not_found_msg,
                              self.client.scan,
                              tname,
                              scan_filter=SCAN_FILTER)
        self._check_exception(exceptions.NotFound, not_found_msg,
                              self.client.put_item, tname, ITEM)
        self._check_exception(exceptions.NotFound, not_found_msg,
                              self.client.update_item, tname, ITEM_PRIMARY_KEY,
                              ATTRIBUTES_UPDATE)
        self._check_exception(exceptions.NotFound, not_found_msg,
                              self.client.delete_item, tname, ITEM_PRIMARY_KEY)
        self._check_exception(exceptions.NotFound, not_found_msg,
                              self.client.describe_table, tname)

        # The failed operations above must not have created the table.
        resp, body = self.client.list_tables()
        tables = [table['href'] for table in body['tables']]
        self.assertNotIn(url, tables)

        headers, body = self.client.create_table(ATTRIBUTE_DEFINITIONS, tname,
                                                 KEY_SCHEMA, LSI_INDEXES)

        # Re-creating is rejected both while the table is still being
        # created and after it is ACTIVE.
        exc_message = 'Table %s already exists' % tname
        self._check_exception(exceptions.BadRequest, exc_message,
                              self.client.create_table, ATTRIBUTE_DEFINITIONS,
                              tname, KEY_SCHEMA, LSI_INDEXES)

        self.assertTrue(self.wait_for_table_active(tname))

        self._check_exception(exceptions.BadRequest, exc_message,
                              self.client.create_table, ATTRIBUTE_DEFINITIONS,
                              tname, KEY_SCHEMA, LSI_INDEXES)

        # Now the table shows up in the listing.
        resp, body = self.client.list_tables()
        tables = [table['href'] for table in body['tables']]
        self.assertIn(url, tables)

        # All data operations succeed on the ACTIVE table (no assertions
        # on the payloads here; only that no exception is raised).
        self.client.put_item(tname, ITEM)
        self.client.get_item(tname, ITEM_PRIMARY_KEY)
        self.client.query(tname, key_conditions=KEY_CONDITIONS)
        self.client.query(tname,
                          key_conditions=KEY_CONDITIONS_INDEX,
                          index_name=INDEX_NAME_N)
        self.client.scan(tname, scan_filter=SCAN_FILTER)
        self.client.update_item(tname, ITEM_PRIMARY_KEY, ATTRIBUTES_UPDATE)
        self.client.delete_item(tname, ITEM_PRIMARY_KEY)
        self.client.describe_table(tname)
        self.client.delete_table(tname)

        self.assertTrue(self.wait_for_table_deleted(tname))

        # Once deleted, deleting again raises NotFound and the table is
        # gone from the listing.
        self._check_exception(exceptions.NotFound, not_found_msg,
                              self.client.delete_table, tname)

        resp, body = self.client.list_tables()
        tables = [table['href'] for table in body['tables']]
        self.assertNotIn(url, tables)

        # checking that data in the table is not accessible after table
        # deletion
        headers, body = self.client.create_table(ATTRIBUTE_DEFINITIONS, tname,
                                                 KEY_SCHEMA, LSI_INDEXES)
        self.wait_for_table_active(tname)
        self.client.put_item(tname, ITEM)
        headers, body = self.client.query(tname, key_conditions=KEY_CONDITIONS)
        self.assertEqual(1, body['count'])

        self.client.delete_table(tname)
        self.wait_for_table_deleted(tname)

        # A recreated table with the same name must come back empty.
        headers, body = self.client.create_table(ATTRIBUTE_DEFINITIONS, tname,
                                                 KEY_SCHEMA, LSI_INDEXES)
        self.wait_for_table_active(tname)
        headers, body = self.client.query(tname, key_conditions=KEY_CONDITIONS)
        self.assertEqual(0, body['count'])

        self.client.delete_table(tname)
        self.wait_for_table_deleted(tname)
 def resource_setup(cls):
     """Create the volume type (with extra specs) shared by the tests."""
     super(ExtraSpecsNegativeV2Test, cls).resource_setup()
     cls.extra_specs = {"spec1": "val1"}
     cls.volume_type = cls.volume_types_client.create_volume_type(
         data_utils.rand_name('Volume-type-'),
         extra_specs=cls.extra_specs)
 def test_aggregate_create_as_user(self):
     """A regular (non-admin) user must not be able to create an
     aggregate."""
     self.assertRaises(
         exceptions.Unauthorized,
         self.user_client.create_aggregate,
         name=data_utils.rand_name(self.aggregate_name_prefix))
Example #37
0
 def setUp(self):
     """Create a fresh, randomly named container before each test."""
     super(PublicObjectTest, self).setUp()
     name = data_utils.rand_name(name='TestContainer')
     self.container_name = name
     self.container_client.create_container(name)
Example #38
0
 def test_keypair_delete_nonexistant_key(self):
     """Deleting a keypair that never existed must raise NotFound."""
     missing_name = rand_name("keypair-non-existant-")
     self.assertRaises(exceptions.NotFound,
                       self.client.delete_keypair, missing_name)
Example #39
0
 def setUp(self):
     """Pick a random, dash-free table name for each test."""
     super(MagnetoDBBatchWriteTest, self).setUp()
     base = rand_name(self.table_prefix)
     # Table names must not contain dashes, so strip them out.
     self.tname = base.replace('-', '')
Example #40
0
 def test_create_keypair_with_empty_public_key(self):
     """A whitespace-only public key must be rejected with BadRequest."""
     blank_key = ' '
     self.assertRaises(exceptions.BadRequest, self.client.create_keypair,
                       rand_name("keypair-"), blank_key)
Example #41
0
 def test_list_volumes_detail_with_nonexistent_name(self):
     """Filtering the detailed listing by an unused name yields an
     empty result."""
     bogus_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
     volumes = self.client.list_volumes(
         detail=True, params={self.name_field: bogus_name})['volumes']
     self.assertEqual(0, len(volumes))
Example #42
0
 def test_keypair_create_with_invalid_pub_key(self):
     """A syntactically invalid (non-RSA) public key must be rejected
     with BadRequest."""
     junk_key = "ssh-rsa JUNK nova@ubuntu"
     self.assertRaises(exceptions.BadRequest,
                       self.client.create_keypair,
                       rand_name('keypair-'), junk_key)
    def test_search_nonexistent_hypervisor(self):
        """Searching for a hypervisor that does not exist must raise
        NotFound."""
        bogus_name = data_utils.rand_name('test_hypervisor')
        self.assertRaises(exceptions.NotFound,
                          self.client.search_hypervisor, bogus_name)
Example #44
0
 def test_create_tenant_by_unauthorized_user(self):
     """A non-administrator user must not be authorized to create a
     tenant."""
     self.assertRaises(exceptions.Unauthorized,
                       self.non_admin_client.create_tenant,
                       data_utils.rand_name(name='tenant-'))
Example #45
0
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """Workload driver. Executes an action function against a nova-cluster.

    :param tests: list of test descriptor dicts; each may carry
        'action' (class path), 'threads', 'use_admin',
        'use_isolated_tenants' and 'kwargs'.
    :param duration: wall-clock seconds to run when ``max_runs`` is None.
    :param max_runs: per-process cap on action runs; when set, the
        supervision loop polls for all workers to terminate instead of
        waiting out the duration.
    :param stop_on_error: abort the whole run as soon as any process
        records a failure.
    :returns: 1 if any process failed (or errors were found in the
        target logs), 0 otherwise.
    """
    admin_manager = clients.AdminManager()

    ssh_user = CONF.stress.target_ssh_user
    ssh_key = CONF.stress.target_private_key_path
    logfiles = CONF.stress.target_logfiles
    log_check_interval = int(CONF.stress.log_check_interval)
    default_thread_num = int(CONF.stress.default_thread_number_per_action)
    if logfiles:
        # Start from clean logs on every compute node so the later log
        # scan only sees errors produced by this run.
        controller = CONF.stress.target_controller
        computes = _get_compute_nodes(controller, ssh_user, ssh_key)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node, ssh_user, ssh_key)
    for test in tests:
        if test.get('use_admin', False):
            manager = admin_manager
        else:
            manager = clients.Manager()
        for p_number in moves.xrange(test.get('threads', default_thread_num)):
            if test.get('use_isolated_tenants', False):
                # Give each worker its own throwaway tenant/user so the
                # actions cannot interfere with each other.
                username = data_utils.rand_name("stress_user")
                tenant_name = data_utils.rand_name("stress_tenant")
                password = "******"
                identity_client = admin_manager.identity_client
                _, tenant = identity_client.create_tenant(name=tenant_name)
                identity_client.create_user(username,
                                            password,
                                            tenant['id'],
                                            "email")
                manager = clients.Manager(username=username,
                                          password="******",
                                          tenant_name=tenant_name)

            test_obj = importutils.import_class(test['action'])
            test_run = test_obj(manager, max_runs, stop_on_error)

            # NOTE: dict.iteritems() is Python 2 only.
            kwargs = test.get('kwargs', {})
            test_run.setUp(**dict(kwargs.iteritems()))

            LOG.debug("calling Target Object %s" %
                      test_run.__class__.__name__)

            # A manager-backed shared dict lets the parent read the
            # per-process run/fail counters while the child executes.
            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic['runs'] = 0
            shared_statistic['fails'] = 0

            p = multiprocessing.Process(target=test_run.execute,
                                        args=(shared_statistic,))

            process = {'process': p,
                       'p_number': p_number,
                       'action': test_run.action,
                       'statistic': shared_statistic}

            # 'processes' is a module-level list defined outside this
            # view; it is also read by the supervision loop below.
            processes.append(process)
            p.start()
    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    # Supervision loop: run for 'duration' seconds, or (with max_runs
    # set) until every worker process has terminated.
    while True:
        if max_runs is None:
            remaining = end_time - time.time()
            if remaining <= 0:
                break
        else:
            remaining = log_check_interval
            all_proc_term = True
            for process in processes:
                if process['process'].is_alive():
                    all_proc_term = False
                    break
            if all_proc_term:
                break

        time.sleep(min(remaining, log_check_interval))
        if stop_on_error:
            # NOTE(review): this 'break' exits only the inner for-loop,
            # not the while-loop, so a detected failure does not stop
            # the run here; failures are still counted in the summary
            # below. Confirm whether that is intended.
            for process in processes:
                if process['statistic']['fails'] > 0:
                    break

        if not logfiles:
            continue
        if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
                              stop_on_error):
            had_errors = True
            break

    terminate_all_processes()

    # Aggregate and report per-process statistics.
    sum_fails = 0
    sum_runs = 0

    LOG.info("Statistics (per process):")
    for process in processes:
        if process['statistic']['fails'] > 0:
            had_errors = True
        sum_runs += process['statistic']['runs']
        sum_fails += process['statistic']['fails']
        LOG.info(" Process %d (%s): Run %d actions (%d failed)" %
                 (process['p_number'],
                  process['action'],
                  process['statistic']['runs'],
                     process['statistic']['fails']))
    LOG.info("Summary:")
    LOG.info("Run %d actions (%d failed)" %
             (sum_runs, sum_fails))

    if not had_errors and CONF.stress.full_clean_stack:
        LOG.info("cleaning up")
        cleanup.cleanup()
    if had_errors:
        return 1
    else:
        return 0
Example #46
0
 def test_create_tenant_by_unauthorized_user(self):
     """A non-administrator user must be forbidden from creating a
     tenant."""
     self.assertRaises(lib_exc.Forbidden,
                       self.non_admin_tenants_client.create_tenant,
                       data_utils.rand_name(name='tenant'))
 def test_create_image_specify_invalid_metadata(self):
     """Creating a snapshot with an empty metadata key must fail with
     BadRequest."""
     bad_meta = {'': ''}
     self.assertRaises(exceptions.BadRequest, self.client.create_image,
                       self.server_id,
                       data_utils.rand_name('test-snap-'), bad_meta)
Example #48
0
 def setUp(self):
     """Pick a random, dash-free table name for each test."""
     super(MagnetoDBListBackupsTest, self).setUp()
     base = rand_name(self.table_prefix)
     # Table names must not contain dashes, so strip them out.
     self.tname = base.replace('-', '')
Example #49
0
    def test_create_backup(self):
        """Create rotating server backups and verify rotation semantics.

        With rotation=2, creating a third backup must cause glance to
        delete the oldest (first) backup image automatically.
        """
        # Positive test:create backup successfully and rotate backups correctly
        # create the first and the second backup

        # Check if glance v1 is available to determine which client to use. We
        # prefer glance v1 for the compute API tests since the compute image
        # API proxy was written for glance v1.
        if CONF.image_feature_enabled.api_v1:
            glance_client = self.os.image_client
        elif CONF.image_feature_enabled.api_v2:
            glance_client = self.os.image_client_v2
        else:
            raise lib_exc.InvalidConfiguration(
                'Either api_v1 or api_v2 must be True in '
                '[image-feature-enabled].')

        backup1 = data_utils.rand_name('backup-1')
        resp = self.client.create_backup(self.server_id,
                                         backup_type='daily',
                                         rotation=2,
                                         name=backup1).response
        oldest_backup_exist = True

        # the oldest one should be deleted automatically in this test
        def _clean_oldest_backup(oldest_backup):
            # Cleanup guard: only attempt deletion of the first backup if
            # rotation was not confirmed to have removed it (the flag is
            # flipped to False further below once deletion is verified).
            if oldest_backup_exist:
                try:
                    glance_client.delete_image(oldest_backup)
                except lib_exc.NotFound:
                    pass
                else:
                    LOG.warning("Deletion of oldest backup %s should not have "
                                "been successful as it should have been "
                                "deleted during rotation." % oldest_backup)

        image1_id = data_utils.parse_image_id(resp['location'])
        self.addCleanup(_clean_oldest_backup, image1_id)
        waiters.wait_for_image_status(glance_client,
                                      image1_id, 'active')

        backup2 = data_utils.rand_name('backup-2')
        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
        resp = self.client.create_backup(self.server_id,
                                         backup_type='daily',
                                         rotation=2,
                                         name=backup2).response
        image2_id = data_utils.parse_image_id(resp['location'])
        self.addCleanup(glance_client.delete_image, image2_id)
        waiters.wait_for_image_status(glance_client,
                                      image2_id, 'active')

        # verify they have been created
        properties = {
            'image_type': 'backup',
            'backup_type': "daily",
            'instance_uuid': self.server_id,
        }
        params = {
            'status': 'active',
            'sort_key': 'created_at',
            'sort_dir': 'asc'
        }
        if CONF.image_feature_enabled.api_v1:
            # Glance v1 filters on custom properties via 'property-<key>'
            # query parameters.
            for key, value in properties.items():
                params['property-%s' % key] = value
            image_list = glance_client.list_images(
                detail=True,
                **params)['images']
        else:
            # Additional properties are flattened in glance v2.
            params.update(properties)
            image_list = glance_client.list_images(params)['images']

        self.assertEqual(2, len(image_list))
        # Listing is sorted ascending by created_at, so backup1 comes first.
        self.assertEqual((backup1, backup2),
                         (image_list[0]['name'], image_list[1]['name']))

        # create the third one, due to the rotation is 2,
        # the first one will be deleted
        backup3 = data_utils.rand_name('backup-3')
        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
        resp = self.client.create_backup(self.server_id,
                                         backup_type='daily',
                                         rotation=2,
                                         name=backup3).response
        image3_id = data_utils.parse_image_id(resp['location'])
        self.addCleanup(glance_client.delete_image, image3_id)
        # the first back up should be deleted
        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
        glance_client.wait_for_resource_deletion(image1_id)
        # Rotation removed the first image; tell the cleanup closure not to.
        oldest_backup_exist = False
        if CONF.image_feature_enabled.api_v1:
            image_list = glance_client.list_images(
                detail=True, **params)['images']
        else:
            image_list = glance_client.list_images(params)['images']
        self.assertEqual(2, len(image_list),
                         'Unexpected number of images for '
                         'v2:test_create_backup; was the oldest backup not '
                         'yet deleted? Image list: %s' %
                         [image['name'] for image in image_list])
        self.assertEqual((backup2, backup3),
                         (image_list[0]['name'], image_list[1]['name']))
 def test_create_image_specify_metadata_over_limits(self):
     """Image creation with metadata beyond the 256-char limit must
     be rejected with a 400 BadRequest.
     """
     oversized_key = 'a' * 260
     metadata = {oversized_key: 'b' * 260}
     image_name = data_utils.rand_name('test-snap-')
     self.assertRaises(exceptions.BadRequest,
                       self.client.create_image,
                       self.server_id, image_name, metadata)
    def test_compute_with_volumes(self):
        """EC2 1. integration test (not strict).

        Boots an instance through the EC2 API, wires up a security
        group and a floating address, attaches an EBS volume, verifies
        the attach/detach both via the volume status and via the
        guest's partition table, then stops the instance and releases
        the address.
        """
        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
        sec_group_name = data_utils.rand_name("securitygroup-")
        group_desc = sec_group_name + " security group description "
        security_group = self.ec2_client.create_security_group(sec_group_name,
                                                               group_desc)
        self.addResourceCleanUp(self.destroy_security_group_wait,
                                security_group)
        # Allow ICMP (ping) and SSH from anywhere.
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="icmp",
                cidr_ip="0.0.0.0/0",
                from_port=-1,
                to_port=-1))
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="tcp",
                cidr_ip="0.0.0.0/0",
                from_port=22,
                to_port=22))
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type,
                                    key_name=self.keypair_name,
                                    security_groups=(sec_group_name,))

        LOG.debug("Instance booted - state: %s",
                  reservation.instances[0].state)

        self.addResourceCleanUp(self.destroy_reservation,
                                reservation)
        volume = self.ec2_client.create_volume(1, self.zone)
        LOG.debug("Volume created - status: %s", volume.status)

        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        LOG.debug("Instance now running - state: %s", instance.state)

        address = self.ec2_client.allocate_address()
        rcuk_a = self.addResourceCleanUp(address.delete)
        self.assertTrue(address.associate(instance.id))

        rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependency/permission ?

        self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): it may be reports available before it is available

        ssh = remote_client.RemoteClient(address.public_ip,
                                         CONF.compute.ssh_user,
                                         pkey=self.keypair.material)
        # Write a marker through the SSH session and wait for it to show
        # up in the instance's console output.
        text = data_utils.rand_name("Pattern text for console output -")
        resp = ssh.write_to_console(text)
        self.assertFalse(resp)

        def _output():
            # Poll helper: latest console output text for re_search_wait.
            output = instance.get_console_output()
            return output.output

        wait.re_search_wait(_output, text)
        # Snapshot the guest's partition list before attaching the volume.
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/vdh")

        def _volume_state():
            """Return volume state realizing that 'in-use' is overloaded."""
            volume.update(validate=True)
            status = volume.status
            attached = volume.attach_data.status
            LOG.debug("Volume %s is in status: %s, attach_status: %s",
                      volume.id, status, attached)
            # Nova reports 'in-use' on 'attaching' volumes because we
            # have a single volume status, and EC2 has 2. Ensure that
            # if we aren't attached yet we return something other than
            # 'in-use'
            if status == 'in-use' and attached != 'attached':
                return 'attaching'
            else:
                return status

        wait.re_search_wait(_volume_state, "in-use")

        # NOTE(afazekas): Different hypervisor backends name the devices
        # differently, so we only check that the number of partitions
        # increased/decreased rather than looking for a specific device.

        def _part_state():
            # Compare the current partition listing against the snapshot
            # taken before the last attach/detach.
            current = ssh.get_partitions().split('\n')
            LOG.debug("Partition map for instance: %s", current)
            if current > part_lines:
                return 'INCREASE'
            if current < part_lines:
                return 'DECREASE'
            return 'EQUAL'

        wait.state_wait(_part_state, 'INCREASE')
        part_lines = ssh.get_partitions().split('\n')

        # TODO(afazekas): Resource compare to the flavor settings

        volume.detach()

        # NOTE(review): _volume_state (a callable) is passed here where the
        # earlier call site passes the volume object itself — confirm that
        # assertVolumeStatusWait accepts a callable as its first argument.
        self.assertVolumeStatusWait(_volume_state, "available")
        wait.re_search_wait(_volume_state, "available")

        wait.state_wait(_part_state, 'DECREASE')

        instance.stop()
        address.disassociate()
        self.assertAddressDissasociatedWait(address)
        self.cancelResourceCleanUp(rcuk_da)
        address.release()
        self.assertAddressReleasedWait(address)
        self.cancelResourceCleanUp(rcuk_a)

        LOG.debug("Instance %s state: %s", instance.id, instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
    def test_create_image_specify_name_over_256_chars(self):
        """A snapshot request whose name exceeds 256 characters must
        fail with a 400 BadRequest.
        """
        long_name = data_utils.rand_name('a' * 260)
        self.assertRaises(exceptions.BadRequest,
                          self.client.create_image,
                          self.server_id, long_name)
Example #53
0
 def test_update_non_existent_router_returns_404(self):
     """Updating a router that does not exist must raise NotFound."""
     bogus_router_id = data_utils.rand_name('non_exist_router')
     self.assertRaises(lib_exc.NotFound,
                       self.client.update_router,
                       bogus_router_id,
                       name="new_name")
Example #54
0
 def test_create_role_by_unauthorized_user(self):
     """A non-admin user must be forbidden from creating a role."""
     # Non-administrator user should not be able to create role
     name = data_utils.rand_name(name='role')
     self.assertRaises(lib_exc.Forbidden,
                       self.non_admin_client.create_role,
                       name)
Example #55
0
 def resource_setup(cls):
     """Provision the router/network/subnet fixtures shared by the tests."""
     super(DvrRoutersNegativeTest, cls).resource_setup()
     router_name = data_utils.rand_name('router')
     cls.router = cls.create_router(router_name)
     cls.network = cls.create_network()
     cls.subnet = cls.create_subnet(cls.network)
Example #56
0
 def _generate_name(self):
     """Return a randomized name derived from this resource's base name."""
     base_name = self._name
     return data_utils.rand_name(base_name)
Example #57
0
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """Workload driver. Executes an action function against a nova-cluster.

    :param tests: list of test descriptors; each dict may carry 'action'
        (importable class path), 'threads', 'use_admin',
        'use_isolated_tenants', 'kwargs' and 'required_services'.
    :param duration: wall-clock seconds to run when max_runs is None.
    :param max_runs: per-process run limit; when set, the driver waits for
        all worker processes to terminate instead of watching the clock.
    :param stop_on_error: abort the whole run as soon as any worker records
        a failure (a SIGCHLD handler is installed for prompt notification).
    :return: 1 if any worker failed or errors were found in the target
        logs, 0 otherwise.

    Side effects: appends worker descriptors to the module-level
    ``processes`` list and may wipe remote log files on the compute nodes.
    """
    admin_manager = credentials.AdminManager()

    ssh_user = STRESS_CONF.stress.target_ssh_user
    ssh_key = STRESS_CONF.stress.target_private_key_path
    logfiles = STRESS_CONF.stress.target_logfiles
    log_check_interval = int(STRESS_CONF.stress.log_check_interval)
    default_thread_num = int(
        STRESS_CONF.stress.default_thread_number_per_action)
    if logfiles:
        # Start from clean logs so the periodic error scan only picks up
        # messages produced by this run.
        controller = STRESS_CONF.stress.target_controller
        computes = _get_compute_nodes(controller, ssh_user, ssh_key)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node, ssh_user, ssh_key)
    skip = False
    for test in tests:
        for service in test.get('required_services', []):
            if not CONF.service_available.get(service):
                skip = True
                break
        if skip:
            break
        # TODO(andreaf) This has to be reworked to use the credential
        # provider interface. For now only tests marked as 'use_admin' will
        # work.
        if test.get('use_admin', False):
            manager = admin_manager
        else:
            # Fixed: 'NotImplemented' is the non-callable comparison
            # singleton; raising here requires NotImplementedError.
            raise NotImplementedError('Non admin tests are not supported')
        for p_number in range(test.get('threads', default_thread_num)):
            if test.get('use_isolated_tenants', False):
                username = data_utils.rand_name("stress_user")
                tenant_name = data_utils.rand_name("stress_tenant")
                password = "******"
                if CONF.identity.auth_version == 'v2':
                    identity_client = admin_manager.identity_client
                    projects_client = admin_manager.tenants_client
                    roles_client = admin_manager.roles_client
                    users_client = admin_manager.users_client
                    domains_client = None
                else:
                    identity_client = admin_manager.identity_v3_client
                    projects_client = admin_manager.projects_client
                    roles_client = admin_manager.roles_v3_client
                    users_client = admin_manager.users_v3_client
                    domains_client = admin_manager.domains_client
                domain = (identity_client.auth_provider.credentials.
                          get('project_domain_name', 'Default'))
                credentials_client = cred_client.get_creds_client(
                    identity_client, projects_client, users_client,
                    roles_client, domains_client, project_domain_name=domain)
                project = credentials_client.create_project(
                    name=tenant_name, description=tenant_name)
                user = credentials_client.create_user(username, password,
                                                      project, "email")
                # Add roles specified in config file
                for conf_role in CONF.auth.tempest_roles:
                    credentials_client.assign_user_role(user, project,
                                                        conf_role)
                creds = credentials_client.get_credentials(user, project,
                                                           password)
                manager = clients.Manager(credentials=creds)

            test_obj = importutils.import_class(test['action'])
            test_run = test_obj(manager, max_runs, stop_on_error)

            kwargs = test.get('kwargs', {})
            test_run.setUp(**dict(six.iteritems(kwargs)))

            # Lazy %-style args: formatting is skipped when DEBUG is off.
            LOG.debug("calling Target Object %s",
                      test_run.__class__.__name__)

            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic['runs'] = 0
            shared_statistic['fails'] = 0

            p = multiprocessing.Process(target=test_run.execute,
                                        args=(shared_statistic,))

            process = {'process': p,
                       'p_number': p_number,
                       'action': test_run.action,
                       'statistic': shared_statistic}

            processes.append(process)
            p.start()
    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    try:
        while True:
            if max_runs is None:
                remaining = end_time - time.time()
                if remaining <= 0:
                    break
            else:
                # max_runs mode: finish once every worker has exited.
                remaining = log_check_interval
                all_proc_term = True
                for process in processes:
                    if process['process'].is_alive():
                        all_proc_term = False
                        break
                if all_proc_term:
                    break

            time.sleep(min(remaining, log_check_interval))
            if stop_on_error:
                if any([True for proc in processes
                        if proc['statistic']['fails'] > 0]):
                    break

            if not logfiles:
                continue
            if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
                                  stop_on_error):
                had_errors = True
                break
    except KeyboardInterrupt:
        LOG.warning("Interrupted, going to print statistics and exit ...")

    if stop_on_error:
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    terminate_all_processes()

    sum_fails = 0
    sum_runs = 0

    LOG.info("Statistics (per process):")
    for process in processes:
        if process['statistic']['fails'] > 0:
            had_errors = True
        sum_runs += process['statistic']['runs']
        sum_fails += process['statistic']['fails']
        print("Process %d (%s): Run %d actions (%d failed)" % (
            process['p_number'],
            process['action'],
            process['statistic']['runs'],
            process['statistic']['fails']))
    print("Summary:")
    print("Run %d actions (%d failed)" % (sum_runs, sum_fails))

    if not had_errors and STRESS_CONF.stress.full_clean_stack:
        LOG.info("cleaning up")
        cleanup.cleanup()
    if had_errors:
        return 1
    else:
        return 0
Example #58
0
 def test_router_create_tenant_distributed_returns_forbidden(self):
     """A tenant may not request a distributed router; expect Forbidden."""
     router_name = data_utils.rand_name('router')
     self.assertRaises(lib_exc.Forbidden,
                       self.create_router,
                       router_name,
                       distributed=True)
Example #59
0
    def test_device_tagging(self):
        """Boot a server with tagged NICs and volumes and verify that the
        tags are exposed to the guest through the metadata service and/or
        the config drive.
        """
        # Create volumes
        # The create_volume methods waits for the volumes to be available and
        # the base class will clean them up on tearDown.
        boot_volume = self.create_volume(CONF.compute.image_ref)
        other_volume = self.create_volume()
        untagged_volume = self.create_volume()

        # Create networks
        net1 = self.networks_client.create_network(
            name=data_utils.rand_name('device-tagging-net1'))['network']
        self.addCleanup(self.networks_client.delete_network, net1['id'])

        net2 = self.networks_client.create_network(
            name=data_utils.rand_name('device-tagging-net2'))['network']
        self.addCleanup(self.networks_client.delete_network, net2['id'])

        # Create subnets
        subnet1 = self.subnets_client.create_subnet(
            network_id=net1['id'],
            cidr='10.1.1.0/24',
            ip_version=4)['subnet']
        self.addCleanup(self.subnets_client.delete_subnet, subnet1['id'])

        subnet2 = self.subnets_client.create_subnet(
            network_id=net2['id'],
            cidr='10.2.2.0/24',
            ip_version=4)['subnet']
        self.addCleanup(self.subnets_client.delete_subnet, subnet2['id'])

        # Create two ports on net1 so they can carry distinct tags
        self.port1 = self.ports_client.create_port(
            network_id=net1['id'],
            fixed_ips=[{'subnet_id': subnet1['id']}])['port']
        self.addCleanup(self.ports_client.delete_port, self.port1['id'])

        self.port2 = self.ports_client.create_port(
            network_id=net1['id'],
            fixed_ips=[{'subnet_id': subnet1['id']}])['port']
        self.addCleanup(self.ports_client.delete_port, self.port2['id'])

        # Create server
        admin_pass = data_utils.rand_password()
        config_drive_enabled = CONF.compute_feature_enabled.config_drive

        server = self.create_test_server(
            validatable=True,
            config_drive=config_drive_enabled,
            adminPass=admin_pass,
            name=data_utils.rand_name('device-tagging-server'),
            networks=[
                # Validation network for ssh
                {
                    'uuid': self.get_tenant_network()['id']
                },
                # Different tags for different ports
                {
                    'port': self.port1['id'],
                    'tag': 'port-1'
                },
                {
                    'port': self.port2['id'],
                    'tag': 'port-2'
                },
                # Two nics on same net, one tagged one not
                {
                    'uuid': net1['id'],
                    'tag': 'net-1'
                },
                {
                    'uuid': net1['id']
                },
                # Two nics on same net, different IP
                {
                    'uuid': net2['id'],
                    'fixed_ip': '10.2.2.100',
                    'tag': 'net-2-100'
                },
                {
                    'uuid': net2['id'],
                    'fixed_ip': '10.2.2.200',
                    'tag': 'net-2-200'
                }
            ],
            block_device_mapping_v2=[
                # Boot volume
                {
                    'uuid': boot_volume['id'],
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'boot_index': 0,
                    'tag': 'boot'
                },
                # Other volume
                {
                    'uuid': other_volume['id'],
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'boot_index': 1,
                    'tag': 'other'
                },
                # Untagged volume
                {
                    'uuid': untagged_volume['id'],
                    'source_type': 'volume',
                    'destination_type': 'volume',
                    'boot_index': 2
                }
            ])

        self.addCleanup(self.delete_server, server['id'])

        self.ssh_client = remote_client.RemoteClient(
            self.get_server_ip(server),
            CONF.validation.image_ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'],
            server=server,
            servers_client=self.servers_client)

        # Find the MAC addresses of our fixed IPs
        self.net_2_100_mac = None
        self.net_2_200_mac = None
        ifaces = self.interfaces_client.list_interfaces(server['id'])
        for iface in ifaces['interfaceAttachments']:
            if 'fixed_ips' in iface:
                for ip in iface['fixed_ips']:
                    if ip['ip_address'] == '10.2.2.100':
                        self.net_2_100_mac = iface['mac_addr']
                    if ip['ip_address'] == '10.2.2.200':
                        self.net_2_200_mac = iface['mac_addr']
        # Make sure we have the MACs we need, there's no reason for some to be
        # missing
        self.assertTrue(self.net_2_100_mac)
        self.assertTrue(self.net_2_200_mac)

        # Verify metadata from metadata service
        if CONF.compute_feature_enabled.metadata_service:
            md_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
            LOG.info('Attempting to verify tagged devices in server %s via '
                     'the metadata service: %s', server['id'], md_url)

            def get_and_verify_metadata():
                # Guests without curl can only be checked via the config
                # drive; skip outright if that is disabled too.
                try:
                    self.ssh_client.exec_command('curl -V')
                except exceptions.SSHExecCommandFailed:
                    if not CONF.compute_feature_enabled.config_drive:
                        raise self.skipException('curl not found in guest '
                                                 'and config drive is '
                                                 'disabled')
                    LOG.warning('curl was not found in the guest, device '
                                'tagging metadata was not checked in the '
                                'metadata API')
                    return True
                cmd = 'curl %s' % md_url
                md_json = self.ssh_client.exec_command(cmd)
                self.verify_device_metadata(md_json)
                return True

            if not test.call_until_true(get_and_verify_metadata,
                                        CONF.compute.build_timeout,
                                        CONF.compute.build_interval):
                raise exceptions.TimeoutException('Timeout while verifying '
                                                  'metadata on server.')

        # Verify metadata on config drive
        if CONF.compute_feature_enabled.config_drive:
            # Locate the config drive device by its well-known label,
            # mount it, and read the metadata JSON directly.
            cmd_blkid = 'blkid -t LABEL=config-2 -o device'
            LOG.info('Attempting to verify tagged devices in server %s via '
                     'the config drive.', server['id'])
            dev_name = self.ssh_client.exec_command(cmd_blkid)
            dev_name = dev_name.rstrip()
            self.ssh_client.exec_command('sudo mount %s /mnt' % dev_name)
            cmd_md = 'sudo cat /mnt/openstack/latest/meta_data.json'
            md_json = self.ssh_client.exec_command(cmd_md)
            self.verify_device_metadata(md_json)
Example #60
0
 def test_create_update_delete_pool_vip(self):
     """Exercise the full lifecycle of a LBaaS pool and its VIP.

     Creates a pool and a VIP on it, verifies the VIP address with a
     show, updates both resources, then deletes them, checking the
     HTTP status of every call.
     """
     # Creates a vip
     name = data_utils.rand_name('vip-')
     # Use the last address of the subnet's allocation pool for the VIP.
     address = self.subnet['allocation_pools'][0]['end']
     resp, body = self.client.create_pool(
         name=data_utils.rand_name("pool-"),
         lb_method='ROUND_ROBIN',
         protocol='HTTP',
         subnet_id=self.subnet['id'])
     pool = body['pool']
     resp, body = self.client.create_vip(name=name,
                                         protocol="HTTP",
                                         protocol_port=80,
                                         subnet_id=self.subnet['id'],
                                         pool_id=pool['id'],
                                         address=address)
     self.assertEqual('201', resp['status'])
     vip = body['vip']
     vip_id = vip['id']
     # Confirm VIP's address correctness with a show
     resp, body = self.client.show_vip(vip_id)
     self.assertEqual('200', resp['status'])
     vip = body['vip']
     self.assertEqual(address, vip['address'])
     # Verification of vip update
     new_name = "New_vip"
     new_description = "New description"
     persistence_type = "HTTP_COOKIE"
     update_data = {"session_persistence": {"type": persistence_type}}
     resp, body = self.client.update_vip(vip_id,
                                         name=new_name,
                                         description=new_description,
                                         connection_limit=10,
                                         admin_state_up=False,
                                         **update_data)
     self.assertEqual('200', resp['status'])
     updated_vip = body['vip']
     self.assertEqual(new_name, updated_vip['name'])
     self.assertEqual(new_description, updated_vip['description'])
     self.assertEqual(10, updated_vip['connection_limit'])
     self.assertFalse(updated_vip['admin_state_up'])
     self.assertEqual(persistence_type,
                      updated_vip['session_persistence']['type'])
     # Verification of vip delete
     resp, body = self.client.delete_vip(vip['id'])
     self.assertEqual('204', resp['status'])
     self.client.wait_for_resource_deletion('vip', vip['id'])
     # Verification of pool update
     new_name = "New_pool"
     resp, body = self.client.update_pool(pool['id'],
                                          name=new_name,
                                          description="new_description",
                                          lb_method='LEAST_CONNECTIONS')
     self.assertEqual('200', resp['status'])
     updated_pool = body['pool']
     self.assertEqual(new_name, updated_pool['name'])
     self.assertEqual('new_description', updated_pool['description'])
     self.assertEqual('LEAST_CONNECTIONS', updated_pool['lb_method'])
     # Verification of pool delete
     resp, body = self.client.delete_pool(pool['id'])
     self.assertEqual('204', resp['status'])