def test_list_security_services_with_detail(self, version):
    """List security services with details and verify the result.

    Checks that both fixture services are present and that every
    returned entry carries the expected detail keys; the 'ou' key
    must appear only from API microversion 2.44 onward.
    """
    utils.check_skip_if_microversion_not_supported(version)
    # 'ou' (organizational unit) is only returned from 2.44 onward;
    # a plain boolean assignment replaces the redundant
    # "True if ... else False" conditional expression.
    with_ou = utils.is_microversion_ge(version, '2.44')
    if utils.is_microversion_ge(version, '2.0'):
        listed = self.shares_v2_client.list_security_services(
            detailed=True, version=version)['security_services']
    else:
        listed = self.shares_client.list_security_services(
            detailed=True)['security_services']

    # Both fixture services must be in the listing.
    self.assertTrue(any(self.ss_ldap['id'] == ss['id'] for ss in listed))
    self.assertTrue(
        any(self.ss_kerberos['id'] == ss['id'] for ss in listed))

    # verify keys
    keys = [
        "name", "id", "status", "description",
        "domain", "server", "dns_ip", "user", "password", "type",
        "created_at", "updated_at", "project_id",
    ]
    # Explicit loops instead of a side-effect list comprehension.
    for s_s in listed:
        for key in keys:
            self.assertIn(key, s_s.keys())
    for ss in listed:
        self.assertEqual(with_ou, 'ou' in ss.keys())
def setUp(self):
    """Create the LDAP and Kerberos security services used by tests."""
    super(SecurityServicesTest, self).setUp()
    # The 'ou' field is only accepted by the API from 2.44 onward.
    ou_supported = utils.is_microversion_ge(
        CONF.share.max_api_microversion, '2.44')

    ss_ldap_data = {
        'name': 'ss_ldap',
        'dns_ip': '1.1.1.1',
        'server': 'fake_server_1',
        'domain': 'fake_domain_1',
        'user': '******',
        'password': '******',
    }
    ss_kerberos_data = {
        'name': 'ss_kerberos',
        'dns_ip': '2.2.2.2',
        'server': 'fake_server_2',
        'domain': 'fake_domain_2',
        'user': '******',
        'password': '******',
    }
    if ou_supported:
        ss_ldap_data['ou'] = 'OU=fake_unit_1'
        ss_kerberos_data['ou'] = 'OU=fake_unit_2'

    self.ss_ldap = self.create_security_service('ldap', **ss_ldap_data)
    self.ss_kerberos = self.create_security_service(
        'kerberos', **ss_kerberos_data)
def _test_manage(self, share, is_public=False,
                 version=CONF.share.max_api_microversion):
    """Manage a previously unmanaged share and verify the result.

    Brings the given share back under manila management, waits for it
    to become available, checks the returned fields against the
    original share (microversion-dependent expectations), and finally
    deletes it, asserting it is gone.
    """
    name = "Name for 'managed' share that had ID %s" % share['id']
    description = "Description for 'managed' share"

    # Manage share
    managed_share = self.shares_v2_client.manage_share(
        service_host=share['host'],
        export_path=share['export_locations'][0],
        protocol=share['share_proto'],
        share_type_id=self.st['share_type']['id'],
        name=name,
        description=description,
        is_public=is_public,
        version=version,
    )

    # Add managed share to cleanup queue
    self.method_resources.insert(
        0, {
            'type': 'share',
            'id': managed_share['id'],
            'client': self.shares_client
        })

    # Wait for success
    self.shares_v2_client.wait_for_share_status(managed_share['id'],
                                                'available')

    # Verify data of managed share
    self.assertEqual(name, managed_share['name'])
    self.assertEqual(description, managed_share['description'])
    self.assertEqual(share['host'], managed_share['host'])
    self.assertEqual(share['share_proto'], managed_share['share_proto'])

    if utils.is_microversion_ge(version, "2.6"):
        # From 2.6 the API returns the share type ID, not its name.
        self.assertEqual(self.st['share_type']['id'],
                         managed_share['share_type'])
    else:
        self.assertEqual(self.st['share_type']['name'],
                         managed_share['share_type'])

    if utils.is_microversion_ge(version, "2.8"):
        # 'is_public' is honored only from 2.8 onward.
        self.assertEqual(is_public, managed_share['is_public'])
    else:
        self.assertFalse(managed_share['is_public'])

    if utils.is_microversion_ge(version, "2.16"):
        # 'user_id' is returned only from 2.16 onward.
        self.assertEqual(share['user_id'], managed_share['user_id'])
    else:
        self.assertNotIn('user_id', managed_share)

    # Delete share
    self.shares_v2_client.delete_share(managed_share['id'])
    self.shares_v2_client.wait_for_resource_deletion(
        share_id=managed_share['id'])
    self.assertRaises(lib_exc.NotFound,
                      self.shares_v2_client.get_share,
                      managed_share['id'])
def test_list_snapshots_with_detail(self, version):
    """List snapshots with details; verify keys and list uniqueness."""
    params = None
    if version and utils.is_microversion_ge(version, '2.36'):
        # From 2.36 listings may be filtered by name/description pattern.
        params = {'name~': 'tempest', 'description~': 'tempest'}

    # list share snapshots
    if version is None:
        snaps = self.shares_client.list_snapshots_with_detail()
    else:
        utils.skip_if_microversion_not_supported(version)
        snaps = self.shares_v2_client.list_snapshots_with_detail(
            version=version, params=params)

    # verify keys
    expected_keys = [
        "status", "links", "share_id", "name",
        "share_proto", "created_at", "description", "id",
        "share_size", "size",
    ]
    if version and utils.is_microversion_ge(version, '2.17'):
        # user_id/project_id are returned from API 2.17 onward.
        expected_keys.extend(["user_id", "project_id"])

    # strict key check -- explicit loop instead of a side-effect
    # list comprehension.
    for s in snaps:
        self.assertEqual(set(expected_keys), set(s.keys()))

    # our share id in list and have no duplicates
    gen = [sid["id"] for sid in snaps if sid["id"] in self.snap["id"]]
    msg = "expected id lists %s times in share list" % (len(gen))
    self.assertEqual(1, len(gen), msg)
def _get_share_instance(self, version):
    """Test that we get the proper keys back for the instance."""
    share_instances = self.shares_v2_client.get_instances_of_share(
        self.share['id'], version=version,
    )
    si = self.shares_v2_client.get_share_instance(
        share_instances[0]['id'], version=version)
    # Keys present at every supported microversion.
    expected_keys = [
        'host', 'share_id', 'id', 'share_network_id', 'status',
        'availability_zone', 'share_server_id', 'created_at',
    ]
    # Additional keys appear (or disappear) depending on microversion.
    if utils.is_microversion_lt(version, '2.9'):
        expected_keys.extend(["export_location", "export_locations"])
    if utils.is_microversion_ge(version, '2.10'):
        expected_keys.append("access_rules_status")
    if utils.is_microversion_ge(version, '2.11'):
        expected_keys.append("replica_state")
    if utils.is_microversion_ge(version, '2.22'):
        expected_keys.append("share_type_id")
    if utils.is_microversion_ge(version, '2.30'):
        expected_keys.append("cast_rules_to_readonly")
    expected_keys = sorted(expected_keys)
    actual_keys = sorted(si.keys())
    # Strict comparison: the instance must expose exactly these keys.
    self.assertEqual(expected_keys, actual_keys,
                     'Share instance %s returned incorrect keys; '
                     'expected %s, got %s.' % (
                         si['id'], expected_keys, actual_keys))
def _list_shares_with_detail(self, version):
    """List shares with details; verify keys and list uniqueness."""
    # list shares
    shares = self.shares_v2_client.list_shares_with_detail(
        version=six.text_type(version))

    # verify keys -- "snapshot_id" appeared twice in the original
    # list; checking it once is sufficient.
    keys = [
        "status", "description", "links", "availability_zone",
        "created_at", "project_id", "volume_type", "share_proto",
        "name", "snapshot_id", "id", "size", "share_network_id",
        "metadata", "host", "is_public", "share_type",
    ]
    if utils.is_microversion_lt(version, '2.9'):
        keys.extend(["export_location", "export_locations"])
    if utils.is_microversion_ge(version, '2.2'):
        keys.append("snapshot_support")
    if utils.is_microversion_ge(version, '2.4'):
        keys.extend(["consistency_group_id",
                     "source_cgsnapshot_member_id"])
    if utils.is_microversion_ge(version, '2.6'):
        keys.append("share_type_name")
    # Explicit loops instead of a side-effect list comprehension.
    for sh in shares:
        for key in keys:
            self.assertIn(key, sh.keys())

    # our shares in list and have no duplicates
    for share in self.shares:
        gen = [sid["id"] for sid in shares if sid["id"] in share["id"]]
        msg = "expected id lists %s times in share list" % (len(gen))
        self.assertEqual(1, len(gen), msg)
def _verify_export_location_structure(
        self, export_locations, role='admin', version=LATEST_MICROVERSION,
        format='summary'):
    """Verify keys and value formats of export location structures.

    :param export_locations: a single export location dict or an
        iterable of them.
    :param role: 'admin' expects admin-only keys in addition to the
        tenant-visible ones.
    :param version: microversion used to compute expected keys.
    :param format: 'summary' or 'detail'; 'detail' adds timestamps.
    """

    # Determine which keys to expect based on role, version and format
    summary_keys = ['id', 'path']
    if utils.is_microversion_ge(version, '2.14'):
        # 'preferred' flag exists from 2.14 onward.
        summary_keys += ['preferred']

    admin_summary_keys = summary_keys + [
        'share_instance_id', 'is_admin_only']

    detail_keys = summary_keys + ['created_at', 'updated_at']

    admin_detail_keys = admin_summary_keys + ['created_at', 'updated_at']

    if format == 'summary':
        if role == 'admin':
            expected_keys = admin_summary_keys
        else:
            expected_keys = summary_keys
    else:
        if role == 'admin':
            expected_keys = admin_detail_keys
        else:
            expected_keys = detail_keys

    # Accept a single dict by normalizing it to a one-element tuple.
    if not isinstance(export_locations, (list, tuple, set)):
        export_locations = (export_locations, )

    for export_location in export_locations:

        # Check that the correct keys are present
        self.assertEqual(len(expected_keys), len(export_location))
        for key in expected_keys:
            self.assertIn(key, export_location)

        # Check the format of ever-present summary keys
        self.assertTrue(uuidutils.is_uuid_like(export_location['id']))
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(export_location['path'],
                              six.string_types)

        if utils.is_microversion_ge(version, '2.14'):
            self.assertIn(export_location['preferred'], (True, False))

        if role == 'admin':
            self.assertIn(export_location['is_admin_only'], (True, False))
            self.assertTrue(uuidutils.is_uuid_like(
                export_location['share_instance_id']))

        # Check the format of the detail keys
        if format == 'detail':
            for time in (export_location['created_at'],
                         export_location['updated_at']):
                # If var 'time' has incorrect value then ValueError
                # exception is expected to be raised. So, just try parse
                # it making assertion that it has proper date value.
                timeutils.parse_strtime(time)
def _get_share(self, version):
    """Get share details and verify keys and basic attributes."""
    # get share
    share = self.shares_v2_client.get_share(
        self.shares[0]['id'], version=six.text_type(version))

    # verify keys -- "snapshot_id" appeared twice in the original
    # list; checking it once is sufficient.
    expected_keys = [
        "status", "description", "links", "availability_zone",
        "created_at", "project_id", "volume_type", "share_proto",
        "name", "snapshot_id", "id", "size", "share_network_id",
        "metadata", "host", "is_public",
    ]
    if utils.is_microversion_lt(version, '2.9'):
        expected_keys.extend(["export_location", "export_locations"])
    if utils.is_microversion_ge(version, '2.2'):
        expected_keys.append("snapshot_support")
    if utils.is_microversion_ge(version, '2.4'):
        expected_keys.extend(
            ["consistency_group_id", "source_cgsnapshot_member_id"])
    if utils.is_microversion_ge(version, '2.5'):
        expected_keys.append("share_type_name")
    if utils.is_microversion_ge(version, '2.10'):
        expected_keys.append("access_rules_status")
    if utils.is_microversion_ge(version, '2.11'):
        expected_keys.append("replication_type")
    actual_keys = list(share.keys())
    # Explicit loop instead of a side-effect list comprehension.
    for key in expected_keys:
        self.assertIn(key, actual_keys)

    # verify values
    msg = "Expected name: '%s', actual name: '%s'" % (self.share_name,
                                                      share["name"])
    self.assertEqual(self.share_name, six.text_type(share["name"]), msg)

    msg = ("Expected description: '%s', "
           "actual description: '%s'" % (self.share_desc,
                                         share["description"]))
    self.assertEqual(self.share_desc,
                     six.text_type(share["description"]), msg)

    msg = "Expected size: '%s', actual size: '%s'" % (self.share_size,
                                                      share["size"])
    self.assertEqual(self.share_size, int(share["size"]), msg)
def _test_manage(self, share, is_public=False,
                 version=CONF.share.max_api_microversion):
    """Manage an unmanaged share, verify its fields, then delete it.

    Expectations for 'share_type', 'is_public' and 'user_id' depend on
    the requested API microversion.
    """
    name = "Name for 'managed' share that had ID %s" % share['id']
    description = "Description for 'managed' share"

    # Manage share
    managed_share = self.shares_v2_client.manage_share(
        service_host=share['host'],
        export_path=share['export_locations'][0],
        protocol=share['share_proto'],
        share_type_id=self.st['share_type']['id'],
        name=name,
        description=description,
        is_public=is_public,
        version=version,
    )

    # Add managed share to cleanup queue
    self.method_resources.insert(
        0, {'type': 'share',
            'id': managed_share['id'],
            'client': self.shares_client})

    # Wait for success
    self.shares_v2_client.wait_for_share_status(managed_share['id'],
                                                'available')

    # Verify data of managed share
    self.assertEqual(name, managed_share['name'])
    self.assertEqual(description, managed_share['description'])
    self.assertEqual(share['host'], managed_share['host'])
    self.assertEqual(share['share_proto'], managed_share['share_proto'])

    if utils.is_microversion_ge(version, "2.6"):
        # From 2.6 the API returns the share type ID, not its name.
        self.assertEqual(self.st['share_type']['id'],
                         managed_share['share_type'])
    else:
        self.assertEqual(self.st['share_type']['name'],
                         managed_share['share_type'])

    if utils.is_microversion_ge(version, "2.8"):
        # 'is_public' is honored only from 2.8 onward.
        self.assertEqual(is_public, managed_share['is_public'])
    else:
        self.assertFalse(managed_share['is_public'])

    if utils.is_microversion_ge(version, "2.16"):
        # 'user_id' is returned only from 2.16 onward.
        self.assertEqual(share['user_id'], managed_share['user_id'])
    else:
        self.assertNotIn('user_id', managed_share)

    # Delete share
    self.shares_v2_client.delete_share(managed_share['id'])
    self.shares_v2_client.wait_for_resource_deletion(
        share_id=managed_share['id'])
    self.assertRaises(lib_exc.NotFound,
                      self.shares_v2_client.get_share,
                      managed_share['id'])
def resource_setup(cls):
    """Set up class-level resources: share types and unmanaged shares.

    Creates a matching and a deliberately mismatching share type,
    creates several shares in parallel (count depends on the maximum
    supported microversion), then unmanages them so individual tests
    can re-manage them.
    """
    super(ManageNFSShareTest, cls).resource_setup()
    if cls.protocol not in CONF.share.enable_protocols:
        message = "%s tests are disabled" % cls.protocol
        raise cls.skipException(message)

    # Create share types
    cls.st_name = data_utils.rand_name("manage-st-name")
    cls.st_name_invalid = data_utils.rand_name("manage-st-name-invalid")
    cls.extra_specs = {
        'storage_protocol': CONF.share.capability_storage_protocol,
        'driver_handles_share_servers': False,
        'snapshot_support': six.text_type(
            CONF.share.capability_snapshot_support),
    }
    # NOTE(review): presumably "invalid" because DHSS=True does not
    # match the backend under test -- confirm against the test plan.
    cls.extra_specs_invalid = {
        'storage_protocol': CONF.share.capability_storage_protocol,
        'driver_handles_share_servers': True,
        'snapshot_support': six.text_type(
            CONF.share.capability_snapshot_support),
    }
    cls.st = cls.create_share_type(
        name=cls.st_name,
        cleanup_in_class=True,
        extra_specs=cls.extra_specs)
    cls.st_invalid = cls.create_share_type(
        name=cls.st_name_invalid,
        cleanup_in_class=True,
        extra_specs=cls.extra_specs_invalid)
    creation_data = {'kwargs': {
        'share_type_id': cls.st['share_type']['id'],
        'share_protocol': cls.protocol,
    }}

    # Data for creating shares in parallel
    data = [creation_data, creation_data]
    # More shares are needed to cover microversion-specific tests.
    if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.5"):
        data.append(creation_data)
    if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.8"):
        data.append(creation_data)
    shares_created = cls.create_shares(data)

    cls.shares = []
    # Load all share data (host, etc.)
    for share in shares_created:
        # Unmanage shares from manila
        cls.shares.append(cls.shares_client.get_share(share['id']))
        cls.shares_client.unmanage_share(share['id'])
        cls.shares_client.wait_for_resource_deletion(
            share_id=share['id'])
def test_get_snapshot(self, version):
    """Get a snapshot and verify its keys and core attributes."""
    # get snapshot
    if version is None:
        snapshot = self.shares_client.get_snapshot(
            self.snap["id"])['snapshot']
    else:
        utils.check_skip_if_microversion_not_supported(version)
        snapshot = self.shares_v2_client.get_snapshot(
            self.snap["id"], version=version)['snapshot']

    # verify keys
    expected_keys = [
        "status", "links", "share_id", "name",
        "share_proto", "created_at", "description", "id",
        "share_size", "size"
    ]
    if version and utils.is_microversion_ge(version, '2.17'):
        # user_id/project_id are returned from API 2.17 onward.
        expected_keys.extend(["user_id", "project_id"])
    actual_keys = snapshot.keys()

    # strict key check
    self.assertEqual(set(expected_keys), set(actual_keys))

    # verify data
    msg = "Expected name: '%s', actual name: '%s'" % (self.snap_name,
                                                      snapshot["name"])
    self.assertEqual(self.snap_name, snapshot["name"], msg)

    msg = ("Expected description: '%s' actual description: '%s'"
           % (self.snap_desc, snapshot["description"]))
    self.assertEqual(self.snap_desc, snapshot["description"], msg)

    msg = ("Expected share_id: '%s', actual share_id: '%s'"
           % (self.shares[0]["id"], snapshot["share_id"]))
    self.assertEqual(self.shares[0]["id"], snapshot["share_id"], msg)

    # Verify that the user_id and project_id are same as the one for
    # the base share
    if version and utils.is_microversion_ge(version, '2.17'):
        msg = ("Expected %(key)s in snapshot: '%(expected)s', "
               "actual %(key)s in snapshot: '%(actual)s'")
        self.assertEqual(
            self.shares[0]['user_id'], snapshot['user_id'],
            msg % {
                'expected': self.shares[0]['user_id'],
                'actual': snapshot['user_id'],
                'key': 'user_id'
            })
        self.assertEqual(
            self.shares[0]['project_id'], snapshot['project_id'],
            msg % {
                'expected': self.shares[0]['project_id'],
                'actual': snapshot['project_id'],
                'key': 'project_id'
            })
def test_get_snapshot(self, version):
    """Get a snapshot and verify its keys and core attributes."""
    # get snapshot
    if version is None:
        snapshot = self.shares_client.get_snapshot(self.snap["id"])
    else:
        utils.skip_if_microversion_not_supported(version)
        snapshot = self.shares_v2_client.get_snapshot(
            self.snap["id"], version=version)

    # verify keys
    expected_keys = ["status", "links", "share_id", "name",
                     "share_proto", "created_at",
                     "description", "id", "share_size", "size"]
    if version and utils.is_microversion_ge(version, '2.17'):
        # user_id/project_id are returned from API 2.17 onward.
        expected_keys.extend(["user_id", "project_id"])
    actual_keys = snapshot.keys()

    # strict key check
    self.assertEqual(set(expected_keys), set(actual_keys))

    # verify data
    msg = "Expected name: '%s', actual name: '%s'" % (self.snap_name,
                                                      snapshot["name"])
    self.assertEqual(self.snap_name, snapshot["name"], msg)

    msg = ("Expected description: '%s' actual description: '%s'"
           % (self.snap_desc, snapshot["description"]))
    self.assertEqual(self.snap_desc, snapshot["description"], msg)

    msg = ("Expected share_id: '%s', actual share_id: '%s'"
           % (self.shares[0]["id"], snapshot["share_id"]))
    self.assertEqual(self.shares[0]["id"], snapshot["share_id"], msg)

    # Verify that the user_id and project_id are same as the one for
    # the base share
    if version and utils.is_microversion_ge(version, '2.17'):
        msg = ("Expected %(key)s in snapshot: '%(expected)s', "
               "actual %(key)s in snapshot: '%(actual)s'")
        self.assertEqual(self.shares[0]['user_id'],
                         snapshot['user_id'], msg % {
                             'expected': self.shares[0]['user_id'],
                             'actual': snapshot['user_id'],
                             'key': 'user_id'})
        self.assertEqual(self.shares[0]['project_id'],
                         snapshot['project_id'], msg % {
                             'expected': self.shares[0]['project_id'],
                             'actual': snapshot['project_id'],
                             'key': 'project_id'})
def _list_shares_with_detail(self, version):
    """List shares with details; verify keys and list uniqueness."""
    # list shares
    shares = self.shares_v2_client.list_shares_with_detail(
        version=six.text_type(version))

    # verify keys -- "snapshot_id" appeared twice in the original
    # list; checking it once is sufficient.
    keys = [
        "status", "description", "links", "availability_zone",
        "created_at", "project_id", "volume_type", "share_proto",
        "name", "snapshot_id", "id", "size", "share_network_id",
        "metadata", "host", "is_public", "share_type",
    ]
    if utils.is_microversion_lt(version, '2.9'):
        keys.extend(["export_location", "export_locations"])
    if utils.is_microversion_ge(version, '2.2'):
        keys.append("snapshot_support")
    if utils.is_microversion_ge(version, '2.4'):
        keys.extend(
            ["consistency_group_id", "source_cgsnapshot_member_id"])
    if utils.is_microversion_ge(version, '2.6'):
        keys.append("share_type_name")
    if utils.is_microversion_ge(version, '2.10'):
        keys.append("access_rules_status")
    if utils.is_microversion_ge(version, '2.11'):
        keys.append("replication_type")
    # Explicit loops instead of a side-effect list comprehension.
    for sh in shares:
        for key in keys:
            self.assertIn(key, sh.keys())

    # our shares in list and have no duplicates
    for share in self.shares:
        gen = [sid["id"] for sid in shares if sid["id"] in share["id"]]
        msg = "expected id lists %s times in share list" % (len(gen))
        self.assertEqual(1, len(gen), msg)
def _get_share_instance(self, version):
    """Test that we get the proper keys back for the instance."""
    instances = self.shares_v2_client.get_instances_of_share(
        self.share['id'], version=version,
    )
    instance = self.shares_v2_client.get_share_instance(
        instances[0]['id'], version=version)

    # Keys common to every supported microversion.
    expected_keys = [
        'host', 'share_id', 'id', 'share_network_id', 'status',
        'availability_zone', 'share_server_id', 'created_at',
    ]
    # Microversion-dependent additions.
    if utils.is_microversion_lt(version, '2.9'):
        expected_keys += ["export_location", "export_locations"]
    if utils.is_microversion_ge(version, '2.10'):
        expected_keys += ["access_rules_status"]
    expected_keys.sort()
    actual_keys = sorted(instance.keys())
    error = ('Share instance %s returned incorrect keys; '
             'expected %s, got %s.' % (instance['id'], expected_keys,
                                       actual_keys))
    self.assertEqual(expected_keys, actual_keys, error)
def test_list_share_groups_with_detail_min(self, version):
    """List share groups with details; verify keys and uniqueness."""
    params = None
    if utils.is_microversion_ge(version, '2.36'):
        # From 2.36 listings may be filtered by name/description pattern.
        params = {'name~': 'tempest', 'description~': 'tempest'}

    # List share groups
    share_groups = self.shares_v2_client.list_share_groups(
        detailed=True, params=params, version=version)

    # Verify keys
    for sg in share_groups:
        keys = set(sg.keys())
        self.assertTrue(
            constants.SHARE_GROUP_DETAIL_REQUIRED_KEYS.issubset(keys),
            'Not all required keys returned for share group %s. '
            'Expected at least: %s, found %s' % (
                sg['id'],
                constants.SHARE_GROUP_DETAIL_REQUIRED_KEYS,
                ','.join(keys),
            )
        )

    # Share group ids are in list exactly once
    for group_id in (self.share_group["id"], self.share_group2["id"]):
        gen = [share_group["id"] for share_group in share_groups
               if share_group["id"] == group_id]
        msg = ("Expected id %s exactly once in share group list"
               % group_id)
        self.assertEqual(1, len(gen), msg)
def test_share_type_create_list(self, version):
    """Create a share type and verify it appears in the type list."""
    self.skip_if_microversion_not_supported(version)
    name = data_utils.rand_name("tempest-manila")
    description = None
    if utils.is_microversion_ge(version, "2.41"):
        # Share type descriptions are supported from 2.41 onward.
        description = "Description for share type"
    extra_specs = self.add_extra_specs_to_dict()

    # Create share type
    st_create = self.create_share_type(
        name, extra_specs=extra_specs, version=version,
        description=description)
    self._verify_is_public_key_name(st_create['share_type'], version)
    st_id = st_create["share_type"]["id"]

    # list share types
    st_list = self.shares_v2_client.list_share_types(version=version)
    sts = st_list["share_types"]
    self.assertGreaterEqual(len(sts), 1)
    self.assertTrue(any(st_id in st["id"] for st in sts))
    for st in sts:
        self._verify_is_public_key_name(st, version)

    # Check that backwards compatibility didn't break
    vts = st_list["volume_types"]
    self.assertEqual(len(sts), len(vts))
    # Compare pairwise via zip instead of indexing by range(len(...)).
    for st, vt in zip(sts, vts):
        self.assertDictMatch(st, vt)
def test_share_type_create_get(self, version):
    """Create a share type and verify it via the get API."""
    self.skip_if_microversion_not_supported(version)
    name = data_utils.rand_name("tempest-manila")
    description = None
    if utils.is_microversion_ge(version, "2.41"):
        # Share type descriptions are supported from 2.41 onward.
        description = "Description for share type"
    extra_specs = self.add_extra_specs_to_dict({
        "key": "value",
    })

    # Create share type
    st_create = self.create_share_type(
        name, extra_specs=extra_specs, version=version,
        description=description)
    self.assertEqual(name, st_create['share_type']['name'])
    self._verify_description(
        description, st_create['share_type'], version)
    self._verify_is_public_key_name(st_create['share_type'], version)
    st_id = st_create["share_type"]["id"]

    # Get share type
    get = self.shares_v2_client.get_share_type(st_id, version=version)
    self.assertEqual(name, get["share_type"]["name"])
    self.assertEqual(st_id, get["share_type"]["id"])
    self._verify_description(description, get['share_type'], version)
    self.assertEqual(extra_specs, get["share_type"]["extra_specs"])
    self._verify_is_public_key_name(get['share_type'], version)

    # Check that backwards compatibility didn't break
    self.assertDictMatch(get["volume_type"], get["share_type"])
def _get_share_instance(self, version):
    """Test that we get the proper keys back for the instance."""
    share_instances = self.shares_v2_client.get_instances_of_share(
        self.share["id"], version=version)
    si = self.shares_v2_client.get_share_instance(
        share_instances[0]["id"], version=version)
    # Keys present at every supported microversion.
    expected_keys = [
        "host", "share_id", "id", "share_network_id", "status",
        "availability_zone", "share_server_id", "created_at",
    ]
    # Microversion-dependent additions/removals.
    if utils.is_microversion_lt(version, "2.9"):
        expected_keys.extend(["export_location", "export_locations"])
    if utils.is_microversion_ge(version, "2.10"):
        expected_keys.append("access_rules_status")
    expected_keys = sorted(expected_keys)
    actual_keys = sorted(si.keys())
    # Strict comparison: the instance must expose exactly these keys.
    self.assertEqual(
        expected_keys, actual_keys,
        "Share instance %s returned incorrect keys; "
        "expected %s, got %s." % (si["id"], expected_keys, actual_keys),
    )
def _test_manage(self, snapshot, version=CONF.share.max_api_microversion):
    """Manage an unmanaged snapshot and verify the API response.

    Manages the snapshot, waits until it is available, strictly checks
    the returned keys (microversion dependent), verifies the stored
    data, and finally deletes the snapshot, asserting it is gone.
    """
    name = ("Name for 'managed' snapshot that had ID %s" %
            snapshot['id'])
    description = "Description for 'managed' snapshot"
    utils.skip_if_manage_not_supported_for_version(version)

    # Manage snapshot
    share_id = snapshot['share_id']
    snapshot = self.shares_v2_client.manage_snapshot(
        share_id,
        snapshot['provider_location'],
        name=name,
        description=description,
        # Some drivers require additional parameters passed as driver
        # options, as follows:
        # - size: Hitachi HNAS Driver
        driver_options={'size': snapshot['size']},
        version=version,
    )['snapshot']

    # Add managed snapshot to cleanup queue
    self.method_resources.insert(
        0, {
            'type': 'snapshot',
            'id': snapshot['id'],
            'client': self.shares_v2_client
        })

    # Wait for success
    waiters.wait_for_resource_status(self.shares_v2_client,
                                     snapshot['id'],
                                     constants.STATUS_AVAILABLE,
                                     resource_name='snapshot')

    # Verify manage snapshot API response
    expected_keys = [
        "status", "links", "share_id", "name",
        "share_proto", "created_at", "description", "id",
        "share_size", "size", "provider_location"
    ]
    if utils.is_microversion_ge(version, '2.17'):
        # user_id/project_id are returned from API 2.17 onward.
        expected_keys.extend(["user_id", "project_id"])
    actual_keys = snapshot.keys()

    # Strict key check
    self.assertEqual(set(expected_keys), set(actual_keys))

    # Verify data of managed snapshot
    get_snapshot = self.shares_v2_client.get_snapshot(
        snapshot['id'])['snapshot']

    self.assertEqual(name, get_snapshot['name'])
    self.assertEqual(description, get_snapshot['description'])
    self.assertEqual(snapshot['share_id'], get_snapshot['share_id'])

    # Delete snapshot
    self.shares_v2_client.delete_snapshot(get_snapshot['id'])
    self.shares_client.wait_for_resource_deletion(
        snapshot_id=get_snapshot['id'])
    self.assertRaises(lib_exc.NotFound,
                      self.shares_v2_client.get_snapshot,
                      get_snapshot['id'])
def test_show_share_server(self):
    """Show a share server and verify its keys and field formats."""
    share = self.shares_v2_client.get_share(self.share["id"])
    server = self.shares_v2_client.show_share_server(
        share["share_server_id"])

    keys = [
        "id",
        "host",
        "project_id",
        "status",
        "share_network_name",
        "created_at",
        "updated_at",
        "backend_details",
    ]
    if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.49"):
        # These fields exist only from API version 2.49 onward.
        keys.append("is_auto_deletable")
        keys.append("identifier")

    # all expected keys are present
    for key in keys:
        self.assertIn(key, server.keys())

    # 'created_at' is valid date
    self.assertTrue(self.date_re.match(server["created_at"]))
    # 'updated_at' is valid date if set
    if server["updated_at"]:
        self.assertTrue(self.date_re.match(server["updated_at"]))

    # verify that values for following keys are not empty
    for k in ('host', 'id', 'project_id', 'status',
              'share_network_name'):
        self.assertGreater(len(server[k]), 0)

    # 'backend_details' should be a dict
    self.assertIsInstance(server["backend_details"], dict)
def test_try_read_extra_specs_on_share_type_with_user(self):
    """Non-admin users must see only the tenant-visible extra specs."""
    st = self._create_share_type()
    share_type = self.shares_v2_client.get_share_type(
        st['share_type']['id'])

    # Verify a non-admin can only read the required extra-specs
    expected_keys = ['driver_handles_share_servers', 'snapshot_support']
    # Additional tenant-visible specs appear with newer microversions.
    if utils.is_microversion_ge(CONF.share.max_api_microversion, '2.24'):
        expected_keys.append('create_share_from_snapshot_support')
    if utils.is_microversion_ge(
            CONF.share.max_api_microversion,
            constants.REVERT_TO_SNAPSHOT_MICROVERSION):
        expected_keys.append('revert_to_snapshot_support')
    if utils.is_microversion_ge(CONF.share.max_api_microversion, '2.32'):
        expected_keys.append('mount_snapshot_support')
    actual_keys = share_type['share_type']['extra_specs'].keys()
    self.assertEqual(sorted(expected_keys), sorted(actual_keys),
                     'Incorrect extra specs visible to non-admin user; '
                     'expected %s, got %s' % (expected_keys, actual_keys))
def test_try_read_extra_specs_on_share_type_with_user(self):
    """Non-admin users must see only the tenant-visible extra specs."""
    st = self.create_share_type(extra_specs=self.extra_specs)
    share_type = self.shares_v2_client.get_share_type(st['id'])

    # Build the set of specs the API is expected to expose; it grows
    # with the maximum supported microversion.
    expected_keys = ['driver_handles_share_servers', 'snapshot_support']
    max_version = CONF.share.max_api_microversion
    optional_specs = (
        ('2.24', 'create_share_from_snapshot_support'),
        (constants.REVERT_TO_SNAPSHOT_MICROVERSION,
         'revert_to_snapshot_support'),
        ('2.32', 'mount_snapshot_support'),
    )
    for min_version, spec in optional_specs:
        if utils.is_microversion_ge(max_version, min_version):
            expected_keys.append(spec)

    actual_keys = share_type['share_type']['extra_specs'].keys()
    self.assertEqual(
        sorted(expected_keys), sorted(actual_keys),
        'Incorrect extra specs visible to non-admin user; '
        'expected %s, got %s' % (expected_keys, actual_keys))
def test_get_security_service(self, version):
    """Create and fetch a security service; verify returned fields."""
    self.skip_if_microversion_not_supported(version)
    # 'ou' is supported only from API version 2.44 onward; a plain
    # boolean assignment replaces the redundant
    # "True if ... else False" conditional expression.
    with_ou = utils.is_microversion_ge(version, '2.44')
    data = self.generate_security_service_data(set_ou=with_ou)

    if utils.is_microversion_ge(version, '2.0'):
        ss = self.create_security_service(
            client=self.shares_v2_client, version=version, **data)
        get = self.shares_v2_client.get_security_service(
            ss["id"], version=version)
    else:
        ss = self.create_security_service(**data)
        get = self.shares_client.get_security_service(ss["id"])

    # Both the create and the get responses must contain the input
    # data, and 'ou' only when the microversion supports it.
    self.assertDictContainsSubset(data, ss)
    self.assertEqual(with_ou, 'ou' in ss)
    self.assertDictContainsSubset(data, get)
    self.assertEqual(with_ou, 'ou' in get)
def _get_share(self, version):
    """Get share details and verify keys and basic attributes."""
    # get share
    share = self.shares_v2_client.get_share(
        self.shares[0]['id'], version=six.text_type(version))

    # verify keys -- "snapshot_id" appeared twice in the original
    # list; checking it once is sufficient.
    expected_keys = [
        "status", "description", "links", "availability_zone",
        "created_at", "project_id", "volume_type", "share_proto",
        "name", "snapshot_id", "id", "size", "share_network_id",
        "metadata", "host", "is_public",
    ]
    if utils.is_microversion_lt(version, '2.9'):
        expected_keys.extend(["export_location", "export_locations"])
    if utils.is_microversion_ge(version, '2.2'):
        expected_keys.append("snapshot_support")
    if utils.is_microversion_ge(version, '2.4'):
        expected_keys.extend(["consistency_group_id",
                              "source_cgsnapshot_member_id"])
    if utils.is_microversion_ge(version, '2.5'):
        expected_keys.append("share_type_name")
    if utils.is_microversion_ge(version, '2.10'):
        expected_keys.append("access_rules_status")
    if utils.is_microversion_ge(version, '2.11'):
        expected_keys.append("replication_type")
    actual_keys = list(share.keys())
    # Explicit loop instead of a side-effect list comprehension.
    for key in expected_keys:
        self.assertIn(key, actual_keys)

    # verify values
    msg = "Expected name: '%s', actual name: '%s'" % (self.share_name,
                                                      share["name"])
    self.assertEqual(self.share_name, six.text_type(share["name"]), msg)

    msg = ("Expected description: '%s', "
           "actual description: '%s'" % (self.share_desc,
                                         share["description"]))
    self.assertEqual(
        self.share_desc, six.text_type(share["description"]), msg)

    msg = "Expected size: '%s', actual size: '%s'" % (self.share_size,
                                                      share["size"])
    self.assertEqual(self.share_size, int(share["size"]), msg)
def _get_share(self, version):
    """Get share details and verify keys and basic attributes."""
    # get share
    share = self.shares_v2_client.get_share(
        self.shares[0]['id'], version=str(version))['share']

    # verify keys -- "snapshot_id" appeared twice in the original
    # list; checking it once is sufficient.
    expected_keys = [
        "status", "description", "links", "availability_zone",
        "created_at", "project_id", "volume_type", "share_proto",
        "name", "snapshot_id", "id", "size", "share_network_id",
        "metadata", "is_public",
    ]
    if utils.is_microversion_lt(version, '2.9'):
        expected_keys.extend(["export_location", "export_locations"])
    if utils.is_microversion_ge(version, '2.2'):
        expected_keys.append("snapshot_support")
    if utils.is_microversion_ge(version, '2.5'):
        expected_keys.append("share_type_name")
    if utils.is_microversion_ge(version, '2.10'):
        expected_keys.append("access_rules_status")
    if utils.is_microversion_ge(version, '2.11'):
        expected_keys.append("replication_type")
    if utils.is_microversion_ge(version, '2.16'):
        expected_keys.append("user_id")
    if utils.is_microversion_ge(version, '2.24'):
        expected_keys.append("create_share_from_snapshot_support")
    if utils.is_microversion_ge(
            version, constants.REVERT_TO_SNAPSHOT_MICROVERSION):
        expected_keys.append("revert_to_snapshot_support")
    actual_keys = list(share.keys())
    # Explicit loop instead of a side-effect list comprehension.
    for key in expected_keys:
        self.assertIn(key, actual_keys)

    # verify values
    msg = "Expected name: '%s', actual name: '%s'" % (self.share_name,
                                                      share["name"])
    self.assertEqual(self.share_name, str(share["name"]), msg)

    msg = ("Expected description: '%s', "
           "actual description: '%s'" % (self.share_desc,
                                         share["description"]))
    self.assertEqual(self.share_desc, str(share["description"]), msg)

    msg = "Expected size: '%s', actual size: '%s'" % (
        CONF.share.share_size, share["size"])
    self.assertEqual(CONF.share.share_size, int(share["size"]), msg)
def _test_manage(self, snapshot, version=CONF.share.max_api_microversion):
    """Manage an unmanaged snapshot and verify the API response.

    Manages the snapshot, waits until it is available, strictly checks
    the returned keys (microversion dependent), verifies the stored
    data, and finally deletes the snapshot, asserting it is gone.
    """
    name = ("Name for 'managed' snapshot that had ID %s" %
            snapshot['id'])
    description = "Description for 'managed' snapshot"

    # Manage snapshot
    share_id = snapshot['share_id']
    snapshot = self.shares_v2_client.manage_snapshot(
        share_id,
        snapshot['provider_location'],
        name=name,
        description=description,
        driver_options={},
        version=version,
    )

    # Add managed snapshot to cleanup queue
    self.method_resources.insert(
        0, {
            'type': 'snapshot',
            'id': snapshot['id'],
            'client': self.shares_v2_client
        })

    # Wait for success
    self.shares_v2_client.wait_for_snapshot_status(snapshot['id'],
                                                   'available')

    # Verify manage snapshot API response
    expected_keys = [
        "status", "links", "share_id", "name",
        "share_proto", "created_at", "description", "id",
        "share_size", "size", "provider_location"
    ]
    if utils.is_microversion_ge(version, '2.17'):
        # user_id/project_id are returned from API 2.17 onward.
        expected_keys.extend(["user_id", "project_id"])
    actual_keys = snapshot.keys()

    # Strict key check
    self.assertEqual(set(expected_keys), set(actual_keys))

    # Verify data of managed snapshot
    get_snapshot = self.shares_v2_client.get_snapshot(snapshot['id'])

    self.assertEqual(name, get_snapshot['name'])
    self.assertEqual(description, get_snapshot['description'])
    self.assertEqual(snapshot['share_id'], get_snapshot['share_id'])
    self.assertEqual(snapshot['provider_location'],
                     get_snapshot['provider_location'])

    # Delete snapshot
    self.shares_v2_client.delete_snapshot(get_snapshot['id'])
    self.shares_client.wait_for_resource_deletion(
        snapshot_id=get_snapshot['id'])
    self.assertRaises(lib_exc.NotFound,
                      self.shares_v2_client.get_snapshot,
                      get_snapshot['id'])
def _test_manage(self, snapshot, version=CONF.share.max_api_microversion):
    """Manage a previously unmanaged snapshot and verify the result.

    :param snapshot: dict describing the snapshot to manage; must carry
        'id', 'share_id' and 'provider_location'.
    :param version: API microversion used for the manage request.
    """
    name = ("Name for 'managed' snapshot that had ID %s"
            % snapshot['id'])
    description = "Description for 'managed' snapshot"

    # Manage snapshot
    share_id = snapshot['share_id']
    # NOTE: the 'snapshot' parameter is rebound here to the API
    # response describing the newly managed snapshot.
    snapshot = self.shares_v2_client.manage_snapshot(
        share_id,
        snapshot['provider_location'],
        name=name,
        description=description,
        driver_options={},
        version=version,
    )

    # Add managed snapshot to cleanup queue
    self.method_resources.insert(
        0, {'type': 'snapshot', 'id': snapshot['id'],
            'client': self.shares_v2_client})

    # Wait for success
    self.shares_v2_client.wait_for_snapshot_status(snapshot['id'],
                                                   'available')

    # Verify manage snapshot API response
    expected_keys = ["status", "links", "share_id", "name",
                     "share_proto", "created_at", "description", "id",
                     "share_size", "size", "provider_location"]
    # user_id/project_id were added to the response in 2.17.
    if utils.is_microversion_ge(version, '2.17'):
        expected_keys.extend(["user_id", "project_id"])
    actual_keys = snapshot.keys()

    # Strict key check
    self.assertEqual(set(expected_keys), set(actual_keys))

    # Verify data of managed snapshot
    get_snapshot = self.shares_v2_client.get_snapshot(snapshot['id'])
    self.assertEqual(name, get_snapshot['name'])
    self.assertEqual(description, get_snapshot['description'])
    self.assertEqual(snapshot['share_id'], get_snapshot['share_id'])
    self.assertEqual(snapshot['provider_location'],
                     get_snapshot['provider_location'])

    # Delete snapshot
    self.shares_v2_client.delete_snapshot(get_snapshot['id'])
    self.shares_client.wait_for_resource_deletion(
        snapshot_id=get_snapshot['id'])
    self.assertRaises(lib_exc.NotFound,
                      self.shares_v2_client.get_snapshot,
                      get_snapshot['id'])
def test_get_share_instance(self, version):
    """Test that we get the proper keys back for the instance."""
    utils.check_skip_if_microversion_not_supported(version)

    instances = self.shares_v2_client.get_instances_of_share(
        self.share['id'], version=version,
    )['share_instances']
    instance = self.shares_v2_client.get_share_instance(
        instances[0]['id'], version=version)['share_instance']

    # Keys common to every microversion.
    expected_keys = [
        'host', 'share_id', 'id', 'share_network_id', 'status',
        'availability_zone', 'share_server_id', 'created_at',
    ]
    # Keys gated by the microversion that introduced (or removed) them.
    if utils.is_microversion_lt(version, '2.9'):
        expected_keys.extend(["export_location", "export_locations"])
    if utils.is_microversion_ge(version, '2.10'):
        expected_keys.append("access_rules_status")
    if utils.is_microversion_ge(version, '2.11'):
        expected_keys.append("replica_state")
    if utils.is_microversion_ge(version, '2.22'):
        expected_keys.append("share_type_id")
    if utils.is_microversion_ge(version, '2.30'):
        expected_keys.append("cast_rules_to_readonly")
    if utils.is_microversion_ge(version, '2.54'):
        expected_keys.append("progress")

    # Compare the full sorted key sets so extras and omissions both fail.
    expected_keys = sorted(expected_keys)
    actual_keys = sorted(instance.keys())
    self.assertEqual(
        expected_keys, actual_keys,
        'Share instance %s returned incorrect keys; '
        'expected %s, got %s.' % (instance['id'], expected_keys,
                                  actual_keys))
def test_share_type_create_show_list_with_is_default_key(self, version):
    """'is_default' appears in share type responses only since 2.46."""
    self.skip_if_microversion_not_supported(version)
    name = data_utils.rand_name("tempest-manila")
    extra_specs = self.add_extra_specs_to_dict()
    # The key is exposed from microversion 2.46 onwards.
    supports_is_default = utils.is_microversion_ge(version, '2.46')

    # Create share type.
    created = self.create_share_type(
        name, extra_specs=extra_specs, version=version)['share_type']
    if supports_is_default:
        self.assertIn('is_default', created)
        self.assertIs(False, created['is_default'])
    else:
        self.assertNotIn('is_default', created)

    # List share types.
    listed = self.shares_v2_client.list_share_types(version=version)
    for entry in listed['share_types']:
        if supports_is_default:
            self.assertIn('is_default', entry)
            if created['id'] == entry['id']:
                # Our freshly created type can never be the default.
                self.assertIs(False, entry['is_default'])
            else:
                self.assertTrue(entry['is_default'] in (True, False))
        else:
            self.assertNotIn('is_default', entry)

    # Show the created share type.
    shown = self.shares_v2_client.get_share_type(
        created['id'], version=version)['share_type']
    if supports_is_default:
        self.assertIn('is_default', shown)
        self.assertIs(False, shown['is_default'])
    else:
        self.assertNotIn('is_default', shown)
def _create_share_for_manage(self):
    """Create a share (with export locations) suitable for manage tests."""
    share = self.create_share(
        share_type_id=self.st['share_type']['id'],
        share_protocol=self.protocol,
    )
    share = self.shares_v2_client.get_share(share['id'])
    # Since 2.9 export locations are served by a dedicated API, so
    # attach them to the share dict explicitly.
    if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.9"):
        share["export_locations"] = (
            self.shares_v2_client.list_share_export_locations(share["id"]))
    return share
def test_share_group_type_create_show_list_with_is_default_key(
        self, version):
    """'is_default' is exposed for share group types only since 2.46."""
    self.skip_if_microversion_not_supported(version)
    name = data_utils.rand_name("tempest-manila")
    # The key is exposed from microversion 2.46 onwards.
    has_is_default = utils.is_microversion_ge(version, '2.46')

    # Create share group type.
    created = self.create_share_group_type(
        name=name, share_types=self.share_type['id'],
        cleanup_in_class=False, version=version)
    if has_is_default:
        self.assertIn('is_default', created)
        self.assertIs(False, created['is_default'])
    else:
        self.assertNotIn('is_default', created)

    # List share group types.
    for gtype in self.shares_v2_client.list_share_group_types(
            version=version):
        if has_is_default:
            self.assertIn('is_default', gtype)
            self.assertTrue(gtype['is_default'] in (True, False))
        else:
            self.assertNotIn('is_default', gtype)

    # Show the created share group type.
    shown = self.shares_v2_client.get_share_group_type(
        created['id'], version=version)
    if has_is_default:
        self.assertIn('is_default', shown)
        self.assertIs(False, shown['is_default'])
    else:
        self.assertNotIn('is_default', shown)
def _get_manage_params_from_share(self, share, invalid_params=None):
    """Build manage-share request parameters from an existing share.

    Optionally overlays *invalid_params* on top of the valid ones so
    negative tests can corrupt individual fields.
    """
    params = {
        'service_host': share['host'],
        'protocol': share['share_proto'],
        'share_type_id': share['share_type'],
    }
    # A share server id is only meaningful in multitenant deployments.
    if CONF.share.multitenancy_enabled:
        params['share_server_id'] = share['share_server_id']
    # Since 2.9 export locations come from a dedicated API.
    if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.9"):
        locations = self.shares_v2_client.list_share_export_locations(
            share["id"])
        params['export_path'] = locations[0]['path']
    if invalid_params:
        params.update(invalid_params)
    return params
def test_share_type_create_get(self, version):
    """Create a share type and verify the subsequent GET response."""
    utils.check_skip_if_microversion_not_supported(version)
    name = data_utils.rand_name("tempest-manila")
    # Share type descriptions are only supported since 2.41.
    description = ("Description for share type"
                   if utils.is_microversion_ge(version, "2.41") else None)
    extra_specs = self.add_extra_specs_to_dict({"key": "value"})

    # Create share type.
    created = self.create_share_type(name, extra_specs=extra_specs,
                                     version=version,
                                     description=description)
    self.assertEqual(name, created['name'])
    self._verify_description(description, created, version)
    self._verify_is_public_key_name(created, version)
    st_id = created["id"]

    # Get share type.
    body = self.shares_v2_client.get_share_type(st_id, version=version)
    shown = body["share_type"]
    self.assertEqual(name, shown["name"])
    self.assertEqual(st_id, shown["id"])
    self._verify_description(description, shown, version)
    if utils.is_microversion_lt(version, "2.24"):
        # snapshot_support is an implied/required extra-spec until
        # version 2.24, and the service assumes it to be True since we
        # don't provide it during share type creation.
        extra_specs.update({"snapshot_support": 'True'})
    self.assertEqual(extra_specs, shown["extra_specs"])
    self._verify_is_public_key_name(shown, version)

    # Check that backwards compatibility didn't break.
    self.assertDictMatch(body["volume_type"], shown)
def test_update_security_service(self):
    """Update a security service and verify the changes persist."""
    data = self.generate_security_service_data()
    ss = self.create_security_service(**data)
    self.assertDictContainsSubset(data, ss)

    # Update with a fresh set of attributes.
    upd_data = self.generate_security_service_data()
    updated = self.shares_client.update_security_service(
        ss["id"], **upd_data)
    fetched = self.shares_client.get_security_service(ss["id"])
    self.assertDictContainsSubset(upd_data, updated)
    self.assertDictContainsSubset(upd_data, fetched)

    # Since 2.44 the 'ou' attribute is updatable as well, so update a
    # second time including it.
    if utils.is_microversion_ge(CONF.share.max_api_microversion, '2.44'):
        upd_data_ou = self.generate_security_service_data(set_ou=True)
        updated_ou = self.shares_v2_client.update_security_service(
            ss["id"], **upd_data_ou)
        fetched_ou = self.shares_v2_client.get_security_service(ss["id"])
        self.assertDictContainsSubset(upd_data_ou, updated_ou)
        self.assertDictContainsSubset(upd_data_ou, fetched_ou)
def test_show_quotas_detail(self, microversion, with_user):
    """Detailed quotas expose in_use/limit/reserved per resource."""
    utils.check_skip_if_microversion_not_supported(microversion)
    quota_args = {"tenant_id": self.tenant_id, "version": microversion}
    keys = ['gigabytes', 'snapshot_gigabytes', 'shares',
            'snapshots', 'share_networks']
    # Replica quotas only exist from the replicas microversion on.
    if utils.is_microversion_ge(microversion, SHARE_REPLICAS_MICROVERSION):
        keys.extend(['share_replicas', 'replica_gigabytes'])
    if with_user:
        quota_args["user_id"] = self.user_id

    quotas = self.shares_v2_client.detail_quotas(
        **quota_args)['quota_set']
    quota_keys = list(quotas.keys())
    for resource in keys:
        self.assertIn(resource, quota_keys)
        detail_keys = list(quotas[resource].keys())
        for detail in ('in_use', 'limit', 'reserved'):
            self.assertIn(detail, detail_keys)
            # Values may be -1 (unlimited) but never lower.
            self.assertGreater(int(quotas[resource][detail]), -2)
def _validate_export_location_api_behavior(self, replica, replica_exports,
                                           primary_replica_exports,
                                           share_exports, version):
    """Check a share's export locations against replica expectations."""
    share_export_paths = [e['path'] for e in share_exports]

    # By default the share's exports are the union of the primary and
    # secondary replica exports.
    expected_exports = replica_exports + primary_replica_exports
    expected_number_of_exports = len(primary_replica_exports
                                     + replica_exports)

    # In and beyond version 2.47, secondary "non-active" replica
    # exports are not expected to be present in the share export
    # locations. Secondary replicas can be "active" only in "writable"
    # replication; in other replication types they are either "in_sync"
    # or "out_of_sync".
    replica_is_non_active = (replica['replica_state'] !=
                             constants.REPLICATION_STATE_ACTIVE)
    if utils.is_microversion_ge(version, '2.47') and replica_is_non_active:
        expected_exports = primary_replica_exports
        expected_number_of_exports = len(primary_replica_exports)

    # Assertions
    self.assertEqual(expected_number_of_exports, len(share_exports))
    for export in expected_exports:
        self.assertIn(export['path'], share_export_paths)
def test_list_snapshots_with_detail(self, version):
    """List snapshot details and strictly verify the returned keys."""
    # A 'None' version means the legacy v1 client without microversions.
    if version is None:
        snaps = self.shares_client.list_snapshots_with_detail()
    else:
        utils.skip_if_microversion_not_supported(version)
        snaps = self.shares_v2_client.list_snapshots_with_detail(
            version=version)

    # Keys every microversion returns.
    expected_keys = {"status", "links", "share_id", "name",
                     "share_proto", "created_at", "description", "id",
                     "share_size", "size"}
    # user_id/project_id were added in 2.17.
    if version and utils.is_microversion_ge(version, '2.17'):
        expected_keys.update(["user_id", "project_id"])

    # Strict key check: no extras, no omissions.
    for snap in snaps:
        self.assertEqual(expected_keys, set(snap.keys()))

    # Our snapshot id appears exactly once in the listing.
    matches = [s["id"] for s in snaps if s["id"] in self.snap["id"]]
    msg = "expected id lists %s times in share list" % (len(matches))
    self.assertEqual(1, len(matches), msg)
def _list_shares_with_detail(self, version):
    """List shares with detail at *version* and verify response keys.

    Also checks that each of the class shares appears in the listing
    exactly once.
    """
    # list shares
    shares = self.shares_v2_client.list_shares_with_detail(
        version=six.text_type(version))

    # Keys present at every microversion. NOTE: the original list
    # contained "snapshot_id" twice; the duplicate added nothing to the
    # membership checks and has been removed.
    keys = [
        "status", "description", "links", "availability_zone",
        "created_at", "project_id", "volume_type", "share_proto", "name",
        "snapshot_id", "id", "size", "share_network_id", "metadata",
        "is_public", "share_type",
    ]
    # Keys gated by the microversion that introduced (or removed) them.
    if utils.is_microversion_lt(version, '2.9'):
        keys.extend(["export_location", "export_locations"])
    if utils.is_microversion_ge(version, '2.2'):
        keys.append("snapshot_support")
    if utils.is_microversion_ge(version, '2.6'):
        keys.append("share_type_name")
    if utils.is_microversion_ge(version, '2.10'):
        keys.append("access_rules_status")
    if utils.is_microversion_ge(version, '2.11'):
        keys.append("replication_type")
    if utils.is_microversion_ge(version, '2.16'):
        keys.append("user_id")
    if utils.is_microversion_ge(version, '2.24'):
        keys.append("create_share_from_snapshot_support")
    if utils.is_microversion_ge(version,
                                constants.REVERT_TO_SNAPSHOT_MICROVERSION):
        keys.append("revert_to_snapshot_support")

    # Explicit loops instead of a throwaway list comprehension used
    # only for its side effects.
    for sh in shares:
        for key in keys:
            self.assertIn(key, sh.keys())

    # our shares in list and have no duplicates
    for share in self.shares:
        gen = [sid["id"] for sid in shares if sid["id"] in share["id"]]
        msg = "expected id lists %s times in share list" % (len(gen))
        self.assertEqual(1, len(gen), msg)
def _test_manage(self, is_public=False,
                 version=CONF.share.max_api_microversion,
                 check_manage=False):
    """Unmanage a share, re-manage it, and verify the managed data.

    :param is_public: whether the re-managed share is requested public.
    :param version: API microversion used for the manage request.
    :param check_manage: if True, also assert that unmanaging removed
        the share's instance.
    """
    share = self._create_share_for_manage()

    name = "Name for 'managed' share that had ID %s" % share['id']
    description = "Description for 'managed' share"

    # Unmanage share
    self._unmanage_share_and_wait(share)

    if check_manage:
        # After 'unmanage' operation, share instance should be deleted.
        # Assert not related to 'manage' test, but placed here for
        # resource optimization.
        share_instance_list = self.shares_v2_client.list_share_instances()
        share_ids = [si['share_id'] for si in share_instance_list]
        self.assertNotIn(share['id'], share_ids)

    # Manage share
    managed_share = self.shares_v2_client.manage_share(
        service_host=share['host'],
        export_path=share['export_locations'][0],
        protocol=share['share_proto'],
        share_type_id=self.st['share_type']['id'],
        name=name,
        description=description,
        is_public=is_public,
        version=version,
    )

    # Add managed share to cleanup queue
    self.method_resources.insert(
        0, {'type': 'share', 'id': managed_share['id'],
            'client': self.shares_client})

    # Wait for success
    self.shares_v2_client.wait_for_share_status(managed_share['id'],
                                                'available')

    # Verify data of managed share
    self.assertEqual(name, managed_share['name'])
    self.assertEqual(description, managed_share['description'])
    self.assertEqual(share['host'], managed_share['host'])
    self.assertEqual(share['share_proto'], managed_share['share_proto'])

    # Before 2.6 'share_type' held the type's name, afterwards its id.
    if utils.is_microversion_ge(version, "2.6"):
        self.assertEqual(self.st['share_type']['id'],
                         managed_share['share_type'])
    else:
        self.assertEqual(self.st['share_type']['name'],
                         managed_share['share_type'])

    # 'is_public' is only honored by manage_share since 2.8.
    if utils.is_microversion_ge(version, "2.8"):
        self.assertEqual(is_public, managed_share['is_public'])
    else:
        self.assertFalse(managed_share['is_public'])

    # 'user_id' only appears in responses since 2.16.
    if utils.is_microversion_ge(version, "2.16"):
        self.assertEqual(share['user_id'], managed_share['user_id'])
    else:
        self.assertNotIn('user_id', managed_share)

    # Delete share
    self.shares_v2_client.delete_share(managed_share['id'])
    self.shares_v2_client.wait_for_resource_deletion(
        share_id=managed_share['id'])
    self.assertRaises(lib_exc.NotFound,
                      self.shares_v2_client.get_share,
                      managed_share['id'])
def test_list_access_rules(self, version):
    """Create, list, verify and delete an access rule at *version*.

    The pre-2.13 API cannot express cephx rules, so the test is skipped
    for old versions when cephx is the configured access type.
    """
    if (utils.is_microversion_lt(version, '2.13') and
            CONF.share.enable_cephx_rules_for_protocols):
        msg = ("API version %s does not support cephx access type, need "
               "version >= 2.13." % version)
        raise self.skipException(msg)

    # create rule (the v1 client only speaks microversion 1.0)
    if utils.is_microversion_eq(version, '1.0'):
        rule = self.shares_client.create_access_rule(
            self.share["id"], self.access_type, self.access_to)
    else:
        rule = self.shares_v2_client.create_access_rule(self.share["id"],
                                                        self.access_type,
                                                        self.access_to,
                                                        version=version)

    # rules must start out in 'new' until 2.28 & 'queued_to_apply' after
    if utils.is_microversion_le(version, "2.27"):
        self.assertEqual("new", rule['state'])
    else:
        self.assertEqual("queued_to_apply", rule['state'])

    # Wait for the rule to apply; the wait mechanism differs by version:
    # per-rule status up to 2.9, share-level access_rules_status after.
    if utils.is_microversion_eq(version, '1.0'):
        self.shares_client.wait_for_access_rule_status(
            self.share["id"], rule["id"], "active")
    elif utils.is_microversion_eq(version, '2.9'):
        self.shares_v2_client.wait_for_access_rule_status(
            self.share["id"], rule["id"], "active")
    else:
        self.shares_v2_client.wait_for_share_status(
            self.share["id"], "active", status_attr='access_rules_status',
            version=version)

    # list rules
    if utils.is_microversion_eq(version, '1.0'):
        rules = self.shares_client.list_access_rules(self.share["id"])
    else:
        rules = self.shares_v2_client.list_access_rules(self.share["id"],
                                                        version=version)

    # verify keys ('access_key' only exists since 2.21)
    keys = ("id", "access_type", "access_to", "access_level")
    if utils.is_microversion_ge(version, '2.21'):
        keys += ("access_key", )
    for key in keys:
        [self.assertIn(key, r.keys()) for r in rules]
    # internal/service-only fields must never leak into the response
    for key in ('deleted', 'deleted_at', 'instance_mappings'):
        [self.assertNotIn(key, r.keys()) for r in rules]

    # verify values
    self.assertEqual(self.access_type, rules[0]["access_type"])
    self.assertEqual(self.access_to, rules[0]["access_to"])
    self.assertEqual('rw', rules[0]["access_level"])
    # only cephx rules carry a non-null access_key
    if utils.is_microversion_ge(version, '2.21'):
        if self.access_type == 'cephx':
            self.assertIsNotNone(rules[0]['access_key'])
        else:
            self.assertIsNone(rules[0]['access_key'])

    # our share id in list and have no duplicates
    gen = [r["id"] for r in rules if r["id"] in rule["id"]]
    msg = "expected id lists %s times in rule list" % (len(gen))
    self.assertEqual(1, len(gen), msg)

    # delete the rule with the version-matching client and wait
    if utils.is_microversion_eq(version, '1.0'):
        self.shares_client.delete_access_rule(self.share["id"], rule["id"])
        self.shares_client.wait_for_resource_deletion(
            rule_id=rule["id"], share_id=self.share['id'])
    else:
        self.shares_v2_client.delete_access_rule(self.share["id"],
                                                 rule["id"],
                                                 version=version)
        self.shares_v2_client.wait_for_resource_deletion(
            rule_id=rule["id"], share_id=self.share['id'], version=version)
def test_list_access_rules(self, version):
    """Create, list, verify and delete an access rule at *version*.

    Variant without the initial rule-state assertions. The pre-2.13 API
    cannot express cephx rules, so the test is skipped for old versions
    when cephx is the configured access type.
    """
    if (utils.is_microversion_lt(version, '2.13') and
            CONF.share.enable_cephx_rules_for_protocols):
        msg = ("API version %s does not support cephx access type, need "
               "version >= 2.13." % version)
        raise self.skipException(msg)

    # create rule (the v1 client only speaks microversion 1.0)
    if utils.is_microversion_eq(version, '1.0'):
        rule = self.shares_client.create_access_rule(
            self.share["id"], self.access_type, self.access_to)
    else:
        rule = self.shares_v2_client.create_access_rule(
            self.share["id"], self.access_type, self.access_to,
            version=version)

    # Wait for the rule to apply; the wait mechanism differs by version:
    # per-rule status up to 2.9, share-level access_rules_status after.
    if utils.is_microversion_eq(version, '1.0'):
        self.shares_client.wait_for_access_rule_status(
            self.share["id"], rule["id"], "active")
    elif utils.is_microversion_eq(version, '2.9'):
        self.shares_v2_client.wait_for_access_rule_status(
            self.share["id"], rule["id"], "active")
    else:
        self.shares_v2_client.wait_for_share_status(
            self.share["id"], "active", status_attr='access_rules_status',
            version=version)

    # list rules
    if utils.is_microversion_eq(version, '1.0'):
        rules = self.shares_client.list_access_rules(self.share["id"])
    else:
        rules = self.shares_v2_client.list_access_rules(self.share["id"],
                                                        version=version)

    # verify keys ('access_key' only exists since 2.21)
    keys = ("id", "access_type", "access_to", "access_level")
    if utils.is_microversion_ge(version, '2.21'):
        keys += ("access_key", )
    for key in keys:
        [self.assertIn(key, r.keys()) for r in rules]
    # internal/service-only fields must never leak into the response
    for key in ('deleted', 'deleted_at', 'instance_mappings'):
        [self.assertNotIn(key, r.keys()) for r in rules]

    # verify values
    self.assertEqual(self.access_type, rules[0]["access_type"])
    self.assertEqual(self.access_to, rules[0]["access_to"])
    self.assertEqual('rw', rules[0]["access_level"])
    # only cephx rules carry a non-null access_key
    if utils.is_microversion_ge(version, '2.21'):
        if self.access_type == 'cephx':
            self.assertIsNotNone(rules[0]['access_key'])
        else:
            self.assertIsNone(rules[0]['access_key'])

    # our share id in list and have no duplicates
    gen = [r["id"] for r in rules if r["id"] in rule["id"]]
    msg = "expected id lists %s times in rule list" % (len(gen))
    self.assertEqual(1, len(gen), msg)

    # delete the rule with the version-matching client and wait
    if utils.is_microversion_eq(version, '1.0'):
        self.shares_client.delete_access_rule(self.share["id"], rule["id"])
        self.shares_client.wait_for_resource_deletion(
            rule_id=rule["id"], share_id=self.share['id'])
    else:
        self.shares_v2_client.delete_access_rule(
            self.share["id"], rule["id"], version=version)
        self.shares_v2_client.wait_for_resource_deletion(
            rule_id=rule["id"], share_id=self.share['id'], version=version)
def test_list_access_rules(self, version):
    """Create an access rule, then list and verify rules per version."""
    utils.check_skip_if_microversion_not_supported(version)
    if (utils.is_microversion_lt(version, '2.13') and
            CONF.share.enable_cephx_rules_for_protocols):
        msg = ("API version %s does not support cephx access type, need "
               "version >= 2.13." % version)
        raise self.skipException(msg)

    # Access-rule metadata is only supported since 2.45.
    metadata = None
    if utils.is_microversion_ge(version, '2.45'):
        metadata = {'key1': 'v1', 'key2': 'v2'}

    # The v1 client handles everything up to microversion 2.9.
    if utils.is_microversion_le(version, '2.9'):
        client = self.shares_client
    else:
        client = self.shares_v2_client

    # Create the rule.
    rule = self.allow_access(self.share["id"], client=client,
                             access_type=self.access_type,
                             access_to=self.access_to, metadata=metadata,
                             version=version)

    # Timestamps were added to the create response in 2.33.
    rule_keys = list(rule.keys())
    if utils.is_microversion_ge(version, '2.33'):
        self.assertIn('created_at', rule_keys)
        self.assertIn('updated_at', rule_keys)
    else:
        self.assertNotIn('created_at', rule_keys)
        self.assertNotIn('updated_at', rule_keys)

    # Rules start out 'new' until 2.28 and 'queued_to_apply' afterwards.
    if utils.is_microversion_le(version, "2.27"):
        self.assertEqual("new", rule['state'])
    else:
        self.assertEqual("queued_to_apply", rule['state'])

    # List the rules with the version-matching client.
    if utils.is_microversion_eq(version, '1.0'):
        rules = self.shares_client.list_access_rules(
            self.share["id"])['access_list']
    else:
        rules = self.shares_v2_client.list_access_rules(
            self.share["id"], version=version)['access_list']

    # Verify the per-version key set; internal/service-only fields must
    # never leak into the response.
    keys = ("id", "access_type", "access_to", "access_level")
    if utils.is_microversion_ge(version, '2.21'):
        keys += ("access_key", )
    if utils.is_microversion_ge(version, '2.33'):
        keys += ("created_at", "updated_at", )
    if utils.is_microversion_ge(version, '2.45'):
        keys += ("metadata", )
    for entry in rules:
        for key in keys:
            self.assertIn(key, entry.keys())
        for key in ('deleted', 'deleted_at', 'instance_mappings'):
            self.assertNotIn(key, entry.keys())

    # Verify values; only cephx rules carry a non-null access_key.
    self.assertEqual(self.access_type, rules[0]["access_type"])
    self.assertEqual(self.access_to, rules[0]["access_to"])
    self.assertEqual('rw', rules[0]["access_level"])
    if utils.is_microversion_ge(version, '2.21'):
        if self.access_type == 'cephx':
            self.assertIsNotNone(rules[0]['access_key'])
        else:
            self.assertIsNone(rules[0]['access_key'])

    # Our rule appears exactly once in the listing.
    matches = [r["id"] for r in rules if r["id"] in rule["id"]]
    msg = "expected id lists %s times in rule list" % (len(matches))
    self.assertEqual(1, len(matches), msg)