def test_list_share_networks_with_detail(self):
    """List share networks with detail and verify the response keys.

    The share network backed by the LDAP security service must appear
    in the listing, and every listed share network must contain all
    keys expected for the negotiated microversion.
    """
    listed = self.shares_v2_client.list_share_networks_with_detail()
    # Bug fix: the original discarded the result of any(), so a missing
    # share network could never fail the test. Assert it explicitly.
    self.assertTrue(
        any(self.sn_with_ldap_ss["id"] in sn["id"] for sn in listed))
    # verify keys
    keys = [
        "name", "id", "description", "network_type", "project_id",
        "cidr", "ip_version", "neutron_net_id", "neutron_subnet_id",
        "created_at", "updated_at", "segmentation_id",
    ]
    # In v2.18 and beyond, we expect gateway.
    if utils.is_microversion_supported('2.18'):
        keys.append('gateway')
    # In v2.20 and beyond, we expect mtu.
    if utils.is_microversion_supported('2.20'):
        keys.append('mtu')
    # Every expected key must be present in every listed share network.
    for sn in listed:
        for key in keys:
            self.assertIn(key, sn.keys())
def test_list_share_networks_with_detail(self):
    """List share networks with detail and verify the response keys.

    Covers the v2.51+ layout in which network parameters move from the
    share network itself into its 'share_network_subnets' entries.
    """
    listed = self.shares_v2_client.list_share_networks_with_detail(
        )['share_networks']
    # Bug fix: the original discarded the result of any(), so a missing
    # share network could never fail the test. Assert it explicitly.
    self.assertTrue(
        any(self.sn_with_ldap_ss["id"] in sn["id"] for sn in listed))
    # verify keys
    keys = [
        "name", "id", "description", "network_type", "project_id",
        "cidr", "ip_version", "neutron_net_id", "neutron_subnet_id",
        "created_at", "updated_at", "segmentation_id",
    ]
    # In v2.18 and beyond, we expect gateway.
    if utils.is_microversion_supported('2.18'):
        keys.append('gateway')
    # In v2.20 and beyond, we expect mtu.
    if utils.is_microversion_supported('2.20'):
        keys.append('mtu')
    # In v2.51 and beyond, share-network does not have
    # network parameters anymore.
    if utils.is_microversion_supported('2.51'):
        subnet_keys = [
            "network_type", "cidr", "ip_version", "neutron_net_id",
            "neutron_subnet_id", "segmentation_id", "gateway", "mtu",
        ]
        # Network parameters now live on each subnet instead.
        keys = list(set(keys) - set(subnet_keys))
        keys.append('share_network_subnets')
        for sn in listed:
            for subnet in sn['share_network_subnets']:
                for key in subnet_keys:
                    self.assertIn(key, list(subnet.keys()))
    # Every remaining expected key must be present on each network.
    for sn in listed:
        for key in keys:
            self.assertIn(key, sn.keys())
def test_create_get_delete_share(self):
    """Create a share, read it back at several microversions, delete it.

    Verifies that each response carries the keys expected for the
    requested microversion, and that deletion makes the share 404.
    """
    share = self.create_share(self.protocol)
    # Keys every share response must contain regardless of version.
    detailed_elements = {
        "name", "id", "availability_zone", "description", "project_id",
        "host", "created_at", "share_proto", "metadata", "size",
        "snapshot_id", "share_network_id", "status", "share_type",
        "volume_type", "links", "is_public",
    }
    # NOTE(review): msg is rendered once here and reused by later
    # assertions after the set mutates; only the failure text is stale.
    msg = "At least one expected element missing from share " "response. Expected %(expected)s, got %(actual)s." % {
        "expected": detailed_elements,
        "actual": share.keys(),
    }
    self.assertTrue(detailed_elements.issubset(share.keys()), msg)
    # The share was not requested as public, so is_public must be False.
    self.assertFalse(share["is_public"])
    # The 'status' of the share returned by the create API must be
    # the default value - 'creating'.
    self.assertEqual("creating", share["status"])
    # Get share using v 2.1 - we expect key 'snapshot_support' to be absent
    share_get = self.shares_v2_client.get_share(share["id"], version="2.1")
    detailed_elements.add("export_location")
    self.assertTrue(detailed_elements.issubset(share_get.keys()), msg)
    # Get share using v 2.2 - we expect key 'snapshot_support' to exist
    share_get = self.shares_v2_client.get_share(share["id"], version="2.2")
    detailed_elements.add("snapshot_support")
    self.assertTrue(detailed_elements.issubset(share_get.keys()), msg)
    if utils.is_microversion_supported("2.9"):
        # Get share using v 2.9 - key 'export_location' is expected
        # to be absent
        share_get = self.shares_v2_client.get_share(share["id"],
                                                    version="2.9")
        detailed_elements.remove("export_location")
        self.assertTrue(detailed_elements.issubset(share_get.keys()), msg)
    # In v 2.11 and beyond, we expect key 'replication_type' in the
    # share data returned by the share create API.
    if utils.is_microversion_supported("2.11"):
        detailed_elements.add("replication_type")
        self.assertTrue(detailed_elements.issubset(share.keys()), msg)
    # Delete share
    self.shares_v2_client.delete_share(share["id"])
    self.shares_v2_client.wait_for_resource_deletion(share_id=share["id"])
    self.assertRaises(lib_exc.NotFound,
                      self.shares_v2_client.get_share,
                      share["id"])
def test_create_get_delete_share(self):
    """Walk a share through create -> show (several versions) -> delete."""
    share = self.create_share(self.protocol)
    expected = {
        'name', 'id', 'availability_zone', 'description', 'project_id',
        'host', 'created_at', 'share_proto', 'metadata', 'size',
        'snapshot_id', 'share_network_id', 'status', 'share_type',
        'volume_type', 'links', 'is_public',
    }
    msg = ("At least one expected element missing from share "
           "response. Expected %(expected)s, got %(actual)s."
           % {"expected": expected, "actual": share.keys()})
    self.assertTrue(expected.issubset(share.keys()), msg)
    self.assertFalse(share['is_public'])
    # A fresh share starts out in the default 'creating' state.
    self.assertEqual('creating', share['status'])

    def fetch(version):
        # Re-read the share at a specific API microversion.
        return self.shares_v2_client.get_share(share['id'],
                                               version=version)

    # v2.1: 'export_location' present, 'snapshot_support' absent.
    expected.add('export_location')
    self.assertTrue(expected.issubset(fetch('2.1').keys()), msg)
    # v2.2: 'snapshot_support' appears.
    expected.add('snapshot_support')
    self.assertTrue(expected.issubset(fetch('2.2').keys()), msg)
    if utils.is_microversion_supported('2.9'):
        # v2.9 dropped 'export_location' from the share view.
        expected.remove('export_location')
        self.assertTrue(expected.issubset(fetch('2.9').keys()), msg)
    if utils.is_microversion_supported('2.11'):
        # v2.11 added 'replication_type' to the create response.
        expected.add('replication_type')
        self.assertTrue(expected.issubset(share.keys()), msg)
    if utils.is_microversion_supported('2.16'):
        # v2.16 added 'user_id' to show/create/manage responses.
        expected.add('user_id')
        self.assertTrue(expected.issubset(share.keys()), msg)
    # Remove the share and make sure it is really gone.
    self.shares_v2_client.delete_share(share['id'])
    self.shares_v2_client.wait_for_resource_deletion(share_id=share['id'])
    self.assertRaises(lib_exc.NotFound,
                      self.shares_v2_client.get_share, share['id'])
def test_create_get_delete_share(self):
    """Create a share, verify its keys across microversions, delete it."""
    share = self.create_share(self.protocol)
    # Keys every share response must contain regardless of version.
    detailed_elements = {
        'name', 'id', 'availability_zone', 'description', 'project_id',
        'host', 'created_at', 'share_proto', 'metadata', 'size',
        'snapshot_id', 'share_network_id', 'status', 'share_type',
        'volume_type', 'links', 'is_public'
    }
    msg = ("At least one expected element missing from share "
           "response. Expected %(expected)s, got %(actual)s." % {
               "expected": detailed_elements,
               "actual": share.keys(),
           })
    self.assertTrue(detailed_elements.issubset(share.keys()), msg)
    # The share was not requested as public, so is_public must be False.
    self.assertFalse(share['is_public'])
    # The 'status' of the share returned by the create API must be
    # set and have value either 'creating' or
    # 'available' (if share creation is really fast as in
    # case of Dummy driver).
    self.assertIn(share['status'], ('creating', 'available'))
    # Get share using v 2.1 - we expect key 'snapshot_support' to be absent
    share_get = self.shares_v2_client.get_share(share['id'], version='2.1')
    detailed_elements.add('export_location')
    self.assertTrue(detailed_elements.issubset(share_get.keys()), msg)
    # Get share using v 2.2 - we expect key 'snapshot_support' to exist
    share_get = self.shares_v2_client.get_share(share['id'], version='2.2')
    detailed_elements.add('snapshot_support')
    self.assertTrue(detailed_elements.issubset(share_get.keys()), msg)
    if utils.is_microversion_supported('2.9'):
        # Get share using v 2.9 - key 'export_location' is expected
        # to be absent
        share_get = self.shares_v2_client.get_share(share['id'],
                                                    version='2.9')
        detailed_elements.remove('export_location')
        self.assertTrue(detailed_elements.issubset(share_get.keys()), msg)
    # In v 2.11 and beyond, we expect key 'replication_type' in the
    # share data returned by the share create API.
    if utils.is_microversion_supported('2.11'):
        detailed_elements.add('replication_type')
        self.assertTrue(detailed_elements.issubset(share.keys()), msg)
    # In v 2.16 and beyond, we add user_id in show/create/manage
    # share echo.
    if utils.is_microversion_supported('2.16'):
        detailed_elements.add('user_id')
        self.assertTrue(detailed_elements.issubset(share.keys()), msg)
    # Delete share
    self.shares_v2_client.delete_share(share['id'])
    self.shares_v2_client.wait_for_resource_deletion(share_id=share['id'])
    self.assertRaises(lib_exc.NotFound,
                      self.shares_v2_client.get_share,
                      share['id'])
def test_reset_tenant_quotas(self):
    """Bump tenant quotas, reset them, and verify defaults are restored."""
    # Get default_quotas
    default = self.client.default_quotas(self.tenant_id)
    # Get current quotas
    custom = self.client.show_quotas(self.tenant_id)
    # Make quotas for update
    data = {
        "shares": int(custom["shares"]) + 2,
        "snapshots": int(custom["snapshots"]) + 2,
        "gigabytes": int(custom["gigabytes"]) + 2,
        "snapshot_gigabytes": int(custom["snapshot_gigabytes"]) + 2,
        "share_networks": int(custom["share_networks"]) + 2,
    }
    # Share-group quotas exist only when the microversion is available
    # and share-group tests are enabled in the configuration.
    if (utils.is_microversion_supported(SHARE_GROUPS_MICROVERSION) and
            CONF.share.run_share_group_tests):
        data["share_groups"] = int(custom["share_groups"]) + 2
        data["share_group_snapshots"] = (
            int(custom["share_group_snapshots"]) + 2)
    # set new quota
    updated = self.client.update_quotas(self.tenant_id, **data)
    self.assertEqual(data["shares"], int(updated["shares"]))
    self.assertEqual(data["snapshots"], int(updated["snapshots"]))
    self.assertEqual(data["gigabytes"], int(updated["gigabytes"]))
    self.assertEqual(data["snapshot_gigabytes"],
                     int(updated["snapshot_gigabytes"]))
    self.assertEqual(data["share_networks"],
                     int(updated["share_networks"]))
    if (utils.is_microversion_supported(SHARE_GROUPS_MICROVERSION) and
            CONF.share.run_share_group_tests):
        self.assertEqual(data["share_groups"],
                         int(updated["share_groups"]))
        self.assertEqual(data["share_group_snapshots"],
                         int(updated["share_group_snapshots"]))
    # Reset customized quotas
    self.client.reset_quotas(self.tenant_id)
    # Verify quotas
    reseted = self.client.show_quotas(self.tenant_id)
    self.assertEqual(int(default["shares"]), int(reseted["shares"]))
    self.assertEqual(int(default["snapshots"]), int(reseted["snapshots"]))
    self.assertEqual(int(default["gigabytes"]), int(reseted["gigabytes"]))
    self.assertEqual(int(default["snapshot_gigabytes"]),
                     int(reseted["snapshot_gigabytes"]))
    self.assertEqual(int(default["share_networks"]),
                     int(reseted["share_networks"]))
    if (utils.is_microversion_supported(SHARE_GROUPS_MICROVERSION) and
            CONF.share.run_share_group_tests):
        self.assertEqual(int(default["share_groups"]),
                         int(reseted["share_groups"]))
        self.assertEqual(int(default["share_group_snapshots"]),
                         int(reseted["share_group_snapshots"]))
def test_create_delete_snapshot(self):
    """Create a snapshot, verify its response keys, then delete it."""
    # create snapshot
    snap = self.create_snapshot_wait_for_active(self.share["id"])
    detailed_elements = {
        'name', 'id', 'description', 'created_at', 'share_proto',
        'size', 'share_size', 'share_id', 'status', 'links'
    }
    msg = ("At least one expected element missing from share "
           "response. Expected %(expected)s, got %(actual)s." % {
               "expected": detailed_elements,
               "actual": snap.keys(),
           })
    self.assertTrue(detailed_elements.issubset(snap.keys()), msg)
    # In v2.17 and beyond, we expect user_id and project_id keys
    if utils.is_microversion_supported('2.17'):
        detailed_elements.update({'user_id', 'project_id'})
        self.assertTrue(detailed_elements.issubset(snap.keys()), msg)
    else:
        # Bug fix: the original asserted against 'detailed_elements',
        # a set this branch never adds those keys to, so the check was
        # vacuous. Assert against the actual snapshot response instead.
        self.assertNotIn('user_id', snap)
        self.assertNotIn('project_id', snap)
    # delete snapshot
    self.shares_client.delete_snapshot(snap["id"])
    self.shares_client.wait_for_resource_deletion(snapshot_id=snap["id"])
    self.assertRaises(lib_exc.NotFound,
                      self.shares_client.get_snapshot,
                      snap['id'])
def test_create_delete_snapshot(self):
    """Create a snapshot, verify its response keys, then delete it."""
    # create snapshot
    snap = self.create_snapshot_wait_for_active(self.share["id"])
    detailed_elements = {'name', 'id', 'description', 'created_at',
                         'share_proto', 'size', 'share_size', 'share_id',
                         'status', 'links'}
    msg = (
        "At least one expected element missing from share "
        "response. Expected %(expected)s, got %(actual)s." % {
            "expected": detailed_elements,
            "actual": snap.keys(),
        }
    )
    self.assertTrue(detailed_elements.issubset(snap.keys()), msg)
    # In v2.17 and beyond, we expect user_id and project_id keys
    if utils.is_microversion_supported('2.17'):
        detailed_elements.update({'user_id', 'project_id'})
        self.assertTrue(detailed_elements.issubset(snap.keys()), msg)
    else:
        # Bug fix: the original asserted against 'detailed_elements',
        # a set this branch never adds those keys to, so the check was
        # vacuous. Assert against the actual snapshot response instead.
        self.assertNotIn('user_id', snap)
        self.assertNotIn('project_id', snap)
    # delete snapshot
    self.shares_client.delete_snapshot(snap["id"])
    self.shares_client.wait_for_resource_deletion(snapshot_id=snap["id"])
    self.assertRaises(lib_exc.NotFound,
                      self.shares_client.get_snapshot,
                      snap['id'])
def test_show_quotas(self):
    """Show tenant quotas; every limit must be at least -1 (unlimited)."""
    quotas = self.shares_v2_client.show_quotas(self.tenant_id)
    checked = ["gigabytes", "snapshot_gigabytes", "shares",
               "snapshots", "share_networks"]
    # Share-group quotas appear only when the microversion is available.
    if utils.is_microversion_supported(SHARE_GROUPS_MICROVERSION):
        checked += ["share_groups", "share_group_snapshots"]
    for quota_key in checked:
        self.assertGreater(int(quotas[quota_key]), -2)
def test_default_quotas(self):
    """Default quotas must each be -1 (unlimited) or a real limit."""
    quotas = self.client.default_quotas(self.tenant_id)['quota_set']
    quota_keys = ["gigabytes", "snapshot_gigabytes", "shares",
                  "snapshots", "share_networks"]
    # Share-group quotas appear only when the microversion is available.
    if utils.is_microversion_supported(SHARE_GROUPS_MICROVERSION):
        quota_keys.extend(["share_groups", "share_group_snapshots"])
    # Replica quotas appear only when replica quota support exists.
    if utils.share_replica_quotas_are_supported():
        quota_keys.extend(["share_replicas", "replica_gigabytes"])
    for quota_key in quota_keys:
        self.assertGreater(int(quotas[quota_key]), -2)
def test_gateway_mtu_neutron_net_id_with_neutron(self):
    """Share network details must match the backing neutron network."""
    self.create_share(share_type_id=self.share_type_id,
                      cleanup_in_class=False)
    net_details = self.shares_v2_client.get_share_network(
        self.shares_v2_client.share_network_id)['share_network']
    # With subnet support the network parameters live on the default
    # subnet; otherwise they are on the share network itself.
    if utils.share_network_subnets_are_supported():
        net_info = utils.share_network_get_default_subnet(net_details)
    else:
        net_info = net_details
    if utils.is_microversion_supported('2.18'):
        # v2.18 added the 'gateway' field.
        subnet = self.subnets_client.show_subnet(
            net_info['neutron_subnet_id'])
        self.assertEqual(subnet['subnet']['gateway_ip'],
                         net_info['gateway'])
    if utils.is_microversion_supported('2.20'):
        # v2.20 added the 'mtu' field.
        network = self.networks_client.show_network(
            net_info['neutron_net_id'])
        self.assertEqual(network['network']['mtu'], net_info['mtu'])
        self.assertEqual(network['network']['id'],
                         net_info['neutron_net_id'])
def test_create_get_delete_share(self):
    """Create a share, fetch it at several microversions, delete it."""
    share = self.create_share(self.protocol)
    # Keys every share response must contain regardless of version.
    detailed_elements = {
        'name', 'id', 'availability_zone', 'description', 'project_id',
        'host', 'created_at', 'share_proto', 'metadata', 'size',
        'snapshot_id', 'share_network_id', 'status', 'share_type',
        'volume_type', 'links', 'is_public'
    }
    msg = (
        "At least one expected element missing from share "
        "response. Expected %(expected)s, got %(actual)s." % {
            "expected": detailed_elements,
            "actual": share.keys(),
        }
    )
    self.assertTrue(detailed_elements.issubset(share.keys()), msg)
    # The share was not requested as public, so is_public must be False.
    self.assertFalse(share['is_public'])
    # Get share using v 2.1 - we expect key 'snapshot_support' to be absent
    share_get = self.shares_v2_client.get_share(share['id'], version='2.1')
    detailed_elements.add('export_location')
    self.assertTrue(detailed_elements.issubset(share_get.keys()), msg)
    # Get share using v 2.2 - we expect key 'snapshot_support' to exist
    share_get = self.shares_v2_client.get_share(share['id'], version='2.2')
    detailed_elements.add('snapshot_support')
    self.assertTrue(detailed_elements.issubset(share_get.keys()), msg)
    if utils.is_microversion_supported('2.9'):
        # Get share using v 2.9 - key 'export_location' is expected
        # to be absent
        share_get = self.shares_v2_client.get_share(
            share['id'], version='2.9')
        detailed_elements.remove('export_location')
        self.assertTrue(detailed_elements.issubset(share_get.keys()), msg)
    # Delete share
    self.shares_v2_client.delete_share(share['id'])
    self.shares_v2_client.wait_for_resource_deletion(share_id=share['id'])
    self.assertRaises(lib_exc.NotFound,
                      self.shares_v2_client.get_share,
                      share['id'])
def test_migration_files(self):
    """Migrate a share to another pool and verify file data survives."""
    if self.protocol == "CIFS":
        raise self.skipException("Test for CIFS protocol not supported "
                                 "at this moment. Skipping.")
    if not CONF.share.run_migration_tests:
        raise self.skipException("Migration tests disabled. Skipping.")
    pools = self.shares_admin_client.list_pools()['pools']
    if len(pools) < 2:
        raise self.skipException("At least two different pool entries "
                                 "are needed to run migration tests. "
                                 "Skipping.")
    self.security_group = self._create_security_group()
    self.create_share()
    share = self.shares_client.get_share(self.share['id'])
    # Pick any pool other than the share's current host as destination.
    dest_pool = next((x for x in pools if x['name'] != share['host']),
                     None)
    self.assertIsNotNone(dest_pool)
    self.assertIsNotNone(dest_pool.get('name'))
    dest_pool = dest_pool['name']
    instance1 = self.boot_instance()
    self.allow_access_ip(self.share['id'], instance=instance1,
                         cleanup=False)
    ssh_client = self.init_ssh(instance1)
    # TODO(vponomaryov): use separate API for getting export location for
    # share when "v2" client is used.
    first_location = self.share['export_locations'][0]
    self.mount_share(first_location, ssh_client)
    # Build a small directory/file tree with varied permissions so the
    # post-migration listing can be compared against it.
    ssh_client.exec_command("mkdir -p /mnt/f1")
    ssh_client.exec_command("mkdir -p /mnt/f2")
    ssh_client.exec_command("mkdir -p /mnt/f3")
    ssh_client.exec_command("mkdir -p /mnt/f4")
    ssh_client.exec_command("mkdir -p /mnt/f1/ff1")
    ssh_client.exec_command("sleep 1")
    ssh_client.exec_command("dd if=/dev/zero of=/mnt/f1/1m1.bin bs=1M"
                            " count=1")
    ssh_client.exec_command("dd if=/dev/zero of=/mnt/f2/1m2.bin bs=1M"
                            " count=1")
    ssh_client.exec_command("dd if=/dev/zero of=/mnt/f3/1m3.bin bs=1M"
                            " count=1")
    ssh_client.exec_command("dd if=/dev/zero of=/mnt/f4/1m4.bin bs=1M"
                            " count=1")
    ssh_client.exec_command("dd if=/dev/zero of=/mnt/f1/ff1/1m5.bin bs=1M"
                            " count=1")
    ssh_client.exec_command("chmod -R 555 /mnt/f3")
    ssh_client.exec_command("chmod -R 777 /mnt/f4")
    self.umount_share(ssh_client)
    share = self.migrate_share(share['id'], dest_pool)
    if utils.is_microversion_supported("2.9"):
        second_location = (
            self.shares_v2_client.list_share_export_locations(
                share['id'])[0]['path'])
    else:
        # NOTE(vponomaryov): following approach is valid for picking up
        # export location only using microversions lower than '2.9'.
        second_location = share['export_locations'][0]
    self.assertEqual(dest_pool, share['host'])
    self.assertNotEqual(first_location, second_location)
    self.assertEqual('migration_success', share['task_state'])
    # Remount at the new location and check all files made the trip.
    self.mount_share(second_location, ssh_client)
    output = ssh_client.exec_command("ls -lRA --ignore=lost+found /mnt")
    self.umount_share(ssh_client)
    self.assertTrue('1m1.bin' in output)
    self.assertTrue('1m2.bin' in output)
    self.assertTrue('1m3.bin' in output)
    self.assertTrue('1m4.bin' in output)
    self.assertTrue('1m5.bin' in output)
def test_reset_tenant_quotas(self):
    """Bump tenant quotas, reset them, and verify defaults are restored."""
    # Get default_quotas
    default = self.client.default_quotas(self.tenant_id)['quota_set']
    # Get current quotas
    custom = self.client.show_quotas(self.tenant_id)['quota_set']
    # Make quotas for update
    data = {
        "shares": int(custom["shares"]) + 2,
        "snapshots": int(custom["snapshots"]) + 2,
        "gigabytes": int(custom["gigabytes"]) + 2,
        "snapshot_gigabytes": int(custom["snapshot_gigabytes"]) + 2,
        "share_networks": int(custom["share_networks"]) + 2,
    }
    # Share-group quotas exist only when the microversion is available
    # and share-group tests are enabled in the configuration.
    if (utils.is_microversion_supported(SHARE_GROUPS_MICROVERSION) and
            CONF.share.run_share_group_tests):
        data["share_groups"] = int(custom["share_groups"]) + 2
        data["share_group_snapshots"] = (
            int(custom["share_group_snapshots"]) + 2)
    # Replica quotas exist only when replica quota support is available.
    if utils.share_replica_quotas_are_supported():
        data["share_replicas"] = int(custom["share_replicas"]) + 2
        data["replica_gigabytes"] = int(custom["replica_gigabytes"]) + 2
    # set new quota, turn off cleanup - we'll do it right below
    updated = self.update_quotas(self.tenant_id, cleanup=False, **data)
    self.assertEqual(data["shares"], int(updated["shares"]))
    self.assertEqual(data["snapshots"], int(updated["snapshots"]))
    self.assertEqual(data["gigabytes"], int(updated["gigabytes"]))
    self.assertEqual(
        data["snapshot_gigabytes"], int(updated["snapshot_gigabytes"]))
    self.assertEqual(
        data["share_networks"], int(updated["share_networks"]))
    if (utils.is_microversion_supported(SHARE_GROUPS_MICROVERSION) and
            CONF.share.run_share_group_tests):
        self.assertEqual(
            data["share_groups"], int(updated["share_groups"]))
        self.assertEqual(
            data["share_group_snapshots"],
            int(updated["share_group_snapshots"]))
    if utils.share_replica_quotas_are_supported():
        self.assertEqual(
            data["share_replicas"], int(updated["share_replicas"]))
        self.assertEqual(
            data["replica_gigabytes"], int(updated["replica_gigabytes"]))
    # Reset customized quotas
    self.client.reset_quotas(self.tenant_id)
    # Verify quotas
    reseted = self.client.show_quotas(self.tenant_id)['quota_set']
    self.assertEqual(int(default["shares"]),
                     int(reseted["shares"]))
    self.assertEqual(int(default["snapshots"]), int(reseted["snapshots"]))
    self.assertEqual(int(default["gigabytes"]), int(reseted["gigabytes"]))
    self.assertEqual(
        int(default["snapshot_gigabytes"]),
        int(reseted["snapshot_gigabytes"]))
    self.assertEqual(
        int(default["share_networks"]), int(reseted["share_networks"]))
    if (utils.is_microversion_supported(SHARE_GROUPS_MICROVERSION) and
            CONF.share.run_share_group_tests):
        self.assertEqual(
            int(default["share_groups"]), int(reseted["share_groups"]))
        self.assertEqual(
            int(default["share_group_snapshots"]),
            int(reseted["share_group_snapshots"]))
    if utils.share_replica_quotas_are_supported():
        self.assertEqual(
            int(default["share_replicas"]),
            int(reseted["share_replicas"]))
        self.assertEqual(
            int(default["replica_gigabytes"]),
            int(reseted["replica_gigabytes"]))
def skip_if_microversion_not_supported(self, microversion):
    """Skip the current test when *microversion* is not available."""
    if utils.is_microversion_supported(microversion):
        return
    raise self.skipException(
        "Microversion '%s' is not supported." % microversion)
def _validate_migration_successful(self, dest_pool, share, status_to_wait,
                                   version=CONF.share.max_api_microversion,
                                   complete=True, share_network_id=None,
                                   share_type_id=None):
    """Assert that a share migration is in the expected state.

    :param dest_pool: host the share should land on when complete.
    :param share: share dict as returned by the API.
    :param status_to_wait: a task_state value, or iterable of values,
        the share must currently have.
    :param version: API microversion used to list export locations.
    :param complete: True to validate a finished migration (host moved,
        access rules intact, progress 100%); False to validate that the
        share has NOT moved yet.
    :param share_network_id: when given, expected share_network_id.
    :param share_type_id: when given, expected share_type.
    """
    # Normalize status_to_wait to an iterable of acceptable states.
    statuses = ((status_to_wait, )
                if not isinstance(status_to_wait, (tuple, list, set))
                else status_to_wait)
    new_exports = self.shares_v2_client.list_share_export_locations(
        share['id'], version=version)['export_locations']
    self.assertNotEmpty(new_exports)
    # Only tenant-visible export locations are relevant here.
    new_exports = [
        x['path'] for x in new_exports if x['is_admin_only'] is False
    ]
    self.assertNotEmpty(new_exports)
    self.assertIn(share['task_state'], statuses)
    if share_network_id:
        self.assertEqual(share_network_id, share['share_network_id'])
    if share_type_id:
        self.assertEqual(share_type_id, share['share_type'])
    # Share migrated
    if complete:
        self.assertEqual(dest_pool, share['host'])
        rules = self.shares_v2_client.list_access_rules(
            share['id'])['access_list']
        # The access rules applied before migration must survive it.
        expected_rules = [{
            'state': constants.RULE_STATE_ACTIVE,
            'access_to': '50.50.50.50',
            'access_type': 'ip',
            'access_level': 'rw',
        }, {
            'state': constants.RULE_STATE_ACTIVE,
            'access_to': '51.51.51.51',
            'access_type': 'ip',
            'access_level': 'ro',
        }]
        # Project each rule onto the compared fields only.
        filtered_rules = [{
            'state': rule['state'],
            'access_to': rule['access_to'],
            'access_level': rule['access_level'],
            'access_type': rule['access_type']
        } for rule in rules]
        for r in expected_rules:
            self.assertIn(r, filtered_rules)
        self.assertEqual(len(expected_rules), len(filtered_rules))
        # In v 2.54 and beyond, we expect key 'progress' in the
        # destination share data
        if utils.is_microversion_supported('2.54'):
            self.assertEqual('100%', share['progress'])
    # Share not migrated yet
    else:
        self.assertNotEqual(dest_pool, share['host'])
def skip_if_microversion_not_supported(self, microversion):
    """Raise skipException unless *microversion* is supported."""
    supported = utils.is_microversion_supported(microversion)
    if not supported:
        msg = "Microversion '%s' is not supported." % microversion
        raise self.skipException(msg)
def test_manage_share_server(self, add_subnet_field):
    """Unmanage a share server and manage it back, verifying metadata.

    Creates a dedicated share network (optionally with an AZ subnet),
    creates a share on it, then unmanages the share and its server,
    manages both back, and checks the server keys, the auto-deletable
    flag transitions, and the share/server association.
    """
    # Starting from v2.51 share network spans to multiple subnets.
    if add_subnet_field and not utils.is_microversion_supported('2.51'):
        msg = ("Manage share server with share network subnet is "
               "supported starting from microversion '2.51'.")
        raise self.skipException(msg)
    check_multiple_subnet = utils.is_microversion_ge(
        CONF.share.max_api_microversion, '2.70')
    # v2.70 switched the field to its plural form.
    if check_multiple_subnet:
        network_subnet = 'share_network_subnet_ids'
    else:
        network_subnet = 'share_network_subnet_id'
    # create a new share network to make sure that a new share server
    # will be created
    original_share_network = self.shares_v2_client.get_share_network(
        self.shares_v2_client.share_network_id)['share_network']
    share_net_info = (
        utils.share_network_get_default_subnet(original_share_network)
        if utils.share_network_subnets_are_supported()
        else original_share_network)
    share_network = self.create_share_network(
        neutron_net_id=share_net_info['neutron_net_id'],
        neutron_subnet_id=share_net_info['neutron_subnet_id'],
        cleanup_in_class=True)
    az = params = None
    if add_subnet_field:
        # Get a compatible availability zone
        az = self.get_availability_zones_matching_share_type(
            self.share_type)[0]
        az_subnet = self.shares_v2_client.create_subnet(
            share_network['id'],
            neutron_net_id=share_network['neutron_net_id'],
            neutron_subnet_id=share_network['neutron_subnet_id'],
            availability_zone=az)['share_network_subnet']
        params = {network_subnet: az_subnet['id']}
    # create share
    share = self.create_share(share_type_id=self.share_type['id'],
                              share_network_id=share_network['id'],
                              availability_zone=az)
    share = self.shares_v2_client.get_share(share['id'])['share']
    el = self.shares_v2_client.list_share_export_locations(
        share['id'])['export_locations']
    share['export_locations'] = el
    share_server = self.shares_v2_client.show_share_server(
        share['share_server_id'])['share_server']
    # Keys every share server response must expose.
    keys = [
        "id", "host", "project_id", "status", "share_network_name",
        "created_at", "updated_at", "backend_details",
        "is_auto_deletable", "identifier",
    ]
    if add_subnet_field:
        keys.append(network_subnet)
    # all expected keys are present
    for key in keys:
        self.assertIn(key, share_server)
    # check that the share server is initially auto-deletable
    self.assertIs(True, share_server["is_auto_deletable"])
    self.assertIsNotNone(share_server["identifier"])
    # Plural field holds a list; singular field holds one id.
    if add_subnet_field and check_multiple_subnet:
        self.assertIn(az_subnet["id"], share_server[network_subnet])
    elif add_subnet_field and not check_multiple_subnet:
        self.assertEqual(az_subnet["id"], share_server[network_subnet])
    self._unmanage_share_and_wait(share)
    # Starting from microversion 2.49, any share server that has ever had
    # an unmanaged share will never be auto-deleted.
    share_server = self.shares_v2_client.show_share_server(
        share_server['id'])['share_server']
    self.assertIs(False, share_server['is_auto_deletable'])
    # unmanage share server and manage it again
    self._unmanage_share_server_and_wait(share_server)
    managed_share_server = self._manage_share_server(share_server,
                                                     fields=params)
    managed_share = self._manage_share(
        share,
        name="managed share that had ID %s" % share['id'],
        description="description for managed share",
        share_server_id=managed_share_server['id'])
    # check managed share server
    managed_share_server = self.shares_v2_client.show_share_server(
        managed_share_server['id'])['share_server']
    # all expected keys are present in the managed share server
    for key in keys:
        self.assertIn(key, managed_share_server)
    # check that managed share server is used by the managed share
    self.assertEqual(managed_share['share_server_id'],
                     managed_share_server['id'])
    # check that the managed share server is still not auto-deletable
    self.assertIs(False, managed_share_server["is_auto_deletable"])
    # delete share
    self._delete_share_and_wait(managed_share)
    # delete share server
    self._delete_share_server_and_wait(managed_share_server['id'])
    if add_subnet_field:
        # delete the created subnet
        self.shares_v2_client.delete_subnet(share_network['id'],
                                            az_subnet['id'])