def test_get_share_with_share_type(self):
    """Create a share with an explicit share type and verify it via GET.

    Microversion 2.5 reports the share type by name in ``share_type``;
    2.6 reports its ID there and the name in ``share_type_name``.
    """
    # Data
    share_name = data_utils.rand_name("share")
    shr_type_name = data_utils.rand_name("share-type")
    extra_specs = self.add_extra_specs_to_dict({
        "storage_protocol": CONF.share.capability_storage_protocol,
    })

    # Create share type
    st_create = self.create_share_type(
        shr_type_name, extra_specs=extra_specs)

    # Create share with share type
    share = self.create_share(
        name=share_name, share_type_id=st_create["id"])
    # Expected-first argument order, consistent with the assertions below.
    self.assertEqual(share_name, share["name"])
    waiters.wait_for_resource_status(self.shares_client, share["id"],
                                     "available")

    # Verify share info: before 2.6 the share type is exposed by name
    get = self.shares_v2_client.get_share(share["id"],
                                          version="2.5")['share']
    self.assertEqual(share_name, get["name"])
    self.assertEqual(share["id"], get["id"])
    self.assertEqual(shr_type_name, get["share_type"])

    # From 2.6 the share type is exposed by ID, the name separately
    get = self.shares_v2_client.get_share(share["id"],
                                          version="2.6")['share']
    self.assertEqual(st_create["id"], get["share_type"])
    self.assertEqual(shr_type_name, get["share_type_name"])
def resource_setup(cls):
    """Set up class-wide resources for access-rule metadata tests.

    Creates a share type and a share, then attaches one access rule
    with metadata so every test starts from a share that already has
    a rule in the ``active`` state.
    """
    super(AccessRulesMetadataTest, cls).resource_setup()
    cls.protocol = cls.shares_v2_client.share_protocol
    cls.access_type, __ = cls._get_access_rule_data_from_config()
    int_range = range(20, 50)
    cls.access_to = {
        # list of unique values is required for ability to create lots
        # of access rules for one share using different API microversions.
        'ip': set([utils.rand_ipv6_ip() for i in int_range]),
        # following users are fakes and access rules that use it are
        # expected to fail, but they are used only for API testing.
        'user': ['foo_user_%d' % i for i in int_range],
        'cert': ['tenant_%d.example.com' % i for i in int_range],
        'cephx': ['eve%d' % i for i in int_range],
    }
    # create share type
    cls.share_type = cls.create_share_type()
    cls.share_type_id = cls.share_type['id']
    # create share
    cls.share = cls.create_share(share_type_id=cls.share_type_id)
    cls.md1 = {"key1": "value1", "key2": "value2"}
    # Seed one rule with metadata; pop() consumes a value so it is not
    # reused by the tests.
    cls.access = cls.shares_v2_client.create_access_rule(
        cls.share["id"], cls.access_type,
        cls.access_to[cls.access_type].pop(), 'rw',
        metadata=cls.md1)['access']
    waiters.wait_for_resource_status(
        cls.shares_v2_client, cls.share["id"], "active",
        resource_name='access_rule', rule_id=cls.access["id"])
def test_manage_invalid_provider_location(self):
    """Managing a snapshot with a wrong provider location fails.

    The bad manage attempt must land in ``manage_error``; re-managing
    with the real provider location must then succeed.
    """
    # Manage a snapshot with wrong provider location fails
    # Create snapshot
    snap = self.create_snapshot_wait_for_active(self.share['id'])
    snap = self.shares_v2_client.get_snapshot(snap['id'])['snapshot']

    # Unmanage snapshot
    self.shares_v2_client.unmanage_snapshot(snap['id'])
    self.shares_client.wait_for_resource_deletion(snapshot_id=snap['id'])

    # Manage snapshot with invalid provider location leaves it in
    # manage_error state
    invalid_snap = self.shares_v2_client.manage_snapshot(
        self.share['id'],
        'invalid_provider_location',
        driver_options={})['snapshot']
    waiters.wait_for_resource_status(self.shares_v2_client,
                                     invalid_snap['id'],
                                     constants.STATUS_MANAGE_ERROR,
                                     resource_name='snapshot')
    self.shares_v2_client.unmanage_snapshot(invalid_snap['id'])

    # Manage it properly and delete
    managed_snap = self.shares_v2_client.manage_snapshot(
        self.share['id'],
        snap['provider_location'])['snapshot']
    waiters.wait_for_resource_status(self.shares_v2_client,
                                     managed_snap['id'],
                                     constants.STATUS_AVAILABLE,
                                     resource_name='snapshot')
    self._delete_snapshot_and_wait(managed_snap)
def test_snapshot_after_share_replica(self):
    """Test the snapshot for replicated share.

    Create replica first and then create a snapshot.
    Verify that the snapshot is properly created under replica by
    creating a share from that snapshot.
    """
    share = self.create_share(share_type_id=self.share_type_id,
                              availability_zone=self.share_zone,
                              share_network_id=self.sn_id)
    original_replica = self.shares_v2_client.list_share_replicas(
        share["id"])['share_replicas'][0]
    share_replica = self.create_share_replica(share["id"],
                                              self.replica_zone,
                                              cleanup=False)
    # The original replica is demoted below; ensure it is removed even
    # if the test fails midway.
    self.addCleanup(self.delete_share_replica, original_replica['id'])
    waiters.wait_for_resource_status(
        self.shares_v2_client, share_replica['id'],
        constants.REPLICATION_STATE_IN_SYNC,
        resource_name='share_replica', status_attr='replica_state')
    snapshot = self.create_snapshot_wait_for_active(share["id"])
    # Promote the new replica, then drop the old active one.
    self.promote_share_replica(share_replica['id'])
    self.delete_share_replica(original_replica['id'])
    snapshot = self.shares_v2_client.get_snapshot(
        snapshot['id'])['snapshot']
    self.assertEqual(constants.STATUS_AVAILABLE, snapshot['status'])
    if CONF.share.capability_create_share_from_snapshot_support:
        # The snapshot must still be usable from the promoted replica.
        self.create_share(share_type_id=self.share_type_id,
                          snapshot_id=snapshot['id'],
                          share_network_id=self.sn_id)
def test_create_replica_from_snapshot_share(self):
    """Test replica for a share that was created from snapshot."""
    share = self.create_share(share_type_id=self.share_type_id,
                              availability_zone=self.share_zone,
                              share_network_id=self.sn_id)
    orig_snapshot = self.create_snapshot_wait_for_active(share["id"])
    # Share created from the snapshot; this is the one being replicated.
    snap_share = self.create_share(share_type_id=self.share_type_id,
                                   snapshot_id=orig_snapshot['id'],
                                   share_network_id=self.sn_id)
    original_replica = self.shares_v2_client.list_share_replicas(
        snap_share["id"])['share_replicas'][0]
    # cleanup=False because this replica is promoted to 'active' below.
    share_replica = self.create_share_replica(snap_share["id"],
                                              self.replica_zone,
                                              cleanup=False)
    self.addCleanup(self.delete_share_replica, original_replica['id'])
    waiters.wait_for_resource_status(
        self.shares_v2_client, share_replica['id'],
        constants.REPLICATION_STATE_IN_SYNC,
        resource_name='share_replica', status_attr='replica_state')
    self.promote_share_replica(share_replica['id'])
    # Delete the demoted replica so promoted replica can be cleaned
    # during the cleanup
    self.delete_share_replica(original_replica['id'])
def test_create_extend_and_write(self):
    """End-to-end scenario: create, fill, extend and re-fill a share.

    Writes ~75% of the share from an instance, verifies that writing
    past the share size fails, extends the share by 1 GiB and verifies
    the extra data then fits.
    """
    default_share_size = CONF.share.share_size

    LOG.debug('Step 1 - create instance')
    instance = self.boot_instance(wait_until="BUILD")

    LOG.debug(
        'Step 2 - create share of size {} Gb'.format(default_share_size))
    share = self.create_share(size=default_share_size)

    LOG.debug('Step 3 - wait for active instance')
    instance = self.wait_for_active_instance(instance["id"])
    remote_client = self.init_remote_client(instance)

    LOG.debug('Step 4 - grant access')
    location = self.get_user_export_locations(share)[0]
    self.allow_access(share=share, instance=instance,
                      remote_client=remote_client, locations=location)

    LOG.debug('Step 5 - mount')
    self.mount_share(location, remote_client)

    # dd takes an integer block count: use floor division so we do not
    # hand it a float under Python 3 (true division would yield e.g.
    # 12.0, which dd rejects).
    total_blocks = (units.Ki * default_share_size) // 64
    three_quarter_blocks = (total_blocks // 4) * 3
    LOG.debug(
        'Step 6 - writing {} * 64MB blocks'.format(three_quarter_blocks))
    self.write_data_to_mounted_share_using_dd(remote_client,
                                              '/mnt/t1', '64M',
                                              three_quarter_blocks,
                                              '/dev/urandom')
    ls_result = remote_client.exec_command("sudo ls -lAh /mnt/")
    LOG.debug(ls_result)

    # More blocks than the remaining free space can hold.
    over_one_quarter_blocks = total_blocks - three_quarter_blocks + 5
    LOG.debug('Step 6b - Write more data, should fail')
    self.assertRaises(exceptions.SSHExecCommandFailed,
                      self.write_data_to_mounted_share_using_dd,
                      remote_client, '/mnt/t2', '64M',
                      over_one_quarter_blocks, '/dev/urandom')
    ls_result = remote_client.exec_command("sudo ls -lAh /mnt/")
    LOG.debug(ls_result)

    LOG.debug('Step 7 - extend and wait')
    extended_share_size = default_share_size + 1
    self.shares_v2_client.extend_share(share["id"],
                                       new_size=extended_share_size)
    waiters.wait_for_resource_status(self.shares_v2_client,
                                     share["id"],
                                     constants.STATUS_AVAILABLE)
    share = self.shares_v2_client.get_share(share["id"])['share']
    self.assertEqual(extended_share_size, int(share["size"]))

    LOG.debug('Step 8 - writing more data, should succeed')
    self.write_data_with_remount(location, remote_client, '/mnt/t3',
                                 '64M', over_one_quarter_blocks)
    ls_result = remote_client.exec_command("sudo ls -lAh /mnt/")
    LOG.debug(ls_result)

    LOG.debug('Step 9 - unmount')
    self.unmount_share(remote_client)
def _allow_access_snapshot(self, snapshot_id, access_type="ip",
                           access_to="0.0.0.0/0", cleanup=True,
                           client=None):
    """Allow snapshot access and wait for the rule to become active.

    :param snapshot_id: id of the snapshot
    :param access_type: "ip", "user" or "cert"
    :param access_to: client address/identity the rule grants access to
    :param cleanup: whether to schedule deletion of the rule on cleanup
    :param client: shares client, normal/admin
    :returns: access object
    """
    client = client or self.shares_v2_client
    access = client.create_snapshot_access_rule(
        snapshot_id, access_type, access_to)['snapshot_access']
    if cleanup:
        self.addCleanup(client.delete_snapshot_access_rule,
                        snapshot_id, access['id'])
    share_waiters.wait_for_resource_status(
        client, snapshot_id, 'active',
        resource_name='snapshot_access', rule_id=access['id'],
        status_attr='state')
    return access
def test_can_apply_new_cephx_rules_when_one_is_in_error_state(self):
    """A rule stuck in 'error' must not block new cephx rules."""
    # Create share on "primary" tenant
    share_primary = self.create_share()
    # Add access rule to "Joe" by "primary" user
    self.allow_access(share_primary['id'], access_to='Joe')

    # Create share on "alt" tenant
    share_alt = self.create_share(client=self.alt_shares_v2_client)
    # Add access rule to "Joe" by "alt" user.
    # Rule must be set to "error" status.
    rule1 = self.allow_access(share_alt['id'],
                              client=self.alt_shares_v2_client,
                              access_to='Joe', status='error',
                              raise_rule_in_error_state=False,
                              cleanup=False)

    # Share's "access_rules_status" must be in "error" status
    share_alt_updated = self.alt_shares_v2_client.get_share(
        share_alt['id'])['share']
    self.assertEqual('error', share_alt_updated['access_rules_status'])

    # Add second access rule to different client by "alt" user.
    self.allow_access(share_alt['id'], client=self.alt_shares_v2_client)

    # Check share's access_rules_status has transitioned to "active"
    # status once the errored rule is removed.
    self.alt_shares_v2_client.delete_access_rule(share_alt['id'],
                                                 rule1['id'])
    waiters.wait_for_resource_status(
        self.alt_shares_v2_client, share_alt['id'], 'active',
        status_attr='access_rules_status')
def test_different_users_in_same_tenant_can_use_same_cephx_id(self):
    """Two users in one project can reuse the same cephx ID."""
    # Grant access to the share
    access1 = self.shares_v2_client.create_access_rule(
        self.share['id'], self.access_type, self.access_to,
        'rw')['access']
    waiters.wait_for_resource_status(
        self.shares_v2_client, self.share["id"], "active",
        resource_name='access_rule', rule_id=access1["id"])

    # Create a new user in the current project
    project = self.os_admin.projects_client.show_project(
        self.shares_v2_client.tenant_id)['project']
    user_client = self.create_user_and_get_client(project)

    # Create second share by the new user
    share2 = self.create_share(client=user_client.shares_v2_client,
                               share_protocol=self.protocol,
                               share_type_id=self.share_type_id)

    # Grant access to the second share using the same cephx ID that was
    # used in access1
    access2 = user_client.shares_v2_client.create_access_rule(
        share2['id'], self.access_type, self.access_to, 'rw')['access']
    waiters.wait_for_resource_status(
        user_client.shares_v2_client, share2['id'], "active",
        resource_name='access_rule', rule_id=access2['id'])
def _test_manage(self, snapshot, version=CONF.share.max_api_microversion):
    """Manage a previously unmanaged snapshot and verify the result.

    Manages *snapshot* via the given API microversion, strictly checks
    the response keys for that microversion, verifies the stored data,
    then deletes the managed snapshot and confirms it is gone.
    """
    name = ("Name for 'managed' snapshot that had ID %s" %
            snapshot['id'])
    description = "Description for 'managed' snapshot"
    utils.skip_if_manage_not_supported_for_version(version)

    # Manage snapshot
    share_id = snapshot['share_id']
    snapshot = self.shares_v2_client.manage_snapshot(
        share_id,
        snapshot['provider_location'],
        name=name,
        description=description,
        # Some drivers require additional parameters passed as driver
        # options, as follows:
        # - size: Hitachi HNAS Driver
        driver_options={'size': snapshot['size']},
        version=version,
    )['snapshot']

    # Add managed snapshot to cleanup queue
    self.method_resources.insert(
        0, {'type': 'snapshot',
            'id': snapshot['id'],
            'client': self.shares_v2_client})

    # Wait for success
    waiters.wait_for_resource_status(self.shares_v2_client,
                                     snapshot['id'],
                                     constants.STATUS_AVAILABLE,
                                     resource_name='snapshot')

    # Verify manage snapshot API response
    expected_keys = ["status", "links", "share_id", "name",
                     "share_proto", "created_at",
                     "description", "id", "share_size", "size",
                     "provider_location"]
    # user_id/project_id were added to the response in microversion 2.17
    if utils.is_microversion_ge(version, '2.17'):
        expected_keys.extend(["user_id", "project_id"])
    actual_keys = snapshot.keys()

    # Strict key check
    self.assertEqual(set(expected_keys), set(actual_keys))

    # Verify data of managed snapshot
    get_snapshot = self.shares_v2_client.get_snapshot(
        snapshot['id'])['snapshot']
    self.assertEqual(name, get_snapshot['name'])
    self.assertEqual(description, get_snapshot['description'])
    self.assertEqual(snapshot['share_id'], get_snapshot['share_id'])

    # Delete snapshot
    self.shares_v2_client.delete_snapshot(get_snapshot['id'])
    self.shares_client.wait_for_resource_deletion(
        snapshot_id=get_snapshot['id'])
    self.assertRaises(lib_exc.NotFound,
                      self.shares_v2_client.get_snapshot,
                      get_snapshot['id'])
def test_create_duplicate_single_host_rules(self, access_to):
    """Test rules for individual clients with and without max-prefix."""
    if ':' in access_to and utils.is_microversion_lt(
            CONF.share.max_api_microversion, '2.38'):
        reason = ("Skipped. IPv6 rules are accepted from and beyond "
                  "API version 2.38, the configured maximum API version "
                  "is %s" % CONF.share.max_api_microversion)
        raise self.skipException(reason)

    rule = self.shares_v2_client.create_access_rule(
        self.share["id"], "ip", access_to)['access']
    self.addCleanup(self.shares_v2_client.delete_access_rule,
                    self.share["id"], rule['id'])
    waiters.wait_for_resource_status(
        self.shares_v2_client, self.share["id"], "active",
        status_attr='access_rules_status')

    # An identical rule must be rejected.
    self.assertRaises(lib_exc.BadRequest,
                      self.shares_v2_client.create_access_rule,
                      self.share["id"], "ip", access_to)

    # The same host written with/without the maximum prefix length
    # (/32 for IPv4, /128 for IPv6) is still a duplicate.
    if '/' in access_to:
        access_to = access_to.split("/")[0]
    else:
        access_to = ('%s/32' % access_to if '.' in access_to
                     else '%s/128' % access_to)

    self.assertRaises(lib_exc.BadRequest,
                      self.shares_v2_client.create_access_rule,
                      self.share["id"], "ip", access_to)
def test_delete_share_in_manage_error(self):
    """A share in manage_error state cannot be deleted via the API."""
    share = self._create_share_for_manage()
    valid_params = self._get_manage_params_from_share(share)

    # forge bad param to have a share in manage_error state
    invalid_params = valid_params.copy()
    invalid_params.update(
        {'export_path': data_utils.rand_name(name='invalid-share-export')})
    invalid_share = self.shares_v2_client.manage_share(
        **invalid_params)['share']
    waiters.wait_for_resource_status(self.shares_v2_client,
                                     invalid_share['id'],
                                     constants.STATUS_MANAGE_ERROR)
    self._unmanage_share_and_wait(share)

    # the attempt to delete a share in manage_error should raise an
    # exception
    self.assertRaises(lib_exc.Forbidden,
                      self.shares_v2_client.delete_share,
                      invalid_share['id'])

    # cleanup
    self.shares_v2_client.unmanage_share(invalid_share['id'])
    managed_share = self._manage_share_and_wait(valid_params)
    self._delete_share_and_wait(managed_share)

    # Delete share server, since it can't be "auto-deleted"
    if (CONF.share.multitenancy_enabled and
            not CONF.share.share_network_id):
        # For a pre-configured share_network_id, we don't
        # delete the share server.
        self._delete_share_server_and_wait(
            managed_share['share_server_id'])
def _verify_in_sync_replica_promotion(self, share, original_replica):
    """Create an 'in_sync' replica, promote it and verify the outcome.

    :param share: share to replicate
    :param original_replica: current 'active' replica of *share*
    """
    # Verify that 'in-sync' replica has been promoted successfully
    # NOTE(Yogi1): Cleanup needs to be disabled for replica that is
    # being promoted since it will become the 'primary'/'active' replica.
    replica = self.create_share_replica(share["id"], self.replica_zone,
                                        cleanup=False)
    # Wait for replica state to update after creation
    waiters.wait_for_resource_status(
        self.shares_v2_client, replica['id'],
        constants.REPLICATION_STATE_IN_SYNC,
        resource_name='share_replica', status_attr='replica_state')
    # Promote the first in_sync replica to active state
    promoted_replica = self.promote_share_replica(replica['id'])
    # Delete the demoted replica so promoted replica can be cleaned
    # during the cleanup of the share.
    self.addCleanup(self.delete_share_replica, original_replica['id'])
    self._verify_active_replica_count(share["id"])
    # Verify the replica_state for promoted replica
    promoted_replica = self.shares_v2_client.get_share_replica(
        promoted_replica["id"])['share_replica']
    self.assertEqual(constants.REPLICATION_STATE_ACTIVE,
                     promoted_replica["replica_state"])
def test_extend_replicated_share(self):
    """Extending a replicated share succeeds and reports the new size."""
    share_id = self.share["id"]
    target_size = self.share["size"] + 1
    self.admin_client.extend_share(share_id, target_size)
    waiters.wait_for_resource_status(self.admin_client, share_id,
                                     "available")
    extended = self.admin_client.get_share(share_id)['share']
    self.assertEqual(target_size, int(extended["size"]))
def test_shrink_replicated_share(self):
    """Shrinking a replicated share succeeds and reports the new size."""
    current = self.admin_client.get_share(self.share["id"])['share']
    target_size = self.share["size"] - 1
    self.admin_client.shrink_share(self.share["id"], target_size)
    waiters.wait_for_resource_status(self.admin_client, current["id"],
                                     "available")
    shrunk = self.admin_client.get_share(self.share["id"])['share']
    self.assertEqual(target_size, int(shrunk["size"]))
def _reset_resource_available(self, resource_id, resource_type="shares"):
    """Force the resource back to 'available' and wait for it.

    :param resource_id: id of the share/snapshot/instance to reset
    :param resource_type: plural API resource name, e.g. "shares"
    """
    self.shares_v2_client.reset_state(
        resource_id, s_type=resource_type, status="available")
    # The waiter takes the singular form ("shares" -> "share").
    singular = resource_type[:-1]
    waiters.wait_for_resource_status(
        self.shares_v2_client, resource_id, "available",
        resource_name=singular)
def test_reset_share_task_state(self):
    """Every configured task state can be set via reset_task_state."""
    share_id = self.share["id"]
    for state in self.task_states:
        self.shares_v2_client.reset_task_state(share_id, state)
        waiters.wait_for_resource_status(self.shares_v2_client,
                                         share_id, state,
                                         status_attr='task_state')
def _manage_share_and_wait(self, params, state=constants.STATUS_AVAILABLE):
    """Manage a share, wait until it reaches *state*, and return it.

    :param params: keyword arguments for ``manage_share``
    :param state: status to wait for (defaults to 'available')
    :returns: the managed share object
    """
    managed = self.shares_v2_client.manage_share(**params)['share']
    waiters.wait_for_resource_status(self.shares_v2_client,
                                     managed['id'], state)
    return managed
def _create_delete_ro_access_rule(self, version):
    """Common test case for usage in test suites with different decorators.

    Creates a read-only access rule with the given API microversion,
    verifies its lifecycle states, then deletes it and waits for the
    deletion to complete.

    :param self: instance of test class
    :param version: API microversion to exercise
    """
    # Microversion 1.0 uses the v1 client; everything else goes via v2.
    if utils.is_microversion_eq(version, '1.0'):
        rule = self.shares_client.create_access_rule(
            self.share["id"], self.access_type, self.access_to,
            'ro')['access']
    else:
        rule = self.shares_v2_client.create_access_rule(
            self.share["id"], self.access_type, self.access_to, 'ro',
            version=version)['access']

    self.assertEqual('ro', rule['access_level'])
    # Internal bookkeeping fields must never leak into the API response.
    for key in ('deleted', 'deleted_at', 'instance_mappings'):
        self.assertNotIn(key, rule.keys())

    # rules must start out in 'new' until 2.28 & 'queued_to_apply'
    # after 2.28
    if utils.is_microversion_le(version, "2.27"):
        self.assertEqual("new", rule['state'])
    else:
        self.assertEqual("queued_to_apply", rule['state'])

    # Before 2.10 the rule itself is polled; afterwards the share's
    # aggregate 'access_rules_status' is used.
    if utils.is_microversion_le(version, '2.9'):
        waiters.wait_for_resource_status(
            self.shares_client, self.share["id"], "active",
            resource_name='access_rule', rule_id=rule["id"])
    else:
        waiters.wait_for_resource_status(
            self.shares_v2_client, self.share["id"], "active",
            status_attr='access_rules_status', version=version)
        # If the 'access_rules_status' transitions to 'active',
        # rule state must too
        rules = self.shares_v2_client.list_access_rules(
            self.share['id'])['access_list']
        rule = [r for r in rules if r['id'] == rule['id']][0]
        self.assertEqual("active", rule['state'])

    if utils.is_microversion_eq(version, '1.0'):
        self.shares_client.delete_access_rule(self.share["id"],
                                              rule["id"])
        self.shares_client.wait_for_resource_deletion(
            rule_id=rule["id"], share_id=self.share['id'])
    else:
        self.shares_v2_client.delete_access_rule(self.share["id"],
                                                 rule["id"],
                                                 version=version)
        self.shares_v2_client.wait_for_resource_deletion(
            rule_id=rule["id"], share_id=self.share['id'],
            version=version)
def _create_snapshot(self, share_id, client=None, **kwargs):
    """Create a snapshot of *share_id*, register cleanup and wait.

    :param share_id: id of the share to snapshot
    :param client: shares client to use; defaults to the v2 client
    :returns: snapshot object
    """
    client = client or self.shares_v2_client
    snapshot = client.create_snapshot(share_id, **kwargs)['snapshot']
    # Cleanups run LIFO: deletion is issued first, then awaited.
    self.addCleanup(client.wait_for_resource_deletion,
                    snapshot_id=snapshot['id'])
    self.addCleanup(client.delete_snapshot, snapshot['id'])
    share_waiters.wait_for_resource_status(
        client, snapshot["id"], "available",
        resource_name='snapshot')
    return snapshot
def test_share_server_reset_state(self):
    """Cycle a share server through every state via reset_state."""
    # Get network and subnet from existing share_network and reuse it
    # to be able to delete share_server after test ends.
    new_sn = self.create_share_network(
        add_security_services=True,
        neutron_net_id=self.share_net_info['neutron_net_id'],
        neutron_subnet_id=self.share_net_info['neutron_subnet_id'])
    share = self.create_share(
        share_type_id=self.share_type_id,
        share_network_id=new_sn['id']
    )
    share = self.shares_v2_client.get_share(share['id'])['share']

    # obtain share server
    share_server = self.shares_v2_client.show_share_server(
        share['share_server_id']
    )['share_server']

    for state in (constants.SERVER_STATE_ACTIVE,
                  constants.SERVER_STATE_CREATING,
                  constants.SERVER_STATE_DELETING,
                  constants.SERVER_STATE_ERROR,
                  constants.SERVER_STATE_MANAGE_ERROR,
                  constants.SERVER_STATE_MANAGE_STARTING,
                  constants.SERVER_STATE_UNMANAGE_ERROR,
                  constants.SERVER_STATE_UNMANAGE_STARTING):
        # leave it in a new state
        self.shares_v2_client.share_server_reset_state(
            share_server['id'],
            status=state,
        )
        waiters.wait_for_resource_status(
            self.shares_v2_client, share_server['id'], state,
            resource_name="share_server"
        )

    # bring the share server back in the active state
    self.shares_v2_client.share_server_reset_state(
        share_server['id'],
        status=constants.SERVER_STATE_ACTIVE,
    )
    waiters.wait_for_resource_status(
        self.shares_v2_client, share_server['id'],
        constants.SERVER_STATE_ACTIVE,
        resource_name="share_server"
    )

    # delete share
    self.shares_v2_client.delete_share(share["id"])
    self.shares_v2_client.wait_for_resource_deletion(
        share_id=share["id"]
    )

    # delete share network. This will trigger share server deletion
    self.shares_v2_client.delete_share_network(new_sn["id"])
    self.shares_v2_client.wait_for_resource_deletion(
        sn_id=new_sn['id'])
def test_revert_to_latest_snapshot(self, version):
    """A share can be reverted to its most recent snapshot."""
    share_id = self.share['id']
    snapshot = self.create_snapshot_wait_for_active(
        share_id, cleanup_in_class=False)
    self.shares_v2_client.revert_to_snapshot(
        share_id, snapshot['id'], version=version)
    # The share goes busy during the revert; wait for it to settle.
    waiters.wait_for_resource_status(self.shares_v2_client, share_id,
                                     constants.STATUS_AVAILABLE)
def test_migration_get_progress_None(self):
    """Progress query is rejected when there is no migration running."""
    share_id = self.share["id"]
    # Clear any task state so no migration appears to be in flight.
    self.shares_v2_client.reset_task_state(share_id, None)
    waiters.wait_for_resource_status(self.shares_v2_client, share_id,
                                     None, status_attr='task_state')
    self.assertRaises(lib_exc.BadRequest,
                      self.shares_v2_client.migration_get_progress,
                      share_id)
def test_reset_snapshot_state(self, status):
    """reset_state moves a snapshot into the requested status."""
    snap = self.create_snapshot_wait_for_active(self.share["id"])
    self.shares_v2_client.reset_state(
        snap["id"], s_type="snapshots", status=status)
    waiters.wait_for_resource_status(
        self.shares_v2_client, snap["id"], status,
        resource_name='snapshot')
    # Put the snapshot back to 'available' once the test finishes.
    self.addCleanup(self._reset_resource_available, snap["id"],
                    "snapshots")
def deny_access(self, share_id, access_rule_id, client=None):
    """Deny share access

    :param share_id: id of the share
    :param access_rule_id: id of the rule that will be deleted
    :param client: optional shares client used to delete the rule;
        defaults to ``self.shares_client``
    """
    client = client or self.shares_client
    client.delete_access_rule(share_id, access_rule_id)
    # NOTE(review): the wait below always polls with
    # self.shares_v2_client even when a different *client* was passed
    # in. That looks deliberate for the v1 default client (which cannot
    # poll 'access_rules_status'), but would poll the wrong tenant for
    # an alt-credentials client — confirm against callers.
    share_waiters.wait_for_resource_status(
        self.shares_v2_client, share_id, "active",
        status_attr='access_rules_status')
def test_share_server_migration_cancel(self):
    """Test the share server migration cancel."""
    share_network_id = self.provide_share_network(
        self.shares_v2_client, self.networks_client)
    share = self.create_share(share_protocol=self.protocol,
                              share_type_id=self.share_type['id'],
                              share_network_id=share_network_id,
                              cleanup_in_class=False)
    share = self.shares_v2_client.get_share(share['id'])['share']

    # Initial migration setup.
    share, src_server_id, dest_host, snapshot_id = self._setup_migration(
        share)

    # Only ask the driver to preserve snapshots if one actually exists.
    preserve_snapshots = True if snapshot_id else False

    # Start share server migration.
    self.shares_v2_client.share_server_migration_start(
        src_server_id, dest_host,
        preserve_snapshots=preserve_snapshots)
    expected_state = constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE
    waiters.wait_for_resource_status(
        self.shares_v2_client, src_server_id, expected_state,
        resource_name='share_server', status_attr='task_state')

    # Get for the destination share server.
    dest_server_id = self._get_share_server_destination_for_migration(
        src_server_id)
    dest_server = self.shares_v2_client.show_share_server(
        dest_server_id)['share_server']
    self.assertEqual(dest_host, dest_server['host'])
    self.assertEqual(share_network_id, dest_server['share_network_id'])

    # Validate the share instances status.
    share_status = constants.STATUS_SERVER_MIGRATING
    self._validate_state_of_resources(share, share_status, snapshot_id)

    # Cancel the share server migration.
    self.shares_v2_client.share_server_migration_cancel(src_server_id)

    # Wait for the migration cancelled status.
    expected_state = constants.TASK_STATE_MIGRATION_CANCELLED
    waiters.wait_for_resource_status(
        self.shares_v2_client, src_server_id, expected_state,
        resource_name='share_server', status_attr='task_state')

    # After the cancel operation, we need to validate again the resources.
    expected_status = constants.STATUS_AVAILABLE
    self._validate_state_of_resources(share, expected_status,
                                      snapshot_id)
def resource_cleanup(cls):
    """Cancel the in-flight server migration before normal cleanup.

    The class leaves a share server migration running; it is cancelled
    here (once the server reaches a cancellable task state) so the
    share and server can be deleted by the base-class cleanup.
    """
    states = [constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
              constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE]
    # Wait until the migration reaches either cancellable state.
    waiters.wait_for_resource_status(
        cls.shares_v2_client, cls.server_id, states,
        resource_name="share_server", status_attr="task_state")
    cls.shares_v2_client.share_server_migration_cancel(cls.server_id)
    waiters.wait_for_resource_status(
        cls.shares_v2_client, cls.share['id'], status="available")
    super(ShareServerMigrationStartNegativesNFS, cls).resource_cleanup()
def test_snapshot_before_and_after_share_replica(self):
    """Test the snapshot for replicated share.

    Verify that snapshot can be created before and after share replica
    being created.
    Verify snapshots by creating share from the snapshots.
    """
    share = self.create_share(share_type_id=self.share_type_id,
                              availability_zone=self.share_zone,
                              share_network_id=self.sn_id)
    # Snapshot taken before the replica exists.
    snapshot1 = self.create_snapshot_wait_for_active(share["id"])

    original_replica = self.shares_v2_client.list_share_replicas(
        share["id"])['share_replicas'][0]
    share_replica = self.create_share_replica(share["id"],
                                              self.replica_zone,
                                              cleanup=False)
    self.addCleanup(self.delete_share_replica, original_replica['id'])
    waiters.wait_for_resource_status(
        self.shares_v2_client, share_replica['id'],
        constants.REPLICATION_STATE_IN_SYNC,
        resource_name='share_replica', status_attr='replica_state')

    # Snapshot taken after the replica is in sync.
    snapshot2 = self.create_snapshot_wait_for_active(share["id"])

    # Wait for snapshot1 to become available
    waiters.wait_for_resource_status(
        self.shares_v2_client, snapshot1['id'], "available",
        resource_name='snapshot')

    self.promote_share_replica(share_replica['id'])
    # Remove the original active replica to ensure that snapshot is
    # still being created successfully.
    self.delete_share_replica(original_replica['id'])

    snapshot1 = self.shares_v2_client.get_snapshot(
        snapshot1['id'])['snapshot']
    self.assertEqual(constants.STATUS_AVAILABLE, snapshot1['status'])

    snapshot2 = self.shares_v2_client.get_snapshot(
        snapshot2['id'])['snapshot']
    self.assertEqual(constants.STATUS_AVAILABLE, snapshot2['status'])

    if CONF.share.capability_create_share_from_snapshot_support:
        # Both snapshots must be usable from the promoted replica.
        self.create_share(share_type_id=self.share_type_id,
                          snapshot_id=snapshot1['id'],
                          share_network_id=self.sn_id)
        self.create_share(share_type_id=self.share_type_id,
                          snapshot_id=snapshot2['id'],
                          share_network_id=self.sn_id)
def test_reset_share_instance_state(self, status):
    """reset_state moves a share instance into the requested status."""
    instances = self.shares_v2_client.get_instances_of_share(
        self.share["id"])['share_instances']
    instance_id = instances[0]["id"]
    self.shares_v2_client.reset_state(
        instance_id, s_type="share_instances", status=status)
    waiters.wait_for_resource_status(
        self.shares_v2_client, instance_id, status,
        resource_name='share_instance')
    # Restore the instance to 'available' after the test.
    self.addCleanup(self._reset_resource_available, instance_id,
                    "share_instances")
def test_migrate_share_not_available(self):
    """Migration of a share that is not 'available' is rejected."""
    share_id = self.share['id']
    self.shares_client.reset_state(share_id, constants.STATUS_ERROR)
    waiters.wait_for_resource_status(self.shares_v2_client, share_id,
                                     constants.STATUS_ERROR)
    self.assertRaises(lib_exc.BadRequest,
                      self.shares_v2_client.migrate_share,
                      share_id, self.dest_pool)
    # Bring the share back so subsequent tests can keep using it.
    self.shares_client.reset_state(share_id,
                                   constants.STATUS_AVAILABLE)
    waiters.wait_for_resource_status(self.shares_v2_client, share_id,
                                     constants.STATUS_AVAILABLE)