def ddtest_create_bootable_volume_from_last_of_3_snapshots_of_a_server(
        self, image, flavor,
        volume_type=BlockstorageDatasets.default_volume_type_model()):
    """Data-driven wrapper: delegate to the shared test that builds a
    bootable volume from the third (last) of three server snapshots.

    NOTE(review): the ``volume_type`` default is evaluated once at class
    definition time and the same model object is shared by every call —
    assumes the delegate never mutates it; confirm.
    """
    # All setup and verification happen in the shared test implementation.
    self.create_bootable_volume_from_third_snapshot_of_server_test(
        image, flavor, volume_type)
class CinderCLI_VolumeSmoke(CinderCLI_IntegrationFixture):
    """Smoke tests for basic volume create/delete through the cinder CLI."""

    @tags('quicksmoke')
    @data_driven_test(BlockstorageDatasets.volume_types())
    def ddtest_create_minimum_sized_volume(
            self, volume_type_name, volume_type_id):
        """Create a minimum-sized volume via the cinder CLI and verify the
        reported size and display name.

        :param volume_type_name: configured name of the volume type under test
        :param volume_type_id: id of the volume type under test
        """
        # Smallest allowed size for this volume type, from test configuration.
        size = self.volumes.behaviors.get_configured_volume_type_property(
            "min_size", id_=volume_type_id, name=volume_type_name)
        name = self.random_volume_name()
        resp = self.cinder.client.create(
            size=size, volume_type=volume_type_id, display_name=name)
        self.assertIsNotNone(
            resp.entity, 'Could not parse cinder create response')
        # assertEquals is a deprecated alias of assertEqual; use the
        # canonical name consistently.
        self.assertEqual(
            resp.return_code, 0, "Cinder command returned an error code.")
        volume = resp.entity
        # Register cleanup so the volume is removed even if assertions fail.
        self.addCleanup(self.cinder.client.delete, volume.id_)
        self.assertEqual(
            int(size), int(volume.size), "Volume size reported incorrectly")
        self.assertEqual(
            name, volume.display_name,
            "Volume display name reported incorrectly")

    @data_driven_test(BlockstorageDatasets.volume_types())
    def ddtest_delete_volume_by_id(self, volume_type_name, volume_type_id):
        """Delete a volume by id via the cinder CLI and confirm deletion.

        :param volume_type_name: configured name of the volume type under test
        :param volume_type_id: id of the volume type under test
        """
        # Setup: a minimum-sized volume in 'available' status.
        size = self.volumes.behaviors.get_configured_volume_type_property(
            "min_size", id_=volume_type_id, name=volume_type_name)
        volume = self.volumes.behaviors.create_available_volume(
            size=size, volume_type=volume_type_id)
        resp = self.cinder.client.delete(volume.id_)
        # Bug fix: the original message claimed "standard error" but the
        # check inspects standard_out — a successful delete prints nothing.
        self.assertEqual(
            len(resp.standard_out), 0,
            "Volume delete returned output on standard out")
        # Best-effort cleanup in case the confirmation below shows the
        # volume was not actually deleted.
        self.addCleanup(self.cinder.client.delete, volume.id_)
        self.assertTrue(
            self.volumes.behaviors.delete_volume_confirmed(
                volume.id_, size),
            "Could not confirm that volume {0} was deleted".format(
                volume.id_))
def ddtest_verify_data_on_custom_snapshot_after_copy_to_volume( self, image, flavor, volume_type=BlockstorageDatasets.default_volume_type_model()): """This test currently only works for Linux images""" # Create a server original_server = self.new_server( name=self.random_server_name(), image=image.id, flavor=flavor.id, add_cleanup=False) # Connect to server original_server_connection = self.connect_to_instance( original_server) # Write data to the root disk file_name = random_string("original_data") file_content = "a" * 1024 self.create_remote_file( original_server_connection, '/', file_name, file_content) original_server_connection.filesystem_sync() # Get hash of remote file original_hash = self.get_remote_file_md5_hash( original_server_connection, '/', file_name) # Create a snapshot of the server server_snapshot = self.make_server_snapshot(original_server) # Create a bootable volume from the server snapshot bootable_volume = self.create_volume_from_image_test( volume_type, server_snapshot) # Create block device mapping bdm = self.compute.servers.behaviors.create_block_device_mapping_v1( bootable_volume.id_, True, 'vda', bootable_volume.size, volume_type.id_) # Boot a server from the volume new_bfv_server = self.servers.behaviors.create_active_server( name=self.random_server_name(), flavor_ref=flavor.id, block_device_mapping=bdm).entity assert new_bfv_server is not None, ( "Unable to build a server from volume '{volume}' and flavor " "'{flavor}' with block device mapping: {bdm}".format( volume=bootable_volume.id_, flavor=flavor.id, bdm=bdm)) # Setup remote instance client new_bfv_server_conn = self.connect_to_instance( new_bfv_server, os_type='linux') # Get hash of remote file restored_hash = self.get_remote_file_md5_hash( new_bfv_server_conn, '/', file_name) assert original_hash == restored_hash, ( 'Restored data hash "{0}" did not match original data hash "{1}"' .format(restored_hash, original_hash))
from cafe.drivers.unittest.decorators import \ data_driven_test, DataDrivenFixture, tags from cloudcafe.blockstorage.datasets import BlockstorageDatasets from cloudroast.blockstorage.volumes_api.integration.oscli.fixtures \ import NovaCLI_IntegrationFixture volume_type_complete_dataset = BlockstorageDatasets.volume_types() @DataDrivenFixture class NovaCLI_IntegrationSmoke(NovaCLI_IntegrationFixture): @classmethod def setUpClass(cls): super(NovaCLI_IntegrationSmoke, cls).setUpClass() # Create test server cls.test_server = cls.new_server() @tags('quicksmoke') @data_driven_test(volume_type_complete_dataset) def ddtest_create_minimum_sized_volume( self, volume_type_name, volume_type_id): size = self.volumes.behaviors.get_configured_volume_type_property( "min_size", id_=volume_type_id, name=volume_type_name) volume = self.new_volume(size=size, volume_type=volume_type_id) assert int(volume.size) == int(size), "Volume was the wrong size" @data_driven_test(volume_type_complete_dataset) def ddtest_create_maximum_sized_volume(
from cafe.drivers.unittest.decorators import data_driven_test from cafe.drivers.unittest.decorators import tags from cloudroast.blockstorage.volumes_api.fixtures import \ VolumesTestFixture from cloudcafe.blockstorage.datasets import BlockstorageDatasets from cafe.drivers.unittest.decorators import DataDrivenFixture volume_types_dataset = BlockstorageDatasets.volume_types() volume_types_dataset.apply_test_tags('volume-cloning-complete-dataset') configured_vtypes_dataset = BlockstorageDatasets.configured_volume_types() configured_vtypes_dataset.apply_test_tags('volume-cloning-configured-dataset') random_vtype_dataset = BlockstorageDatasets.volume_types( max_datasets=1, randomize=True) random_vtype_dataset.apply_test_tags('volume-cloning-single-random-dataset') volume_types_dataset.merge_dataset_tags(configured_vtypes_dataset) volume_types_dataset.merge_dataset_tags(random_vtype_dataset) @DataDrivenFixture class CBSVolumeCloneTests(VolumesTestFixture): @data_driven_test(volume_types_dataset) @tags('smoke') def ddtest_create_exact_clone_of_existing_volume_and_verify_attributes( self, volume_type_name, volume_type_id): """Verify that data written to a volume is intact and available on a clone of that volume""" # Setup original volume metadata = {"OriginalVolumeMetadataKey": "OriginalVolumeMetadataValue"} size = self.volumes.behaviors.get_configured_volume_type_property(
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from cafe.drivers.unittest.decorators import \ data_driven_test, DataDrivenFixture from cloudcafe.blockstorage.volumes_api.common.models import statuses from cloudcafe.blockstorage.datasets import BlockstorageDatasets from cloudroast.blockstorage.volumes_api.fixtures import \ VolumesTestFixture complete_volume_types = BlockstorageDatasets.volume_types() complete_volume_types.apply_test_tags('snapshots-exhaustive-volume-types') default_volume_type = BlockstorageDatasets.default_volume_type() default_volume_type.apply_test_tags('snapshots-default-volume-type') complete_volume_types.merge_dataset_tags(default_volume_type) @DataDrivenFixture class SnapshotActions(VolumesTestFixture): @data_driven_test(complete_volume_types) def ddtest_verify_snapshot_status_progression( self, volume_type_name, volume_type_id): """Verify snapshot passes through all expected states after create""" volume = self.new_volume(vol_type=volume_type_id)
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from cafe.drivers.unittest.decorators import data_driven_test from cafe.drivers.unittest.decorators import tags from cloudroast.blockstorage.volumes_api.fixtures import \ VolumesTestFixture from cloudcafe.blockstorage.datasets import BlockstorageDatasets from cafe.drivers.unittest.decorators import DataDrivenFixture volume_types_dataset = BlockstorageDatasets.volume_types() @DataDrivenFixture class VolumeActions(VolumesTestFixture): @data_driven_test(volume_types_dataset) @tags('volumes', 'smoke') def ddtest_create_volume( self, volume_type_name, volume_type_id): """Verify that a volume of minimum size can be created""" # Setup size = self.volumes.behaviors.get_configured_volume_type_property( "min_size", id_=volume_type_id, name=volume_type_name) name = self.random_volume_name()
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from cafe.drivers.unittest.decorators import data_driven_test from cafe.drivers.unittest.decorators import tags from cloudroast.blockstorage.volumes_api.fixtures import \ VolumesTestFixture from cloudcafe.blockstorage.datasets import BlockstorageDatasets from cafe.drivers.unittest.decorators import DataDrivenFixture volume_types_dataset = BlockstorageDatasets.volume_types() @DataDrivenFixture class VolumeActions(VolumesTestFixture): @data_driven_test(volume_types_dataset) @tags('volumes', 'smoke') def ddtest_create_volume(self, volume_type_name, volume_type_id): """Verify that a volume of minimum size can be created""" # Setup size = self.volumes.behaviors.get_configured_volume_type_property( "min_size", id_=volume_type_id, name=volume_type_name) name = self.random_volume_name() description = "{0}".format(self.__class__.__name__) metadata = {"metadata_key_one": "metadata_value_one"}
from cafe.drivers.unittest.decorators import \ data_driven_test, DataDrivenFixture, tags from cloudcafe.blockstorage.datasets import BlockstorageDatasets from cloudroast.blockstorage.volumes_api.integration.oscli.fixtures \ import NovaCLI_IntegrationFixture volume_type_complete_dataset = BlockstorageDatasets.volume_types() @DataDrivenFixture class NovaCLI_IntegrationSmoke(NovaCLI_IntegrationFixture): @classmethod def setUpClass(cls): super(NovaCLI_IntegrationSmoke, cls).setUpClass() # Create test server cls.test_server = cls.new_server() @tags('quicksmoke') @data_driven_test(volume_type_complete_dataset) def ddtest_create_minimum_sized_volume(self, volume_type_name, volume_type_id): size = self.volumes.behaviors.get_configured_volume_type_property( "min_size", id_=volume_type_id, name=volume_type_name) volume = self.new_volume(size=size, volume_type=volume_type_id) assert int(volume.size) == int(size), "Volume was the wrong size" @data_driven_test(volume_type_complete_dataset) def ddtest_create_maximum_sized_volume(self, volume_type_name, volume_type_id):
from cafe.drivers.unittest.decorators import data_driven_test from cafe.drivers.unittest.decorators import tags from cloudroast.blockstorage.volumes_api.fixtures import \ VolumesTestFixture from cloudcafe.blockstorage.datasets import BlockstorageDatasets from cafe.drivers.unittest.decorators import DataDrivenFixture volume_types_dataset = BlockstorageDatasets.volume_types() volume_types_dataset.apply_test_tags('volume-cloning-complete-dataset') configured_vtypes_dataset = BlockstorageDatasets.configured_volume_types() configured_vtypes_dataset.apply_test_tags('volume-cloning-configured-dataset') random_vtype_dataset = BlockstorageDatasets.volume_types(max_datasets=1, randomize=True) random_vtype_dataset.apply_test_tags('volume-cloning-single-random-dataset') volume_types_dataset.merge_dataset_tags(configured_vtypes_dataset) volume_types_dataset.merge_dataset_tags(random_vtype_dataset) @DataDrivenFixture class CBSVolumeCloneTests(VolumesTestFixture): @data_driven_test(volume_types_dataset) @tags('smoke') def ddtest_create_exact_clone_of_existing_volume_and_verify_attributes( self, volume_type_name, volume_type_id): """Verify that data written to a volume is intact and available on a clone of that volume""" # Setup original volume metadata = {"OriginalVolumeMetadataKey": "OriginalVolumeMetadataValue"} size = self.volumes.behaviors.get_configured_volume_type_property( "min_size", id_=volume_type_id, name=volume_type_name)
class CinderCLI_SnapshotSmoke(CinderCLI_IntegrationFixture):
    """Smoke tests for snapshot create/delete through the cinder CLI."""

    @data_driven_test(BlockstorageDatasets.volume_types())
    def ddtest_snapshot_create_and_delete(
            self, volume_type_name, volume_type_id):
        """Snapshot a minimum-sized volume via the cinder CLI, verify the
        snapshot's reported attributes, wait for it to become available,
        then delete it.

        :param volume_type_name: configured name of the volume type under test
        :param volume_type_id: id of the volume type under test
        """
        # Setup: a minimum-sized volume in 'available' status.
        size = self.volumes.behaviors.get_configured_volume_type_property(
            "min_size", id_=volume_type_id, name=volume_type_name)
        volume = self.volumes.behaviors.create_available_volume(
            size=size, volume_type=volume_type_id)
        self.addCleanup(self.cinder.client.delete, volume.id_)

        # Test create response
        display_name = self.random_snapshot_name()
        display_description = "Snapshot_Description"
        resp = self.cinder.client.snapshot_create(
            volume.id_,
            display_name=display_name,
            display_description=display_description)
        self.assertIsNotNone(
            resp.entity, 'Could not parse snapshot-create output')
        snapshot = resp.entity
        # assertEquals is a deprecated alias of assertEqual; use the
        # canonical name throughout.
        self.assertEqual(
            snapshot.display_name, display_name,
            "Display name did not match expected display name")
        self.assertEqual(
            snapshot.display_description, display_description,
            "Display description did not match expected display description")
        self.assertEqual(
            str(snapshot.size), str(volume.size),
            "Snapshot size '{0}' did not match source volume size '{1}'"
            .format(snapshot.size, volume.size))
        self.assertEqual(
            snapshot.volume_id, volume.id_,
            "Volume id did not match source volume id")
        # A freshly created snapshot may legitimately be either still
        # creating or already available.
        self.assertIn(
            snapshot.status,
            [SnapshotStatuses.AVAILABLE, SnapshotStatuses.CREATING],
            "Snapshot created with unexpected status: {0}".format(
                snapshot.status))

        # Wait for snapshot to attain 'available' status; timeout scales
        # with the source volume size.
        snapshot_timeout = \
            self.volumes.behaviors.calculate_snapshot_create_timeout(
                volume.size)
        self.volumes.behaviors.wait_for_snapshot_status(
            snapshot.id_, SnapshotStatuses.AVAILABLE, snapshot_timeout)

        # Make sure snapshot progress is at 100%
        resp = self.cinder.client.snapshot_show(snapshot.id_)
        self.assertIsNotNone(
            resp.entity, 'Could not parse snapshot-show output')
        snapshot = resp.entity
        self.assertEqual(
            snapshot.progress, '100%',
            "Snapshot attained 'AVAILABLE' status, but progress is not 100%")

        # Delete Snapshot
        resp = self.cinder.client.snapshot_delete(snapshot.id_)
        self.assertEqual(resp.return_code, 0, 'Could not delete snapshot')