def create_instance_from_image(img):
    """Launch an instance from *img* and verify its basic properties.

    Relies on closure variables from the enclosing test scope:
    ``self``, ``img_inst_label`` and ``subnet``.
    """
    img_instance = None
    # Guarantee the instance is cleaned up even if an assertion fails;
    # the lambda late-binds so it sees the created instance.
    with helpers.cleanup_action(
            lambda: helpers.cleanup_test_resources(img_instance)):
        img_instance = self.provider.compute.instances.create(
            img_inst_label, img,
            helpers.get_provider_test_data(self.provider, 'vm_type'),
            subnet=subnet,
            zone=helpers.get_provider_test_data(
                self.provider, 'placement'))
        img_instance.wait_till_ready()
        self.assertIsInstance(img_instance, Instance)
        self.assertEqual(
            img_instance.label, img_inst_label,
            "Instance label {0} is not equal to the expected label"
            " {1}".format(img_instance.label, img_inst_label))
        image_id = img.id
        self.assertEqual(
            img_instance.image_id, image_id,
            "Image id {0} is not equal to the expected id"
            " {1}".format(img_instance.image_id, image_id))
        self.assertIsInstance(img_instance.public_ips, list)
        # A public IP list may legitimately be empty; only check the
        # first entry when one exists.
        if img_instance.public_ips:
            self.assertTrue(
                img_instance.public_ips[0],
                "public ip should contain a"
                " valid value if a list of public_ips exist")
        self.assertIsInstance(img_instance.private_ips, list)
        self.assertTrue(img_instance.private_ips[0],
                        "private ip should"
                        " contain a valid value")
def test_block_device_mapping_launch_config(self):
    """Exercise launch-config block-device validation and population."""
    lc = self.provider.compute.instances.create_launch_config()
    # specifying an invalid size should raise
    # an exception
    with self.assertRaises(InvalidConfigurationException):
        lc.add_volume_device(size=-1)
    # Attempting to add a blank volume without specifying a size
    # should raise an exception
    with self.assertRaises(InvalidConfigurationException):
        lc.add_volume_device(source=None)
    # block_devices should be empty so far
    self.assertListEqual(
        lc.block_devices, [], "No block devices should have been"
        " added to mappings list since the configuration was"
        " invalid")
    # Add a new volume
    lc.add_volume_device(size=1, delete_on_terminate=True)
    # Override root volume size
    image_id = helpers.get_provider_test_data(self.provider, "image")
    img = self.provider.compute.images.get(image_id)
    lc.add_volume_device(
        is_root=True,
        source=img,
        # TODO: This should be greater than the ami size or tests will fail
        # on actual infrastructure. Needs an image.size method
        size=2,
        delete_on_terminate=True)
    # Attempting to add more than one root volume should raise an
    # exception.
    with self.assertRaises(InvalidConfigurationException):
        lc.add_volume_device(size=1, is_root=True)
    # Attempting to add an incorrect source should raise an exception
    with self.assertRaises(InvalidConfigurationException):
        lc.add_volume_device(
            source="invalid_source",
            delete_on_terminate=True)
    # Add all available ephemeral devices
    instance_type_name = helpers.get_provider_test_data(
        self.provider, "instance_type")
    inst_type = self.provider.compute.instance_types.find(
        name=instance_type_name)[0]
    for _ in range(inst_type.num_ephemeral_disks):
        lc.add_ephemeral_device()
    # block_devices should be populated: 2 explicit volumes plus all
    # ephemeral devices added above.
    self.assertTrue(
        len(lc.block_devices) == 2 + inst_type.num_ephemeral_disks,
        # Fixed typo in the failure message: "bit found" -> "but found".
        "Expected %d total block devices but found %d" %
        (2 + inst_type.num_ephemeral_disks, len(lc.block_devices)))
def test_block_device_mapping_launch_config(self):
    """Exercise launch-config block-device validation (vm_type variant)."""
    lc = self.provider.compute.instances.create_launch_config()
    # specifying an invalid size should raise
    # an exception
    with self.assertRaises(InvalidConfigurationException):
        lc.add_volume_device(size=-1)
    # Attempting to add a blank volume without specifying a size
    # should raise an exception
    with self.assertRaises(InvalidConfigurationException):
        lc.add_volume_device(source=None)
    # block_devices should be empty so far
    self.assertListEqual(
        lc.block_devices, [], "No block devices should have been"
        " added to mappings list since the configuration was"
        " invalid")
    # Add a new volume
    lc.add_volume_device(size=1, delete_on_terminate=True)
    # Override root volume size
    image_id = helpers.get_provider_test_data(self.provider, "image")
    img = self.provider.compute.images.get(image_id)
    # The size should be greater then the ami size
    # and therefore, img.min_disk is used.
    lc.add_volume_device(
        is_root=True,
        source=img,
        size=img.min_disk if img and img.min_disk else 30,
        delete_on_terminate=True)
    # Attempting to add more than one root volume should raise an
    # exception.
    with self.assertRaises(InvalidConfigurationException):
        lc.add_volume_device(size=1, is_root=True)
    # Attempting to add an incorrect source should raise an exception
    with self.assertRaises(InvalidConfigurationException):
        lc.add_volume_device(
            source="invalid_source",
            delete_on_terminate=True)
    # Add all available ephemeral devices
    vm_type_name = helpers.get_provider_test_data(
        self.provider, "vm_type")
    vm_type = self.provider.compute.vm_types.find(
        name=vm_type_name)[0]
    for _ in range(vm_type.num_ephemeral_disks):
        lc.add_ephemeral_device()
    # block_devices should be populated: 2 explicit volumes plus all
    # ephemeral devices added above.
    self.assertTrue(
        len(lc.block_devices) == 2 + vm_type.num_ephemeral_disks,
        # Fixed typo in the failure message: "bit found" -> "but found".
        "Expected %d total block devices but found %d" %
        (2 + vm_type.num_ephemeral_disks, len(lc.block_devices)))
def test_snapshot_properties(self):
    """
    Test snapshot properties
    """
    label = "cb-snapprop-{0}".format(helpers.get_uuid())
    test_vol = self.provider.storage.volumes.create(
        label, 1,
        helpers.get_provider_test_data(self.provider, "placement"))
    with helpers.cleanup_action(lambda: test_vol.delete()):
        test_vol.wait_till_ready()
        snap_label = "cb-snap-{0}".format(label)
        test_snap = test_vol.create_snapshot(label=snap_label,
                                             description=snap_label)

        def cleanup_snap(snap):
            # Guard against a failed snapshot creation (snap is None).
            if snap:
                snap.delete()
                # Deleted snapshots transition to UNKNOWN; ERROR is terminal.
                snap.wait_for([SnapshotState.UNKNOWN],
                              terminal_states=[SnapshotState.ERROR])

        with helpers.cleanup_action(lambda: cleanup_snap(test_snap)):
            test_snap.wait_till_ready()
            self.assertTrue(isinstance(test_vol.size, six.integer_types))
            self.assertEqual(
                test_snap.size, test_vol.size,
                "Snapshot.size must match original volume's size: %s"
                " but is: %s" % (test_vol.size, test_snap.size))
            self.assertTrue(
                test_vol.description is None or
                isinstance(test_vol.description, six.string_types),
                "Snapshot.description must be None or a string. Got: %s"
                % test_vol.description)
            self.assertEqual(test_vol.id, test_snap.volume_id)
            self.assertIsNotNone(test_vol.create_time)
            # Setters should round-trip through a refresh().
            test_snap.label = 'snapnewname1'
            test_snap.description = 'snapnewdescription1'
            test_snap.refresh()
            self.assertEqual(test_snap.label, 'snapnewname1')
            self.assertEqual(test_snap.description, 'snapnewdescription1')
            # Test volume creation from a snapshot (via VolumeService)
            # NOTE(review): uses test_snap.name while the rest of this test
            # works with labels -- confirm .name is intended here.
            sv_label = "cb-snapvol-{0}".format(test_snap.name)
            snap_vol = self.provider.storage.volumes.create(
                sv_label, 1,
                helpers.get_provider_test_data(self.provider, "placement"),
                snapshot=test_snap)
            with helpers.cleanup_action(lambda: snap_vol.delete()):
                snap_vol.wait_till_ready()
            # Test volume creation from a snapshot (via Snapshot)
            snap_vol2 = test_snap.create_volume(
                helpers.get_provider_test_data(self.provider, "placement"))
            with helpers.cleanup_action(lambda: snap_vol2.delete()):
                snap_vol2.wait_till_ready()
def create_subnet(label):
    """Create a test subnet on the enclosing test's network ``net``."""
    placement_zone = helpers.get_provider_test_data(self.provider,
                                                    'placement')
    subnet_service = self.provider.networking.subnets
    return subnet_service.create(label=label, network=net,
                                 cidr_block="10.0.10.0/24",
                                 zone=placement_zone)
def create_subnet(name):
    """Create a test subnet named *name* on the enclosing network ``net``."""
    # NOTE(review): "10.0.0.1/24" is a host address rather than the network
    # address "10.0.0.0/24" -- confirm the provider normalizes/accepts it.
    placement_zone = helpers.get_provider_test_data(self.provider,
                                                    'placement')
    return self.provider.networking.subnets.create(
        network=net, cidr_block="10.0.0.1/24", name=name,
        zone=placement_zone)
def test_crud_snapshot(self):
    """
    Create a new volume, create a snapshot of the volume, and check
    whether list_snapshots properly detects the new snapshot.
    Delete everything afterwards.
    """
    name = "cb_crudsnap-{0}".format(helpers.get_uuid())
    test_vol = self.provider.storage.volumes.create(
        name, 1,
        helpers.get_provider_test_data(self.provider, "placement"))
    with helpers.cleanup_action(lambda: test_vol.delete()):
        test_vol.wait_till_ready()

        def create_snap(name):
            return test_vol.create_snapshot(name=name, description=name)

        def cleanup_snap(snap):
            snap.delete()
            # Deleted snapshots transition to UNKNOWN; ERROR is terminal.
            snap.wait_for([SnapshotState.UNKNOWN],
                          terminal_states=[SnapshotState.ERROR])

        sit.check_crud(self, self.provider.storage.snapshots, Snapshot,
                       "cb_snap", create_snap, cleanup_snap)

        # Test creation of a snap via SnapshotService
        def create_snap2(name):
            return self.provider.storage.snapshots.create(
                name=name, volume=test_vol, description=name)

        if (self.provider.PROVIDER_ID == ProviderList.AWS and
                not isinstance(self.provider, TestMockHelperMixin)):
            # Or get SnapshotCreationPerVolumeRateExceeded
            time.sleep(15)
        sit.check_crud(self, self.provider.storage.snapshots, Snapshot,
                       "cb_snaptwo", create_snap2, cleanup_snap)
def test_object_life_cycle(self):
    """
    Test object life cycle methods by using a volume.
    """
    name = "CBUnitTestLifeCycle-{0}".format(uuid.uuid4())
    test_vol = self.provider.block_store.volumes.create(
        name, 1,
        helpers.get_provider_test_data(self.provider, "placement"))
    # Waiting for an invalid timeout should raise an exception
    with self.assertRaises(AssertionError):
        test_vol.wait_for([VolumeState.ERROR], timeout=-1, interval=1)
    with self.assertRaises(AssertionError):
        test_vol.wait_for([VolumeState.ERROR], timeout=1, interval=-1)
    # If interval < timeout, an exception should be raised
    with self.assertRaises(AssertionError):
        test_vol.wait_for([VolumeState.ERROR], timeout=10, interval=20)
    with helpers.cleanup_action(lambda: test_vol.delete()):
        test_vol.wait_till_ready()
        # Hitting a terminal state should raise an exception
        with self.assertRaises(WaitStateException):
            test_vol.wait_for([VolumeState.ERROR],
                              terminal_states=[VolumeState.AVAILABLE])
        # Hitting the timeout should raise an exception
        with self.assertRaises(WaitStateException):
            test_vol.wait_for([VolumeState.ERROR], timeout=0, interval=0)
def test_object_life_cycle(self):
    """
    Test object life cycle methods by using a volume.
    """
    name = "cb_objlifecycle-{0}".format(helpers.get_uuid())
    test_vol = self.provider.storage.volumes.create(
        name, 1,
        helpers.get_provider_test_data(self.provider, "placement"))
    # Waiting for an invalid timeout should raise an exception
    with self.assertRaises(AssertionError):
        test_vol.wait_for([VolumeState.ERROR], timeout=-1, interval=1)
    with self.assertRaises(AssertionError):
        test_vol.wait_for([VolumeState.ERROR], timeout=1, interval=-1)
    # If interval < timeout, an exception should be raised
    with self.assertRaises(AssertionError):
        test_vol.wait_for([VolumeState.ERROR], timeout=10, interval=20)
    with helpers.cleanup_action(lambda: test_vol.delete()):
        test_vol.wait_till_ready()
        # Hitting a terminal state should raise an exception
        with self.assertRaises(WaitStateException):
            test_vol.wait_for([VolumeState.ERROR],
                              terminal_states=[VolumeState.AVAILABLE])
        # Hitting the timeout should raise an exception
        with self.assertRaises(WaitStateException):
            test_vol.wait_for([VolumeState.ERROR], timeout=0, interval=0)
def test_vm_types_standard(self):
    """Find a known VM type by name and run the standard service checks.

    Searching for a non-existent object should return an empty iterator.
    """
    vm_type_svc = self.provider.compute.vm_types
    known_name = helpers.get_provider_test_data(self.provider, "vm_type")
    matches = vm_type_svc.find(name=known_name)
    sit.check_standard_behaviour(self, vm_type_svc, matches[0])
def test_network_properties(self):
    """Create a network and subnet, then verify their exposed properties."""
    label = 'cb-propnetwork-{0}'.format(helpers.get_uuid())
    subnet_label = 'cb-propsubnet-{0}'.format(helpers.get_uuid())
    net = self.provider.networking.networks.create(
        label=label, cidr_block=BaseNetwork.CB_DEFAULT_IPV4RANGE)
    with helpers.cleanup_action(lambda: net.delete()):
        net.wait_till_ready()
        self.assertEqual(
            net.state, 'available',
            "Network in state '%s', yet should be 'available'" % net.state)
        sit.check_repr(self, net)
        # Some providers report an empty CIDR until fully provisioned.
        self.assertIn(
            net.cidr_block, ['', BaseNetwork.CB_DEFAULT_IPV4RANGE],
            "Network CIDR %s does not contain the expected value %s." %
            (net.cidr_block, BaseNetwork.CB_DEFAULT_IPV4RANGE))
        cidr = '10.0.20.0/24'
        sn = net.create_subnet(label=subnet_label, cidr_block=cidr,
                               zone=helpers.get_provider_test_data(
                                   self.provider, 'placement'))
        with helpers.cleanup_action(lambda: sn.delete()):
            self.assertTrue(
                sn in net.subnets,
                "Subnet ID %s should be listed in network subnets %s."
                % (sn.id, net.subnets))
            self.assertTrue(
                sn in self.provider.networking.subnets.list(network=net),
                "Subnet ID %s should be included in the subnets list %s."
                % (sn.id, self.provider.networking.subnets.list(net)))
            self.assertListEqual(
                net.subnets, [sn],
                "Network should have exactly one subnet: %s." % sn.id)
            self.assertEqual(
                net.id, sn.network_id,
                "Network ID %s and subnet's network id %s should be"
                " equal." % (net.id, sn.network_id))
            self.assertEqual(
                net, sn.network,
                "Network obj %s and subnet's parent net obj %s"
                " should be equal." % (net, sn.network))
            self.assertEqual(
                cidr, sn.cidr_block,
                "Should be exact cidr block that was requested")
            self.assertTrue(
                BaseNetwork.cidr_blocks_overlap(cidr, sn.cidr_block),
                "Subnet's CIDR %s should overlap the specified one %s." %
                (sn.cidr_block, cidr))
def test_crud_router(self):
    """Create a router, attach a subnet and gateway, then delete it."""

    def _cleanup(net, subnet, router, gateway):
        # Nested cleanup_actions run innermost-first: detach the subnet
        # and gateway, then delete gateway, subnet, router, and finally
        # the network.
        with helpers.cleanup_action(lambda: net.delete()):
            with helpers.cleanup_action(lambda: router.delete()):
                with helpers.cleanup_action(lambda: subnet.delete()):
                    with helpers.cleanup_action(lambda: gateway.delete()):
                        router.detach_subnet(subnet)
                        router.detach_gateway(gateway)

    label = 'cb-crudrouter-{0}'.format(helpers.get_uuid())
    # Declare these variables and late binding will allow
    # the cleanup method access to the most current values
    net = None
    sn = None
    router = None
    gteway = None
    with helpers.cleanup_action(lambda: _cleanup(net, sn, router, gteway)):
        net = self.provider.networking.networks.create(
            label=label, cidr_block=BaseNetwork.CB_DEFAULT_IPV4RANGE)
        router = self.provider.networking.routers.create(label=label,
                                                         network=net)
        cidr = '10.0.15.0/24'
        sn = net.create_subnet(label=label, cidr_block=cidr,
                               zone=helpers.get_provider_test_data(
                                   self.provider, 'placement'))
        # Check basic router properties
        sit.check_standard_behaviour(self,
                                     self.provider.networking.routers,
                                     router)
        # GCE routers report state differently, so skip these checks there.
        if (self.provider.PROVIDER_ID != 'gce'):
            self.assertEqual(
                router.state, RouterState.DETACHED,
                "Router {0} state {1} should be {2}.".format(
                    router.id, router.state, RouterState.DETACHED))
            self.assertFalse(
                router.network_id,
                "Router {0} should not be assoc. with network {1}".format(
                    router.id, router.network_id))
            self.assertTrue(
                len(router.subnets) == 0,
                "No subnet should be attached to router {1}".format(
                    sn, router))
        router.attach_subnet(sn)
        self.assertTrue(
            len(router.subnets) == 1,
            "Subnet {0} not attached to router {1}".format(sn, router))
        gteway = net.gateways.get_or_create_inet_gateway()
        router.attach_gateway(gteway)
        # TODO: add a check for routes after that's been implemented
        sit.check_delete(self, self.provider.networking.routers, router)
def test_instance_properties(self):
    """Launch a helper instance and verify its exposed properties."""
    name = "CBInstProps-{0}-{1}".format(
        self.provider.name, uuid.uuid4())
    kp = self.provider.security.key_pairs.create(name=name)
    sg = self.provider.security.security_groups.create(
        name=name, description=name)
    test_instance = helpers.get_test_instance(self.provider, name,
                                              keypair=kp,
                                              security_groups=[sg])

    def cleanup(inst, kp, sg):
        # Terminate first and wait until the instance is gone before
        # deleting the key pair and security group it references.
        inst.terminate()
        inst.wait_for([InstanceState.TERMINATED, InstanceState.UNKNOWN],
                      terminal_states=[InstanceState.ERROR])
        kp.delete()
        sg.delete()

    with helpers.cleanup_action(lambda: cleanup(test_instance, kp, sg)):
        self.assertTrue(
            test_instance.id in repr(test_instance),
            "repr(obj) should contain the object id so that the object"
            " can be reconstructed, but does not. eval(repr(obj)) == obj")
        self.assertEqual(
            test_instance.name, name,
            "Instance name {0} is not equal to the expected name"
            " {1}".format(test_instance.name, name))
        image_id = helpers.get_provider_test_data(self.provider, "image")
        self.assertEqual(test_instance.image_id, image_id,
                         "Image id {0} is not equal to the expected id"
                         " {1}".format(test_instance.image_id, image_id))
        self.assertIsInstance(test_instance.public_ips, list)
        self.assertIsInstance(test_instance.private_ips, list)
        self.assertEqual(
            test_instance.key_pair_name, kp.name)
        self.assertIsInstance(test_instance.security_groups, list)
        self.assertEqual(
            test_instance.security_groups[0], sg)
        # Must have either a public or a private ip
        ip_private = test_instance.private_ips[0] \
            if test_instance.private_ips else None
        ip_address = test_instance.public_ips[0] \
            if test_instance.public_ips else ip_private
        self.assertIsNotNone(
            ip_address,
            "Instance must have either a public IP or a private IP")
        self.assertTrue(
            self._is_valid_ip(ip_address),
            "Instance must have a valid IP address")
        self.assertIsInstance(test_instance.instance_type, InstanceType)
def test_instance_types_standard(self):
    """
    Searching for an instance by name should return an
    InstanceType object and searching for a non-existent
    object should return an empty iterator
    """
    type_svc = self.provider.compute.instance_types
    expected_name = helpers.get_provider_test_data(self.provider,
                                                   "instance_type")
    found_type = type_svc.find(name=expected_name)[0]
    sit.check_standard_behaviour(self, type_svc, found_type)
def test_zones(self):
    """Every region/zone exposes id/name; the test zone appears once."""
    expected_zone = helpers.get_provider_test_data(self.provider,
                                                   "placement")
    matches = 0
    for reg in self.provider.compute.regions:
        self.assertTrue(reg.name)
        for z in reg.zones:
            self.assertTrue(z.id)
            self.assertTrue(z.name)
            region_name_ok = (z.region_name is None or
                              isinstance(z.region_name, six.string_types))
            self.assertTrue(region_name_ok)
            if z.name == expected_zone:
                matches += 1
    # zone info cannot be repeated between regions
    self.assertEqual(matches, 1)
def test_crud_router(self):
    """Create a router, attach a subnet and gateway, then delete it."""

    def _cleanup(net, subnet, router, gateway):
        # Nested cleanup_actions run innermost-first: detach the subnet
        # and gateway, then delete router, gateway, subnet, and finally
        # the network.
        with helpers.cleanup_action(lambda: net.delete()):
            with helpers.cleanup_action(lambda: subnet.delete()):
                with helpers.cleanup_action(lambda: gateway.delete()):
                    with helpers.cleanup_action(lambda: router.delete()):
                        router.detach_subnet(subnet)
                        router.detach_gateway(gateway)

    name = 'cb_crudrouter-{0}'.format(helpers.get_uuid())
    # Declare these variables and late binding will allow
    # the cleanup method access to the most current values
    net = None
    sn = None
    router = None
    gteway = None
    with helpers.cleanup_action(lambda: _cleanup(net, sn, router, gteway)):
        net = self.provider.networking.networks.create(
            name=name, cidr_block='10.0.0.0/16')
        router = self.provider.networking.routers.create(network=net,
                                                         name=name)
        cidr = '10.0.1.0/24'
        sn = net.create_subnet(name=name, cidr_block=cidr,
                               zone=helpers.get_provider_test_data(
                                   self.provider, 'placement'))
        # Check basic router properties
        sit.check_standard_behaviour(self,
                                     self.provider.networking.routers,
                                     router)
        self.assertEqual(
            router.state, RouterState.DETACHED,
            "Router {0} state {1} should be {2}.".format(
                router.id, router.state, RouterState.DETACHED))
        # self.assertFalse(
        #     router.network_id,
        #     "Router {0} should not be assoc. with a network {1}".format(
        #         router.id, router.network_id))
        router.attach_subnet(sn)
        gteway = (self.provider.networking.gateways.
                  get_or_create_inet_gateway(name))
        router.attach_gateway(gteway)
        # TODO: add a check for routes after that's been implemented
        sit.check_delete(self, self.provider.networking.routers, router)
def test_network_properties(self):
    """Create a network and subnet, then verify their exposed properties."""
    name = 'cb_propnetwork-{0}'.format(helpers.get_uuid())
    subnet_name = 'cb_propsubnet-{0}'.format(helpers.get_uuid())
    net = self.provider.networking.networks.create(
        name=name, cidr_block='10.0.0.0/16')
    with helpers.cleanup_action(lambda: net.delete()):
        net.wait_till_ready()
        self.assertEqual(
            net.state, 'available',
            "Network in state '%s', yet should be 'available'" % net.state)
        sit.check_repr(self, net)
        # Some providers report an empty CIDR until fully provisioned.
        self.assertIn(
            net.cidr_block, ['', '10.0.0.0/16'],
            "Network CIDR %s does not contain the expected value." %
            net.cidr_block)
        cidr = '10.0.1.0/24'
        sn = net.create_subnet(name=subnet_name, cidr_block=cidr,
                               zone=helpers.get_provider_test_data(
                                   self.provider, 'placement'))
        with helpers.cleanup_action(lambda: sn.delete()):
            self.assertTrue(
                sn in net.subnets,
                "Subnet ID %s should be listed in network subnets %s."
                % (sn.id, net.subnets))
            self.assertTrue(
                sn in self.provider.networking.subnets.list(network=net),
                "Subnet ID %s should be included in the subnets list %s."
                % (sn.id, self.provider.networking.subnets.list(net)))
            self.assertListEqual(
                net.subnets, [sn],
                "Network should have exactly one subnet: %s." % sn.id)
            self.assertIn(
                net.id, sn.network_id,
                "Network ID %s should be specified in the subnet's network"
                " id %s." % (net.id, sn.network_id))
            self.assertEqual(
                cidr, sn.cidr_block,
                "Subnet's CIDR %s should match the specified one %s." %
                (sn.cidr_block, cidr))
def test_instance_types_get(self):
    """
    Searching for an instance by id should return an
    InstanceType object and searching for a non-existent
    object should return None
    """
    type_svc = self.provider.compute.instance_types
    wanted_name = helpers.get_provider_test_data(self.provider,
                                                 "instance_type")
    found_type = type_svc.find(name=wanted_name)[0]
    self.assertEqual(found_type, type_svc.get(found_type.id))
    self.assertIsNone(type_svc.get("non_existent_id"),
                      "Searching for a non-existent instance id must"
                      " return None")
def test_instance_types_get(self):
    """
    Searching for an instance by id should return an
    InstanceType object and searching for a non-existent
    object should return None
    """
    svc = self.provider.compute.instance_types
    name_to_find = helpers.get_provider_test_data(self.provider,
                                                  "instance_type")
    known_type = svc.find(name=name_to_find)[0]
    self.assertEqual(known_type, svc.get(known_type.id))
    missing = svc.get("non_existent_id")
    self.assertIsNone(missing,
                      "Searching for a non-existent instance id must"
                      " return None")
def test_crud_network(self):
    """Create a network and subnet, verify basics, then delete both."""
    name = 'cbtestnetwork-{0}'.format(uuid.uuid4())
    subnet_name = 'cbtestsubnet-{0}'.format(uuid.uuid4())
    net = self.provider.network.create(name=name)
    with helpers.cleanup_action(
            lambda: net.delete()):
        net.wait_till_ready()
        self.assertEqual(
            net.state, 'available',
            "Network in state '%s', yet should be 'available'" % net.state)
        self.assertIn(
            net.id, repr(net),
            "repr(obj) should contain the object id so that the object"
            " can be reconstructed, but does not.")
        # Some providers report an empty CIDR until fully provisioned.
        self.assertIn(
            net.cidr_block, ['', '10.0.0.0/16'],
            "Network CIDR %s does not contain the expected value." %
            net.cidr_block)
        cidr = '10.0.1.0/24'
        sn = net.create_subnet(
            cidr_block=cidr, name=subnet_name,
            zone=helpers.get_provider_test_data(self.provider,
                                                'placement'))
        with helpers.cleanup_action(lambda: sn.delete()):
            self.assertTrue(
                sn.id in [s.id for s in net.subnets()],
                "Subnet ID %s should be listed in network subnets %s."
                % (sn.id, net.subnets()))
            self.assertIn(
                net.id, sn.network_id,
                "Network ID %s should be specified in the subnet's network"
                " id %s." % (net.id, sn.network_id))
            self.assertEqual(
                cidr, sn.cidr_block,
                "Subnet's CIDR %s should match the specified one %s." % (
                    sn.cidr_block, cidr))
def test_instance_types_find(self):
    """
    Searching for an instance by name should return an
    InstanceType object and searching for a non-existent
    object should return an empty iterator
    """
    svc = self.provider.compute.instance_types
    target_name = helpers.get_provider_test_data(self.provider,
                                                 "instance_type")
    result = svc.find(name=target_name)[0]
    self.assertTrue(isinstance(result, InstanceType),
                    "Find must return an InstanceType object")
    self.assertFalse(svc.find(name="non_existent_instance_type"),
                     "Searching for a non-existent"
                     " instance type must return an empty list")
    with self.assertRaises(TypeError):
        svc.find(non_existent_param="random_value")
def test_snapshot_properties(self):
    """
    Test snapshot properties
    """
    name = "CBTestSnapProp-{0}".format(uuid.uuid4())
    test_vol = self.provider.block_store.volumes.create(
        name, 1,
        helpers.get_provider_test_data(self.provider, "placement"))
    with helpers.cleanup_action(lambda: test_vol.delete()):
        test_vol.wait_till_ready()
        snap_name = "CBSnapProp-{0}".format(name)
        test_snap = test_vol.create_snapshot(name=snap_name,
                                             description=snap_name)

        def cleanup_snap(snap):
            snap.delete()
            # Deleted snapshots transition to UNKNOWN; ERROR is terminal.
            snap.wait_for(
                [SnapshotState.UNKNOWN],
                terminal_states=[SnapshotState.ERROR])

        with helpers.cleanup_action(lambda: cleanup_snap(test_snap)):
            test_snap.wait_till_ready()
            self.assertTrue(isinstance(test_vol.size, six.integer_types))
            self.assertEqual(
                test_snap.size, test_vol.size,
                "Snapshot.size must match original volume's size: %s"
                " but is: %s" % (test_vol.size, test_snap.size))
            self.assertTrue(
                test_vol.description is None or
                isinstance(test_vol.description, six.string_types),
                "Snapshot.description must be None or a string. Got: %s"
                % test_vol.description)
            self.assertEqual(test_vol.id, test_snap.volume_id)
            self.assertIsNotNone(test_vol.create_time)
            # Setters should round-trip through a refresh().
            test_snap.name = 'snapnewname1'
            test_snap.description = 'snapnewdescription1'
            test_snap.refresh()
            self.assertEqual(test_snap.name, 'snapnewname1')
            self.assertEqual(test_snap.description, 'snapnewdescription1')
def test_instance_types_find(self):
    """
    Searching for an instance by name should return an
    InstanceType object and searching for a non-existent
    object should return an empty iterator
    """
    type_service = self.provider.compute.instance_types
    wanted = helpers.get_provider_test_data(self.provider,
                                            "instance_type")
    first_match = type_service.find(name=wanted)[0]
    self.assertTrue(isinstance(first_match, InstanceType),
                    "Find must return an InstanceType object")
    no_match = type_service.find(name="non_existent_instance_type")
    self.assertFalse(no_match,
                     "Searching for a non-existent"
                     " instance type must return an empty list")
    with self.assertRaises(TypeError):
        type_service.find(non_existent_param="random_value")
def test_zones(self):
    """
    Test whether regions return the correct zone information
    """
    zone_find_count = 0
    test_zone = helpers.get_provider_test_data(self.provider, "placement")
    for region in self.provider.compute.regions:
        self.assertTrue(region.name)
        for zone in region.zones:
            self.assertTrue(zone.id)
            self.assertTrue(zone.name)
            self.assertTrue(
                zone.region_name is None or
                isinstance(zone.region_name, six.string_types))
            if test_zone == zone.name:
                zone_find_count += 1
    # TODO: Can't do a check for zone_find_count == 1 because Moto
    # always returns the same zone for any region
    self.assertTrue(
        zone_find_count > 0,
        # Message fixed: the check is "at least once" (not "exactly once")
        # and the old format string had one placeholder but two arguments.
        "The test zone: {0} should appear at least once in the list of"
        " regions, but was found {1} times".format(
            test_zone, zone_find_count))
def test_crud_snapshot(self):
    # Create a new volume, create a snapshot of the volume, and check
    # whether list_snapshots properly detects the new snapshot.
    # Delete everything afterwards.
    label = "cb-crudsnap-{0}".format(helpers.get_uuid())
    test_vol = self.provider.storage.volumes.create(
        label, 1,
        helpers.get_provider_test_data(self.provider, "placement"))
    with helpers.cleanup_action(lambda: test_vol.delete()):
        test_vol.wait_till_ready()

        def create_snap(label):
            return test_vol.create_snapshot(label=label,
                                            description=label)

        def cleanup_snap(snap):
            # Guard against a failed snapshot creation (snap is None).
            if snap:
                snap.delete()
                snap.wait_for([SnapshotState.UNKNOWN],
                              terminal_states=[SnapshotState.ERROR])
                snap.refresh()
                self.assertTrue(
                    snap.state == SnapshotState.UNKNOWN,
                    "Snapshot.state must be unknown when refreshing after "
                    "a delete but got %s" % snap.state)

        sit.check_crud(self, self.provider.storage.snapshots, Snapshot,
                       "cb-snap", create_snap, cleanup_snap)

        # Test creation of a snap via SnapshotService
        def create_snap2(label):
            return self.provider.storage.snapshots.create(
                label=label, volume=test_vol, description=label)

        if (self.provider.PROVIDER_ID == ProviderList.AWS and
                not isinstance(self.provider, TestMockHelperMixin)):
            # Or get SnapshotCreationPerVolumeRateExceeded
            time.sleep(15)
        sit.check_crud(self, self.provider.storage.snapshots, Snapshot,
                       "cb-snaptwo", create_snap2, cleanup_snap)
def test_zones(self):
    """
    Test whether regions return the correct zone information
    """
    zone_find_count = 0
    test_zone = helpers.get_provider_test_data(self.provider, "placement")
    regions = self.provider.compute.regions.list()
    for region in regions:
        self.assertTrue(region.name)
        for zone in region.zones:
            self.assertTrue(zone.id)
            self.assertTrue(zone.name)
            self.assertTrue(zone.region_name is None or
                            isinstance(zone.region_name,
                                       six.string_types))
            if test_zone == zone.name:
                zone_find_count += 1
    # TODO: Can't do a check for zone_find_count == 1 because Moto
    # always returns the same zone for any region
    self.assertTrue(
        zone_find_count > 0,
        # Message fixed: the check is "at least once" (not "exactly once")
        # and the old format string had one placeholder but two arguments.
        "The test zone: {0} should appear at least once in the list of"
        " regions, but was found {1} times".format(
            test_zone, zone_find_count))
def test_instance_properties(self):
    """Launch a helper instance and verify its exposed properties."""
    name = "cb_inst_props-{0}".format(helpers.get_uuid())
    # Declare these variables and late binding will allow
    # the cleanup method access to the most current values
    test_instance = None
    net = None
    sg = None
    kp = None
    with helpers.cleanup_action(lambda: helpers.cleanup_test_resources(
            test_instance, net, sg, kp)):
        net, subnet = helpers.create_test_network(self.provider, name)
        kp = self.provider.security.key_pairs.create(name=name)
        sg = self.provider.security.security_groups.create(
            name=name, description=name, network_id=net.id)
        test_instance = helpers.get_test_instance(self.provider, name,
                                                  key_pair=kp,
                                                  security_groups=[sg],
                                                  subnet=subnet)
        self.assertEqual(
            test_instance.name, name,
            "Instance name {0} is not equal to the expected name"
            " {1}".format(test_instance.name, name))
        image_id = helpers.get_provider_test_data(self.provider, "image")
        self.assertEqual(
            test_instance.image_id, image_id,
            "Image id {0} is not equal to the expected id"
            " {1}".format(test_instance.image_id, image_id))
        self.assertIsInstance(test_instance.zone_id, six.string_types)
        self.assertEqual(
            test_instance.image_id,
            helpers.get_provider_test_data(self.provider, "image"))
        self.assertIsInstance(test_instance.public_ips, list)
        self.assertIsInstance(test_instance.private_ips, list)
        self.assertEqual(test_instance.key_pair_name, kp.name)
        self.assertIsInstance(test_instance.security_groups, list)
        self.assertEqual(test_instance.security_groups[0], sg)
        self.assertIsInstance(test_instance.security_group_ids, list)
        self.assertEqual(test_instance.security_group_ids[0], sg.id)
        # Must have either a public or a private ip
        ip_private = test_instance.private_ips[0] \
            if test_instance.private_ips else None
        ip_address = test_instance.public_ips[0] \
            if test_instance.public_ips and test_instance.public_ips[0] \
            else ip_private
        self.assertIsNotNone(
            ip_address,
            "Instance must have either a public IP or a private IP")
        self.assertTrue(self._is_valid_ip(ip_address),
                        "Instance must have a valid IP address")
        self.assertIsInstance(test_instance.instance_type_id,
                              six.string_types)
        itype = self.provider.compute.instance_types.get(
            test_instance.instance_type_id)
        self.assertEqual(
            itype, test_instance.instance_type,
            "Instance type {0} does not match expected type {1}".format(
                itype.name, test_instance.instance_type))
        self.assertIsInstance(itype, InstanceType)
        expected_type = helpers.get_provider_test_data(
            self.provider, 'instance_type')
        self.assertEqual(
            itype.name, expected_type,
            "Instance type {0} does not match expected type {1}".format(
                itype.name, expected_type))
        if isinstance(self.provider, TestMockHelperMixin):
            # skipTest itself raises SkipTest; the explicit ``raise`` is
            # never reached but is harmless.
            raise self.skipTest(
                "Skipping rest of test because Moto is not returning the"
                " instance's placement zone correctly")
        find_zone = [
            zone for zone in self.provider.compute.regions.current.zones
            if zone.id == test_instance.zone_id
        ]
        self.assertEqual(
            len(find_zone), 1,
            "Instance's placement zone could not be "
            " found in zones list")
def create_vol(label):
    """Create a 1 GB test volume in the configured placement zone."""
    placement_zone = helpers.get_provider_test_data(self.provider,
                                                    "placement")
    return self.provider.storage.volumes.create(label, 1, placement_zone)
def test_crud_router(self):
    """Create a router with a network/subnet, verify its properties and
    attachment behavior, then confirm it is gone after cleanup.
    """

    def _cleanup(net, subnet, router):
        # Nested cleanup actions guarantee each delete is attempted
        # even if an earlier teardown step raises.
        with helpers.cleanup_action(lambda: net.delete()):
            with helpers.cleanup_action(lambda: subnet.delete()):
                with helpers.cleanup_action(lambda: router.delete()):
                    router.remove_route(subnet.id)
                    router.detach_network()

    name = 'cbtestrouter-{0}'.format(uuid.uuid4())
    # Declare these variables and late binding will allow
    # the cleanup method access to the most current values
    net = None
    sn = None
    router = None
    with helpers.cleanup_action(lambda: _cleanup(net, sn, router)):
        router = self.provider.network.create_router(name=name)
        net = self.provider.network.create(name=name)
        cidr = '10.0.1.0/24'
        sn = net.create_subnet(cidr_block=cidr, name=name,
                               zone=helpers.get_provider_test_data(
                                   self.provider, 'placement'))

        # Check basic router properties
        self.assertIn(
            router, self.provider.network.routers(),
            "Router {0} should exist in the router list {1}.".format(
                router.id, self.provider.network.routers()))
        self.assertIn(
            router.id, repr(router),
            "repr(obj) should contain the object id so that the object"
            " can be reconstructed, but does not.")
        self.assertEqual(
            router.name, name,
            "Router {0} name should be {1}.".format(router.name, name))
        self.assertEqual(
            router.state, RouterState.DETACHED,
            "Router {0} state {1} should be {2}.".format(
                router.id, router.state, RouterState.DETACHED))
        self.assertFalse(
            router.network_id,
            "Router {0} should not be assoc. with a network {1}".format(
                router.id, router.network_id))

        # TODO: Cloud specific code, needs fixing
        # Check router connectivity
        # On OpenStack only one network is external and on AWS every
        # network is external, yet we need to use the one we've created?!
        if self.provider.PROVIDER_ID == 'openstack':
            # NOTE(review): if no external network exists, external_net
            # stays unbound and attach_network below raises NameError —
            # confirm test clouds always expose an external network.
            for n in self.provider.network.list():
                if n.external:
                    external_net = n
                    break
        else:
            external_net = net
        router.attach_network(external_net.id)
        router.refresh()
        self.assertEqual(
            router.network_id, external_net.id,
            "Router should be attached to network {0}, not {1}".format(
                external_net.id, router.network_id))
        router.add_route(sn.id)
        # TODO: add a check for routes after that's been implemented

    # Cleanup has run; the router must no longer be listed.
    routerl = self.provider.network.routers()
    found_router = [r for r in routerl if r.name == name]
    self.assertEqual(
        len(found_router), 0,
        "Router {0} should have been deleted but still exists."
        .format(name))
def test_crud_volume(self):
    """
    Create a new volume, check whether the expected values are set,
    and delete it
    """
    name = "CBUnitTestCreateVol-{0}".format(uuid.uuid4())
    test_vol = self.provider.block_store.volumes.create(
        name, 1,
        helpers.get_provider_test_data(self.provider, "placement"))

    def cleanup_vol(vol):
        # Delete and wait until the volume reaches a terminal state.
        vol.delete()
        vol.wait_for([VolumeState.DELETED, VolumeState.UNKNOWN],
                     terminal_states=[VolumeState.ERROR])

    with helpers.cleanup_action(lambda: cleanup_vol(test_vol)):
        test_vol.wait_till_ready()
        self.assertTrue(
            test_vol.id in repr(test_vol),
            "repr(obj) should contain the object id so that the object"
            " can be reconstructed, but does not. eval(repr(obj)) == obj")
        volumes = self.provider.block_store.volumes.list()
        list_volumes = [vol for vol in volumes if vol.name == name]
        self.assertTrue(
            len(list_volumes) == 1,
            "List volumes does not return the expected volume %s" % name)
        # check iteration
        iter_volumes = [vol for vol in self.provider.block_store.volumes
                        if vol.name == name]
        self.assertTrue(
            len(iter_volumes) == 1,
            "Iter volumes does not return the expected volume %s" % name)
        # check find
        find_vols = self.provider.block_store.volumes.find(name=name)
        self.assertTrue(
            len(find_vols) == 1,
            "Find volumes does not return the expected volume %s" % name)
        # check non-existent find
        # TODO: Moto has a bug with filters causing the following test
        # to fail. Need to add tag based filtering support for volumes
        # find_vols = self.provider.block_store.volumes.find(
        #     name="non_existent_vol")
        # self.assertTrue(
        #     len(find_vols) == 0,
        #     "Find() for a non-existent volume returned %s" % find_vols)
        get_vol = self.provider.block_store.volumes.get(
            test_vol.id)
        self.assertTrue(
            list_volumes[0] == get_vol == test_vol,
            "Ids returned by list: {0} and get: {1} are not as "
            " expected: {2}"
            .format(list_volumes[0].id, get_vol.id, test_vol.id))
        self.assertTrue(
            list_volumes[0].name == get_vol.name == test_vol.name,
            "Names returned by list: {0} and get: {1} are not as "
            " expected: {2}"
            .format(list_volumes[0].name, get_vol.name, test_vol.name))
    # Cleanup has run; the volume must no longer be listed.
    volumes = self.provider.block_store.volumes.list()
    found_volumes = [vol for vol in volumes if vol.name == name]
    self.assertTrue(
        len(found_volumes) == 0,
        "Volume %s should have been deleted but still exists." % name)
def test_block_device_mapping_attachments(self):
    """Launch an instance from a block-device-mapping launch config
    (root volume override plus ephemeral devices) and wait for it to
    become ready. Volume/snapshot mappings are currently disabled
    (commented out) in this variant.
    """
    name = "CBInstBlkAttch-{0}-{1}".format(
        self.provider.name, uuid.uuid4())

    # test_vol = self.provider.block_store.volumes.create(
    #     name,
    #     1,
    #     helpers.get_provider_test_data(self.provider, "placement"))
    # with helpers.cleanup_action(lambda: test_vol.delete()):
    #     test_vol.wait_till_ready()
    #     test_snap = test_vol.create_snapshot(name=name,
    #                                          description=name)
    #
    #     def cleanup_snap(snap):
    #         snap.delete()
    #         snap.wait_for(
    #             [SnapshotState.UNKNOWN],
    #             terminal_states=[SnapshotState.ERROR])
    #
    #     with helpers.cleanup_action(lambda: cleanup_snap(test_snap)):
    #         test_snap.wait_till_ready()
    lc = self.provider.compute.instances.create_launch_config()

    # Add a new blank volume
    # lc.add_volume_device(size=1, delete_on_terminate=True)

    # Attach an existing volume
    # lc.add_volume_device(size=1, source=test_vol,
    #                      delete_on_terminate=True)

    # Add a new volume based on a snapshot
    # lc.add_volume_device(size=1, source=test_snap,
    #                      delete_on_terminate=True)

    # Override root volume size
    image_id = helpers.get_provider_test_data(
        self.provider, "image")
    img = self.provider.compute.images.get(image_id)
    lc.add_volume_device(
        is_root=True,
        source=img,
        # TODO: This should be greater than the ami size or tests
        # will fail on actual infrastructure. Needs an image.size
        # method
        size=2,
        delete_on_terminate=True)

    # Add all available ephemeral devices
    instance_type_name = helpers.get_provider_test_data(
        self.provider, "instance_type")
    inst_type = self.provider.compute.instance_types.find(
        name=instance_type_name)[0]
    for _ in range(inst_type.num_ephemeral_disks):
        lc.add_ephemeral_device()

    inst = helpers.create_test_instance(
        self.provider,
        name,
        zone=helpers.get_provider_test_data(
            self.provider, 'placement'),
        launch_config=lc)

    def cleanup(instance):
        # Terminate and wait until the instance reaches a terminal state.
        instance.terminate()
        instance.wait_for(
            [InstanceState.TERMINATED, InstanceState.UNKNOWN],
            terminal_states=[InstanceState.ERROR])

    with helpers.cleanup_action(lambda: cleanup(inst)):
        try:
            inst.wait_till_ready()
        except WaitStateException as e:
            self.fail("The block device mapped launch did not "
                      " complete successfully: %s" % e)
def test_crud_snapshot(self):
    """
    Create a new volume, create a snapshot of the volume, and check
    whether list_snapshots properly detects the new snapshot.
    Delete everything afterwards.
    """
    name = "CBUnitTestCreateSnap-{0}".format(uuid.uuid4())
    test_vol = self.provider.block_store.volumes.create(
        name, 1,
        helpers.get_provider_test_data(self.provider, "placement"))
    with helpers.cleanup_action(lambda: test_vol.delete()):
        test_vol.wait_till_ready()
        snap_name = "CBSnapshot-{0}".format(name)
        test_snap = test_vol.create_snapshot(name=snap_name,
                                             description=snap_name)

        def cleanup_snap(snap):
            # Delete and wait until the snapshot is actually gone.
            snap.delete()
            snap.wait_for(
                [SnapshotState.UNKNOWN],
                terminal_states=[SnapshotState.ERROR])

        with helpers.cleanup_action(lambda: cleanup_snap(test_snap)):
            test_snap.wait_till_ready()
            self.assertTrue(
                test_snap.id in repr(test_snap),
                "repr(obj) should contain the object id so that the object"
                " can be reconstructed, but does not.")
            snaps = self.provider.block_store.snapshots.list()
            list_snaps = [snap for snap in snaps
                          if snap.name == snap_name]
            self.assertTrue(
                len(list_snaps) == 1,
                "List snapshots does not return the expected volume %s" %
                name)
            # check iteration
            iter_snaps = [
                snap for snap in self.provider.block_store.snapshots
                if snap.name == snap_name]
            self.assertTrue(
                len(iter_snaps) == 1,
                "Iter snapshots does not return the expected volume %s" %
                name)
            # check find
            find_snap = self.provider.block_store.snapshots.find(
                name=snap_name)
            self.assertTrue(
                len(find_snap) == 1,
                "Find snaps does not return the expected snapshot %s" %
                name)
            # check non-existent find
            # TODO: Moto has a bug with filters causing the following test
            # to fail. Need to add tag based filtering support for snaps
            # find_snap = self.provider.block_store.snapshots.find(
            #     name="non_existent_snap")
            # self.assertTrue(
            #     len(find_snap) == 0,
            #     "Find() for a non-existent snap returned %s" %
            #     find_snap)
            get_snap = self.provider.block_store.snapshots.get(
                test_snap.id)
            self.assertTrue(
                list_snaps[0] == get_snap == test_snap,
                "Ids returned by list: {0} and get: {1} are not as "
                " expected: {2}"
                .format(list_snaps[0].id, get_snap.id, test_snap.id))
            self.assertTrue(
                list_snaps[0].name == get_snap.name == test_snap.name,
                "Names returned by list: {0} and get: {1} are not as "
                " expected: {2}"
                .format(list_snaps[0].name, get_snap.name,
                        test_snap.name))
            # Test volume creation from a snapshot (via VolumeService)
            sv_name = "CBUnitTestSnapVol-{0}".format(name)
            snap_vol = self.provider.block_store.volumes.create(
                sv_name,
                1,
                helpers.get_provider_test_data(self.provider, "placement"),
                snapshot=test_snap)
            with helpers.cleanup_action(lambda: snap_vol.delete()):
                snap_vol.wait_till_ready()
            # Test volume creation from a snapshot (via Snapshot)
            snap_vol2 = test_snap.create_volume(
                helpers.get_provider_test_data(self.provider, "placement"))
            with helpers.cleanup_action(lambda: snap_vol2.delete()):
                snap_vol2.wait_till_ready()
        # Snapshot cleanup has run; it must no longer be listed.
        snaps = self.provider.block_store.snapshots.list()
        found_snaps = [snap for snap in snaps
                       if snap.name == snap_name]
        self.assertTrue(
            len(found_snaps) == 0,
            "Snapshot %s should have been deleted but still exists." %
            snap_name)
        # Test creation of a snap via SnapshotService
        snap_too_name = "CBSnapToo-{0}".format(name)
        test_snap_too = self.provider.block_store.snapshots.create(
            name=snap_too_name, volume=test_vol,
            description=snap_too_name)
        with helpers.cleanup_action(lambda: cleanup_snap(test_snap_too)):
            test_snap_too.wait_till_ready()
            self.assertTrue(
                test_snap_too.id in repr(test_snap_too),
                "repr(obj) should contain the object id so that the object"
                " can be reconstructed, but does not.")
def test_instance_properties(self):
    """Launch an instance and verify its core attributes.

    Fix: the key pair, security group, and instance were previously
    created *before* the cleanup context, so a failure during
    provisioning leaked those resources. Resources are now created
    inside the cleanup scope with late-bound variables (matching the
    pattern used by the other property tests), and cleanup tolerates
    partially-created state.
    """
    name = "CBInstProps-{0}-{1}".format(
        self.provider.name, uuid.uuid4())
    # Declare these variables and late binding will allow
    # the cleanup method access to the most current values
    kp = None
    sg = None
    test_instance = None

    def cleanup(inst, kp, sg):
        # Each resource may be None if creation failed part-way through.
        if inst:
            inst.terminate()
            inst.wait_for([InstanceState.TERMINATED,
                           InstanceState.UNKNOWN],
                          terminal_states=[InstanceState.ERROR])
        if kp:
            kp.delete()
        if sg:
            sg.delete()

    with helpers.cleanup_action(lambda: cleanup(test_instance, kp, sg)):
        kp = self.provider.security.key_pairs.create(name=name)
        sg = self.provider.security.security_groups.create(
            name=name, description=name)
        test_instance = helpers.get_test_instance(self.provider,
                                                  name, key_pair=kp,
                                                  security_groups=[sg])
        self.assertTrue(
            test_instance.id in repr(test_instance),
            "repr(obj) should contain the object id so that the object"
            " can be reconstructed, but does not. eval(repr(obj)) == obj")
        self.assertEqual(
            test_instance.name, name,
            "Instance name {0} is not equal to the expected name"
            " {1}".format(test_instance.name, name))
        image_id = helpers.get_provider_test_data(self.provider, "image")
        self.assertEqual(test_instance.image_id, image_id,
                         "Image id {0} is not equal to the expected id"
                         " {1}".format(test_instance.image_id, image_id))
        self.assertIsInstance(test_instance.zone_id, six.string_types)
        # FIXME: Moto is not returning the instance's placement zone
        # find_zone = [zone for zone in
        #              self.provider.compute.regions.current.zones
        #              if zone.id == test_instance.zone_id]
        # self.assertEqual(len(find_zone), 1,
        #                  "Instance's placement zone could not be "
        #                  " found in zones list")
        self.assertEqual(
            test_instance.image_id,
            helpers.get_provider_test_data(self.provider, "image"))
        self.assertIsInstance(test_instance.public_ips, list)
        self.assertIsInstance(test_instance.private_ips, list)
        self.assertEqual(
            test_instance.key_pair_name, kp.name)
        self.assertIsInstance(test_instance.security_groups, list)
        self.assertEqual(
            test_instance.security_groups[0], sg)
        self.assertIsInstance(test_instance.security_group_ids, list)
        self.assertEqual(
            test_instance.security_group_ids[0], sg.id)
        # Must have either a public or a private ip
        ip_private = test_instance.private_ips[0] \
            if test_instance.private_ips else None
        ip_address = test_instance.public_ips[0] \
            if test_instance.public_ips else ip_private
        self.assertIsNotNone(
            ip_address,
            "Instance must have either a public IP or a private IP")
        self.assertTrue(
            self._is_valid_ip(ip_address),
            "Instance must have a valid IP address")
        self.assertIsInstance(test_instance.instance_type_id,
                              six.string_types)
        itype = self.provider.compute.instance_types.get(
            test_instance.instance_type_id)
        self.assertEqual(
            itype, test_instance.instance_type,
            "Instance type {0} does not match expected type {1}".format(
                itype.name, test_instance.instance_type))
        self.assertIsInstance(itype, InstanceType)
        expected_type = helpers.get_provider_test_data(self.provider,
                                                       'instance_type')
        self.assertEqual(
            itype.name, expected_type,
            "Instance type {0} does not match expected type {1}".format(
                itype.name, expected_type))
def test_instance_properties(self):
    """Launch an instance (label/vm_firewall API variant) and verify its
    core attributes, IPs, firewalls, VM type, and placement zone.
    """
    label = "cb-inst-props-{0}".format(helpers.get_uuid())
    # Declare these variables and late binding will allow
    # the cleanup method access to the most current values
    test_instance = None
    fw = None
    kp = None
    with cb_helpers.cleanup_action(lambda: helpers.cleanup_test_resources(
            test_instance, fw, kp)):
        subnet = helpers.get_or_create_default_subnet(self.provider)
        net = subnet.network
        kp = self.provider.security.key_pairs.create(name=label)
        fw = self.provider.security.vm_firewalls.create(
            label=label, description=label, network=net.id)
        test_instance = helpers.get_test_instance(self.provider,
                                                  label, key_pair=kp,
                                                  vm_firewalls=[fw],
                                                  subnet=subnet)
        self.assertEqual(
            test_instance.label, label,
            "Instance label {0} is not equal to the expected label"
            " {1}".format(test_instance.label, label))
        image_id = helpers.get_provider_test_data(self.provider, "image")
        self.assertEqual(test_instance.image_id, image_id,
                         "Image id {0} is not equal to the expected id"
                         " {1}".format(test_instance.image_id, image_id))
        self.assertIsInstance(test_instance.zone_id, six.string_types)
        self.assertEqual(
            test_instance.image_id,
            helpers.get_provider_test_data(self.provider, "image"))
        self.assertIsInstance(test_instance.public_ips, list)
        if test_instance.public_ips:
            self.assertTrue(
                test_instance.public_ips[0],
                "public ip should contain a"
                " valid value if a list of public_ips exist")
        self.assertIsInstance(test_instance.private_ips, list)
        self.assertTrue(test_instance.private_ips[0], "private ip should"
                        " contain a valid value")
        self.assertEqual(
            test_instance.key_pair_id, kp.id)
        self.assertIsInstance(test_instance.vm_firewalls, list)
        self.assertEqual(
            test_instance.vm_firewalls[0], fw)
        self.assertIsInstance(test_instance.vm_firewall_ids, list)
        self.assertEqual(
            test_instance.vm_firewall_ids[0], fw.id)
        # Must have either a public or a private ip
        ip_private = test_instance.private_ips[0] \
            if test_instance.private_ips else None
        ip_address = test_instance.public_ips[0] \
            if test_instance.public_ips and test_instance.public_ips[0] \
            else ip_private
        # Convert to unicode for py27 compatibility with ipaddress()
        ip_address = u"{}".format(ip_address)
        self.assertIsNotNone(
            ip_address,
            "Instance must have either a public IP or a private IP")
        self.assertTrue(
            self._is_valid_ip(ip_address),
            "Instance must have a valid IP address. Got: %s" % ip_address)
        self.assertIsInstance(test_instance.vm_type_id,
                              six.string_types)
        vm_type = self.provider.compute.vm_types.get(
            test_instance.vm_type_id)
        self.assertEqual(
            vm_type, test_instance.vm_type,
            "VM type {0} does not match expected type {1}".format(
                vm_type.name, test_instance.vm_type))
        self.assertIsInstance(vm_type, VMType)
        expected_type = helpers.get_provider_test_data(self.provider,
                                                       'vm_type')
        self.assertEqual(
            vm_type.name, expected_type,
            "VM type {0} does not match expected type {1}".format(
                vm_type.name, expected_type))
        find_zone = [zone for zone in
                     self.provider.compute.regions.current.zones
                     if zone.id == test_instance.zone_id]
        self.assertEqual(len(find_zone), 1,
                         "Instance's placement zone could not be "
                         " found in zones list")
def test_block_device_mapping_attachments(self):
    """Launch an instance with volume, snapshot, root-override, and
    ephemeral block device mappings, then wait for readiness.
    Skipped on OpenStack (BDM support not stable there yet).
    """
    label = "cb-blkattch-{0}".format(helpers.get_uuid())

    if self.provider.PROVIDER_ID == ProviderList.OPENSTACK:
        raise self.skipTest("Not running BDM tests because OpenStack is"
                            " not stable enough yet")

    test_vol = self.provider.storage.volumes.create(label, 1)
    with cb_helpers.cleanup_action(lambda: test_vol.delete()):
        test_vol.wait_till_ready()
        test_snap = test_vol.create_snapshot(label=label,
                                             description=label)

        def cleanup_snap(snap):
            # Guard against snapshot creation having failed.
            if snap:
                snap.delete()
                snap.wait_for([SnapshotState.UNKNOWN],
                              terminal_states=[SnapshotState.ERROR])

        with cb_helpers.cleanup_action(lambda: cleanup_snap(test_snap)):
            test_snap.wait_till_ready()
            lc = self.provider.compute.instances.create_launch_config()

            # Add a new blank volume
            lc.add_volume_device(size=1, delete_on_terminate=True)

            # Attach an existing volume
            lc.add_volume_device(size=1, source=test_vol,
                                 delete_on_terminate=True)

            # Add a new volume based on a snapshot
            lc.add_volume_device(size=1, source=test_snap,
                                 delete_on_terminate=True)

            # Override root volume size
            image_id = helpers.get_provider_test_data(
                self.provider, "image")
            img = self.provider.compute.images.get(image_id)
            # The size should be greater than the ami size
            # and therefore, img.min_disk is used.
            lc.add_volume_device(
                is_root=True,
                source=img,
                size=img.min_disk if img and img.min_disk else 30,
                delete_on_terminate=True)

            # Add all available ephemeral devices
            vm_type_name = helpers.get_provider_test_data(
                self.provider, "vm_type")
            vm_type = self.provider.compute.vm_types.find(
                name=vm_type_name)[0]
            # Some providers, e.g. GCP, have a limit on total number of
            # attached disks; it does not matter how many of them are
            # ephemeral or persistent. So, we keep in mind that we have
            # attached 4 disks already, and add ephemeral disks
            # accordingly to not exceed the limit.
            for _ in range(vm_type.num_ephemeral_disks - 4):
                lc.add_ephemeral_device()

            subnet = helpers.get_or_create_default_subnet(
                self.provider)

            inst = None
            with cb_helpers.cleanup_action(
                    lambda: helpers.delete_instance(inst)):
                inst = helpers.create_test_instance(
                    self.provider,
                    label,
                    subnet=subnet,
                    launch_config=lc)
                try:
                    inst.wait_till_ready()
                except WaitStateException as e:
                    self.fail("The block device mapped launch did not "
                              " complete successfully: %s" % e)
def test_default_network(self): subnet = self.provider.networking.subnets.get_or_create_default( zone=get_provider_test_data(self.provider, 'placement')) self.assertIsInstance(subnet, Subnet)
def test_crud_snapshot(self):
    """
    Create a new volume, create a snapshot of the volume, and check
    whether list_snapshots properly detects the new snapshot.
    Delete everything afterwards.
    """
    name = "CBUnitTestCreateSnap-{0}".format(uuid.uuid4())
    test_vol = self.provider.block_store.volumes.create(
        name, 1,
        helpers.get_provider_test_data(self.provider, "placement"))
    with helpers.cleanup_action(lambda: test_vol.delete()):
        test_vol.wait_till_ready()
        snap_name = "CBSnapshot-{0}".format(name)
        test_snap = test_vol.create_snapshot(name=snap_name,
                                             description=snap_name)

        def cleanup_snap(snap):
            # Delete and wait until the snapshot is actually gone.
            snap.delete()
            snap.wait_for([SnapshotState.UNKNOWN],
                          terminal_states=[SnapshotState.ERROR])

        with helpers.cleanup_action(lambda: cleanup_snap(test_snap)):
            test_snap.wait_till_ready()
            self.assertTrue(
                test_snap.id in repr(test_snap),
                "repr(obj) should contain the object id so that the object"
                " can be reconstructed, but does not.")
            snaps = self.provider.block_store.snapshots.list()
            list_snaps = [snap for snap in snaps
                          if snap.name == snap_name]
            self.assertTrue(
                len(list_snaps) == 1,
                "List snapshots does not return the expected volume %s" %
                name)
            # check iteration
            iter_snaps = [
                snap for snap in self.provider.block_store.snapshots
                if snap.name == snap_name]
            self.assertTrue(
                len(iter_snaps) == 1,
                "Iter snapshots does not return the expected volume %s" %
                name)
            # check find
            find_snap = self.provider.block_store.snapshots.find(
                name=snap_name)
            self.assertTrue(
                len(find_snap) == 1,
                "Find snaps does not return the expected snapshot %s" %
                name)
            # check non-existent find
            # TODO: Moto has a bug with filters causing the following test
            # to fail. Need to add tag based filtering support for snaps
            # find_snap = self.provider.block_store.snapshots.find(
            #     name="non_existent_snap")
            # self.assertTrue(
            #     len(find_snap) == 0,
            #     "Find() for a non-existent snap returned %s" %
            #     find_snap)
            get_snap = self.provider.block_store.snapshots.get(
                test_snap.id)
            self.assertTrue(
                list_snaps[0] == get_snap == test_snap,
                "Ids returned by list: {0} and get: {1} are not as "
                " expected: {2}".format(list_snaps[0].id, get_snap.id,
                                        test_snap.id))
            self.assertTrue(
                list_snaps[0].name == get_snap.name == test_snap.name,
                "Names returned by list: {0} and get: {1} are not as "
                " expected: {2}".format(list_snaps[0].name, get_snap.name,
                                        test_snap.name))
            # Test volume creation from a snapshot (via VolumeService)
            sv_name = "CBUnitTestSnapVol-{0}".format(name)
            snap_vol = self.provider.block_store.volumes.create(
                sv_name,
                1,
                helpers.get_provider_test_data(self.provider, "placement"),
                snapshot=test_snap)
            with helpers.cleanup_action(lambda: snap_vol.delete()):
                snap_vol.wait_till_ready()
            # Test volume creation from a snapshot (via Snapshot)
            snap_vol2 = test_snap.create_volume(
                helpers.get_provider_test_data(self.provider, "placement"))
            with helpers.cleanup_action(lambda: snap_vol2.delete()):
                snap_vol2.wait_till_ready()
        # Snapshot cleanup has run; it must no longer be listed.
        snaps = self.provider.block_store.snapshots.list()
        found_snaps = [snap for snap in snaps
                       if snap.name == snap_name]
        self.assertTrue(
            len(found_snaps) == 0,
            "Snapshot %s should have been deleted but still exists."
            % snap_name)
        # Test creation of a snap via SnapshotService
        snap_too_name = "CBSnapToo-{0}".format(name)
        time.sleep(15)  # Or get SnapshotCreationPerVolumeRateExceeded
        test_snap_too = self.provider.block_store.snapshots.create(
            name=snap_too_name, volume=test_vol,
            description=snap_too_name)
        with helpers.cleanup_action(lambda: cleanup_snap(test_snap_too)):
            test_snap_too.wait_till_ready()
            self.assertTrue(
                test_snap_too.id in repr(test_snap_too),
                "repr(obj) should contain the object id so that the object"
                " can be reconstructed, but does not.")
def test_crud_volume(self):
    """
    Create a new volume, check whether the expected values are set,
    and delete it
    """
    name = "CBUnitTestCreateVol-{0}".format(uuid.uuid4())
    test_vol = self.provider.block_store.volumes.create(
        name, 1,
        helpers.get_provider_test_data(self.provider, "placement"))

    def cleanup_vol(vol):
        # Delete and wait until the volume reaches a terminal state.
        vol.delete()
        vol.wait_for([VolumeState.DELETED, VolumeState.UNKNOWN],
                     terminal_states=[VolumeState.ERROR])

    with helpers.cleanup_action(lambda: cleanup_vol(test_vol)):
        test_vol.wait_till_ready()
        self.assertTrue(
            test_vol.id in repr(test_vol),
            "repr(obj) should contain the object id so that the object"
            " can be reconstructed, but does not. eval(repr(obj)) == obj")
        volumes = self.provider.block_store.volumes.list()
        list_volumes = [vol for vol in volumes if vol.name == name]
        self.assertTrue(
            len(list_volumes) == 1,
            "List volumes does not return the expected volume %s" % name)
        # check iteration
        iter_volumes = [
            vol for vol in self.provider.block_store.volumes
            if vol.name == name]
        self.assertTrue(
            len(iter_volumes) == 1,
            "Iter volumes does not return the expected volume %s" % name)
        # check find
        find_vols = self.provider.block_store.volumes.find(name=name)
        self.assertTrue(
            len(find_vols) == 1,
            "Find volumes does not return the expected volume %s" % name)
        # check non-existent find
        # TODO: Moto has a bug with filters causing the following test
        # to fail. Need to add tag based filtering support for volumes
        # find_vols = self.provider.block_store.volumes.find(
        #     name="non_existent_vol")
        # self.assertTrue(
        #     len(find_vols) == 0,
        #     "Find() for a non-existent volume returned %s" % find_vols)
        get_vol = self.provider.block_store.volumes.get(test_vol.id)
        self.assertTrue(
            list_volumes[0] == get_vol == test_vol,
            "Ids returned by list: {0} and get: {1} are not as "
            " expected: {2}".format(list_volumes[0].id, get_vol.id,
                                    test_vol.id))
        self.assertTrue(
            list_volumes[0].name == get_vol.name == test_vol.name,
            "Names returned by list: {0} and get: {1} are not as "
            " expected: {2}".format(list_volumes[0].name, get_vol.name,
                                    test_vol.name))
    # Cleanup has run; the volume must no longer be listed.
    volumes = self.provider.block_store.volumes.list()
    found_volumes = [vol for vol in volumes if vol.name == name]
    self.assertTrue(
        len(found_volumes) == 0,
        "Volume %s should have been deleted but still exists." % name)
def test_instance_methods(self):
    """Exercise mutating instance operations: add/remove a VM firewall
    on a running instance, and attach/detach a floating IP (including a
    reboot to confirm the IP stays detached).
    """
    label = "cb-instmethods-{0}".format(helpers.get_uuid())
    # Declare these variables and late binding will allow
    # the cleanup method access to the most current values
    net = None
    test_inst = None
    fw = None
    with helpers.cleanup_action(lambda: helpers.cleanup_test_resources(
            instance=test_inst, vm_firewall=fw, network=net)):
        net = self.provider.networking.networks.create(
            label=label, cidr_block='10.0.0.0/16')
        cidr = '10.0.1.0/24'
        subnet = net.create_subnet(label=label,
                                   cidr_block=cidr,
                                   zone=helpers.get_provider_test_data(
                                       self.provider, 'placement'))
        test_inst = helpers.get_test_instance(self.provider, label,
                                              subnet=subnet)
        fw = self.provider.security.vm_firewalls.create(
            label=label, description=label, network_id=net.id)

        # Check adding a VM firewall to a running instance
        test_inst.add_vm_firewall(fw)
        test_inst.refresh()
        self.assertTrue(
            fw in test_inst.vm_firewalls, "Expected VM firewall '%s'"
            " to be among instance vm_firewalls: [%s]" %
            (fw, test_inst.vm_firewalls))

        # Check removing a VM firewall from a running instance
        test_inst.remove_vm_firewall(fw)
        test_inst.refresh()
        self.assertTrue(
            fw not in test_inst.vm_firewalls, "Expected VM firewall"
            " '%s' to be removed from instance vm_firewalls: [%s]" %
            (fw, test_inst.vm_firewalls))

        # check floating ips
        router = self.provider.networking.routers.create(label, net)
        gateway = net.gateways.get_or_create_inet_gateway()

        def cleanup_router(router, gateway):
            # Detach before delete; nested cleanups keep each delete
            # attempted even if a detach raises.
            with helpers.cleanup_action(lambda: router.delete()):
                with helpers.cleanup_action(lambda: gateway.delete()):
                    router.detach_subnet(subnet)
                    router.detach_gateway(gateway)

        with helpers.cleanup_action(lambda: cleanup_router(router,
                                                           gateway)):
            router.attach_subnet(subnet)
            router.attach_gateway(gateway)

            # check whether adding an elastic ip works
            fip = gateway.floating_ips.create()
            self.assertFalse(
                fip.in_use,
                "Newly created floating IP address should not be in use.")
            with helpers.cleanup_action(lambda: fip.delete()):
                with helpers.cleanup_action(
                        lambda: test_inst.remove_floating_ip(fip)):
                    test_inst.add_floating_ip(fip)
                    test_inst.refresh()
                    # On Devstack, FloatingIP is listed under private_ips.
                    self.assertIn(fip.public_ip, test_inst.public_ips +
                                  test_inst.private_ips)
                    fip.refresh()
                    self.assertTrue(
                        fip.in_use,
                        "Attached floating IP address should be in use.")
                # Floating IP has been removed by the inner cleanup;
                # after a reboot it must no longer appear on the instance.
                test_inst.refresh()
                test_inst.reboot()
                test_inst.wait_till_ready()
                self.assertNotIn(
                    fip.public_ip,
                    test_inst.public_ips + test_inst.private_ips)
def test_block_device_mapping_attachments(self):
    """Launch an instance with volume, snapshot, root-override, and
    ephemeral block device mappings on a dedicated test network.
    Skipped on OpenStack (BDM support not stable there yet).
    """
    name = "cb_blkattch-{0}".format(helpers.get_uuid())

    if self.provider.PROVIDER_ID == ProviderList.OPENSTACK:
        raise self.skipTest("Not running BDM tests because OpenStack is"
                            " not stable enough yet")

    test_vol = self.provider.block_store.volumes.create(
        name, 1,
        helpers.get_provider_test_data(self.provider, "placement"))
    with helpers.cleanup_action(lambda: test_vol.delete()):
        test_vol.wait_till_ready()
        test_snap = test_vol.create_snapshot(name=name,
                                             description=name)

        def cleanup_snap(snap):
            # Delete and wait until the snapshot is actually gone.
            snap.delete()
            snap.wait_for([SnapshotState.UNKNOWN],
                          terminal_states=[SnapshotState.ERROR])

        with helpers.cleanup_action(lambda: cleanup_snap(test_snap)):
            test_snap.wait_till_ready()
            lc = self.provider.compute.instances.create_launch_config()

            # Add a new blank volume
            lc.add_volume_device(size=1, delete_on_terminate=True)

            # Attach an existing volume
            lc.add_volume_device(size=1, source=test_vol,
                                 delete_on_terminate=True)

            # Add a new volume based on a snapshot
            lc.add_volume_device(size=1, source=test_snap,
                                 delete_on_terminate=True)

            # Override root volume size
            image_id = helpers.get_provider_test_data(
                self.provider, "image")
            img = self.provider.compute.images.get(image_id)
            # The size should be greater than the ami size
            # and therefore, img.min_disk is used.
            lc.add_volume_device(
                is_root=True,
                source=img,
                size=img.min_disk if img and img.min_disk else 2,
                delete_on_terminate=True)

            # Add all available ephemeral devices
            instance_type_name = helpers.get_provider_test_data(
                self.provider, "instance_type")
            inst_type = self.provider.compute.instance_types.find(
                name=instance_type_name)[0]
            for _ in range(inst_type.num_ephemeral_disks):
                lc.add_ephemeral_device()

            net, subnet = helpers.create_test_network(self.provider, name)
            with helpers.cleanup_action(
                    lambda: helpers.delete_test_network(net)):
                inst = helpers.create_test_instance(self.provider,
                                                    name,
                                                    subnet=subnet,
                                                    launch_config=lc)
                with helpers.cleanup_action(
                        lambda: helpers.delete_test_instance(inst)):
                    try:
                        inst.wait_till_ready()
                    except WaitStateException as e:
                        self.fail("The block device mapped launch did "
                                  "not  complete successfully: %s" % e)