def test_block_device_mapping_launch_config(self):
    """Exercise validation and population of a launch config's BDM list."""
    lc = self.provider.compute.instances.create_launch_config()
    # specifying an invalid size should raise an exception
    with self.assertRaises(InvalidConfigurationException):
        lc.add_volume_device(size=-1)
    # Attempting to add a blank volume without specifying a size
    # should raise an exception
    with self.assertRaises(InvalidConfigurationException):
        lc.add_volume_device(source=None)
    # block_devices should be empty so far
    self.assertListEqual(
        lc.block_devices, [],
        "No block devices should have been"
        " added to mappings list since the configuration was invalid")
    # Add a new volume
    lc.add_volume_device(size=1, delete_on_terminate=True)
    # Override root volume size
    image_id = helpers.get_provider_test_data(self.provider, "image")
    img = self.provider.compute.images.get(image_id)
    # The size should be greater than the AMI size and therefore
    # img.min_disk is used when available (fall back to 30 otherwise).
    lc.add_volume_device(
        is_root=True,
        source=img,
        size=img.min_disk if img and img.min_disk else 30,
        delete_on_terminate=True)
    # Attempting to add more than one root volume should raise an
    # exception.
    with self.assertRaises(InvalidConfigurationException):
        lc.add_volume_device(size=1, is_root=True)
    # Attempting to add an incorrect source should raise an exception
    with self.assertRaises(InvalidConfigurationException):
        lc.add_volume_device(
            source="invalid_source",
            delete_on_terminate=True)
    # Add all available ephemeral devices
    vm_type_name = helpers.get_provider_test_data(
        self.provider, "vm_type")
    vm_type = self.provider.compute.vm_types.find(
        name=vm_type_name)[0]
    for _ in range(vm_type.num_ephemeral_disks):
        lc.add_ephemeral_device()
    # block_devices should now hold the two volumes plus the ephemerals.
    # NOTE: fixed message typo ("bit found" -> "but found") and use
    # assertEqual for a clearer failure diff.
    self.assertEqual(
        len(lc.block_devices), 2 + vm_type.num_ephemeral_disks,
        "Expected %d total block devices but found %d" %
        (2 + vm_type.num_ephemeral_disks, len(lc.block_devices)))
def create_instance_from_image(img):
    """Launch an instance from ``img`` and verify its basic properties."""
    inst = None
    # Late binding lets the cleanup lambda see the instance once created.
    with cb_helpers.cleanup_action(
            lambda: helpers.cleanup_test_resources(inst)):
        inst = self.provider.compute.instances.create(
            img_inst_label, img,
            helpers.get_provider_test_data(self.provider, 'vm_type'),
            subnet=subnet)
        inst.wait_till_ready()
        self.assertIsInstance(inst, Instance)
        self.assertEqual(
            inst.label, img_inst_label,
            "Instance label {0} is not equal to the expected label"
            " {1}".format(inst.label, img_inst_label))
        expected_image_id = img.id
        self.assertEqual(
            inst.image_id, expected_image_id,
            "Image id {0} is not equal to the expected id"
            " {1}".format(inst.image_id, expected_image_id))
        # Public IPs are optional, but any listed entry must be non-empty.
        self.assertIsInstance(inst.public_ips, list)
        if inst.public_ips:
            self.assertTrue(
                inst.public_ips[0],
                "public ip should contain a"
                " valid value if a list of public_ips exist")
        # A private IP must always be present and non-empty.
        self.assertIsInstance(inst.private_ips, list)
        self.assertTrue(
            inst.private_ips[0],
            "private ip should contain a valid value")
def test_vm_types_standard(self):
    """Run the standard-behaviour suite against the vm_types service."""
    # Searching by name should return a VMType object; searching for a
    # non-existent name should return an empty iterator (the standard
    # suite verifies both).
    type_name = helpers.get_provider_test_data(self.provider, "vm_type")
    matches = self.provider.compute.vm_types.find(name=type_name)
    sit.check_standard_behaviour(
        self, self.provider.compute.vm_types, matches[0])
def test_zones(self):
    """Every zone has an id and name; the test zone appears exactly once."""
    target_zone = helpers.get_provider_test_data(self.provider, "placement")
    matches = 0
    for region in self.provider.compute.regions:
        self.assertTrue(region.name)
        for zone in region.zones:
            self.assertTrue(zone.id)
            self.assertTrue(zone.name)
            # region_name is optional but must be a string when present
            self.assertTrue(
                zone.region_name is None or
                isinstance(zone.region_name, six.string_types))
            if zone.name == target_zone:
                matches += 1
    # zone info cannot be repeated between regions
    self.assertEqual(matches, 1)
def test_instance_properties(self):
    """Launch an instance and verify all of its basic properties."""
    label = "cb-inst-props-{0}".format(helpers.get_uuid())
    # Declare these variables and late binding will allow
    # the cleanup method access to the most current values
    test_instance = None
    fw = None
    kp = None
    with cb_helpers.cleanup_action(
            lambda: helpers.cleanup_test_resources(test_instance, fw, kp)):
        subnet = helpers.get_or_create_default_subnet(self.provider)
        net = subnet.network
        kp = self.provider.security.key_pairs.create(name=label)
        fw = self.provider.security.vm_firewalls.create(
            label=label, description=label, network=net.id)
        test_instance = helpers.get_test_instance(
            self.provider, label, key_pair=kp, vm_firewalls=[fw],
            subnet=subnet)
        self.assertEqual(
            test_instance.label, label,
            "Instance label {0} is not equal to the expected label"
            " {1}".format(test_instance.label, label))
        image_id = helpers.get_provider_test_data(self.provider, "image")
        self.assertEqual(
            test_instance.image_id, image_id,
            "Image id {0} is not equal to the expected id"
            " {1}".format(test_instance.image_id, image_id))
        self.assertIsInstance(test_instance.zone_id, six.string_types)
        self.assertIsInstance(test_instance.public_ips, list)
        if test_instance.public_ips:
            self.assertTrue(
                test_instance.public_ips[0],
                "public ip should contain a"
                " valid value if a list of public_ips exist")
        self.assertIsInstance(test_instance.private_ips, list)
        self.assertTrue(test_instance.private_ips[0],
                        "private ip should contain a valid value")
        self.assertEqual(test_instance.key_pair_id, kp.id)
        self.assertIsInstance(test_instance.vm_firewalls, list)
        self.assertEqual(test_instance.vm_firewalls[0], fw)
        self.assertIsInstance(test_instance.vm_firewall_ids, list)
        self.assertEqual(test_instance.vm_firewall_ids[0], fw.id)
        # Must have either a public or a private ip
        ip_private = (test_instance.private_ips[0]
                      if test_instance.private_ips else None)
        ip_address = (test_instance.public_ips[0]
                      if test_instance.public_ips and
                      test_instance.public_ips[0]
                      else ip_private)
        # BUGFIX: assert for None BEFORE the string conversion below.
        # Converting first would turn None into the truthy string "None"
        # and the assertion could never fail.
        self.assertIsNotNone(
            ip_address,
            "Instance must have either a public IP or a private IP")
        # Convert to unicode for py27 compatibility with ipaddress()
        ip_address = u"{}".format(ip_address)
        self.assertTrue(
            self._is_valid_ip(ip_address),
            "Instance must have a valid IP address. Got: %s" % ip_address)
        self.assertIsInstance(test_instance.vm_type_id, six.string_types)
        vm_type = self.provider.compute.vm_types.get(
            test_instance.vm_type_id)
        self.assertEqual(
            vm_type, test_instance.vm_type,
            "VM type {0} does not match expected type {1}".format(
                vm_type.name, test_instance.vm_type))
        self.assertIsInstance(vm_type, VMType)
        expected_type = helpers.get_provider_test_data(
            self.provider, 'vm_type')
        self.assertEqual(
            vm_type.name, expected_type,
            "VM type {0} does not match expected type {1}".format(
                vm_type.name, expected_type))
        # The instance's placement zone must exist in the current region
        find_zone = [
            zone for zone in self.provider.compute.regions.current.zones
            if zone.id == test_instance.zone_id]
        self.assertEqual(
            len(find_zone), 1,
            "Instance's placement zone could not be"
            " found in zones list")
def test_block_device_mapping_attachments(self):
    """Launch an instance with blank, attached, snapshot-based, root and
    ephemeral block devices, and verify the launch completes."""
    label = "cb-blkattch-{0}".format(helpers.get_uuid())
    if self.provider.PROVIDER_ID == ProviderList.OPENSTACK:
        # skipTest raises unittest.SkipTest itself; the original
        # `raise self.skipTest(...)` had a dead `raise`.
        self.skipTest("Not running BDM tests because OpenStack is"
                      " not stable enough yet")
    test_vol = self.provider.storage.volumes.create(label, 1)
    with cb_helpers.cleanup_action(lambda: test_vol.delete()):
        test_vol.wait_till_ready()
        test_snap = test_vol.create_snapshot(label=label,
                                             description=label)

        def cleanup_snap(snap):
            # Delete the snapshot and wait until it is gone (UNKNOWN),
            # bailing out if it lands in ERROR instead.
            if snap:
                snap.delete()
                snap.wait_for([SnapshotState.UNKNOWN],
                              terminal_states=[SnapshotState.ERROR])

        with cb_helpers.cleanup_action(lambda: cleanup_snap(test_snap)):
            test_snap.wait_till_ready()
            lc = self.provider.compute.instances.create_launch_config()
            # Add a new blank volume
            lc.add_volume_device(size=1, delete_on_terminate=True)
            # Attach an existing volume
            lc.add_volume_device(size=1, source=test_vol,
                                 delete_on_terminate=True)
            # Add a new volume based on a snapshot
            lc.add_volume_device(size=1, source=test_snap,
                                 delete_on_terminate=True)
            # Override root volume size
            image_id = helpers.get_provider_test_data(
                self.provider, "image")
            img = self.provider.compute.images.get(image_id)
            # The size should be greater than the AMI size and therefore
            # img.min_disk is used when available (fall back to 30).
            lc.add_volume_device(
                is_root=True,
                source=img,
                size=img.min_disk if img and img.min_disk else 30,
                delete_on_terminate=True)
            # Add all available ephemeral devices
            vm_type_name = helpers.get_provider_test_data(
                self.provider, "vm_type")
            vm_type = self.provider.compute.vm_types.find(
                name=vm_type_name)[0]
            # Some providers, e.g. GCP, have a limit on the total number
            # of attached disks, whether ephemeral or persistent. We have
            # already attached 4 disks, so add ephemeral disks
            # accordingly to stay within the limit. (range() of a
            # negative number is simply empty, so fewer than 4 ephemeral
            # disks is safe.)
            for _ in range(vm_type.num_ephemeral_disks - 4):
                lc.add_ephemeral_device()
            subnet = helpers.get_or_create_default_subnet(self.provider)
            inst = None
            with cb_helpers.cleanup_action(
                    lambda: helpers.delete_instance(inst)):
                inst = helpers.create_test_instance(self.provider,
                                                    label,
                                                    subnet=subnet,
                                                    launch_config=lc)
                try:
                    inst.wait_till_ready()
                except WaitStateException as e:
                    # Fixed double space in the failure message.
                    self.fail("The block device mapped launch did not"
                              " complete successfully: %s" % e)