def test_run_with_snapshot(self):
    """Makes sure run/stop/start instance with snapshot works."""
    zone = "zone1:host1"
    vol1 = self.cloud.create_volume(self.context, size=1,
                                    availability_zone=zone)

    # Take two snapshots of the same source volume.
    snap_uuids = []
    for snap_name in ("snap-1", "snap-2"):
        snap = self.cloud.create_snapshot(
            self.context, vol1["volumeId"], name=snap_name,
            description="test snap of vol %s" % vol1["volumeId"])
        snap_uuids.append(ec2utils.ec2_snap_id_to_uuid(snap["snapshotId"]))
    snap1_uuid, snap2_uuid = snap_uuids

    bdm = [{"device_name": "/dev/vdb",
            "snapshot_id": snap1_uuid,
            "delete_on_termination": False},
           {"device_name": "/dev/vdc",
            "snapshot_id": snap2_uuid,
            "delete_on_termination": True}]
    ec2_instance_id = self._run_instance(
        image_id="ami-1",
        instance_type=FLAGS.default_instance_type,
        max_count=1,
        block_device_mapping=bdm)
    instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                     ec2_instance_id)

    vols = [v for v in self.volume_api.get_all(self.context)
            if v["instance_uuid"] == instance_uuid]
    self.assertEqual(len(vols), 2)

    # Map each created volume back to the snapshot it was built from
    # and check it is attached at the expected mountpoint.
    expected_mounts = {snap1_uuid: "/dev/vdb", snap2_uuid: "/dev/vdc"}
    found = {}
    for vol in vols:
        snapshot_uuid = vol["snapshot_id"]
        if snapshot_uuid not in expected_mounts:
            self.fail()
        found[snapshot_uuid] = vol["id"]
        self._assert_volume_attached(vol, instance_uuid,
                                     expected_mounts[snapshot_uuid])
    # Just make sure we found them
    self.assertTrue(found.get(snap1_uuid))
    self.assertTrue(found.get(snap2_uuid))
    vol1_id = found[snap1_uuid]

    self.cloud.terminate_instances(self.context, [ec2_instance_id])

    # The snap1-backed volume had delete_on_termination=False, so it
    # must survive the terminate: detached but not deleted.
    admin_ctxt = context.get_admin_context(read_deleted="no")
    vol = self.volume_api.get(admin_ctxt, vol1_id)
    self._assert_volume_detached(vol)
    self.assertFalse(vol["deleted"])
def test_run_with_snapshot(self):
    """Makes sure run/stop/start instance with snapshot works."""
    zone = 'zone1:host1'
    vol1 = self.cloud.create_volume(self.context, size=1,
                                    availability_zone=zone)

    snap1 = self.cloud.create_snapshot(
        self.context, vol1['volumeId'], name='snap-1',
        description='test snap of vol %s' % vol1['volumeId'])
    snap1_uuid = ec2utils.ec2_snap_id_to_uuid(snap1['snapshotId'])

    snap2 = self.cloud.create_snapshot(
        self.context, vol1['volumeId'], name='snap-2',
        description='test snap of vol %s' % vol1['volumeId'])
    snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])

    # Boot with one volume from each snapshot.
    bdm = [{'device_name': '/dev/vdb',
            'snapshot_id': snap1_uuid,
            'delete_on_termination': False},
           {'device_name': '/dev/vdc',
            'snapshot_id': snap2_uuid,
            'delete_on_termination': True}]
    run_kwargs = {'image_id': 'ami-1',
                  'instance_type': FLAGS.default_instance_type,
                  'max_count': 1,
                  'block_device_mapping': bdm}
    ec2_instance_id = self._run_instance(**run_kwargs)
    instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                     ec2_instance_id)

    attached = [v for v in self.volume_api.get_all(self.context)
                if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(attached), 2)

    vol1_id = None
    vol2_id = None
    for vol in attached:
        src_snap = vol['snapshot_id']
        if src_snap == snap1_uuid:
            vol1_id = vol['id']
            mountpoint = '/dev/vdb'
        elif src_snap == snap2_uuid:
            vol2_id = vol['id']
            mountpoint = '/dev/vdc'
        else:
            self.fail()
        self._assert_volume_attached(vol, instance_uuid, mountpoint)

    # Just make sure we found them
    self.assertTrue(vol1_id)
    self.assertTrue(vol2_id)

    self.cloud.terminate_instances(self.context, [ec2_instance_id])

    # delete_on_termination=False volume must survive the terminate,
    # detached and not deleted.
    admin_ctxt = context.get_admin_context(read_deleted="no")
    vol = self.volume_api.get(admin_ctxt, vol1_id)
    self._assert_volume_detached(vol)
    self.assertFalse(vol['deleted'])
def test_stop_with_attached_volume(self):
    """Make sure attach info is reflected to block device mapping.

    Boots an instance with vol1 in its block device mapping, attaches
    vol2 at runtime, detaches vol1, then verifies the attachment state
    survives a stop/start cycle.
    """
    availability_zone = 'zone1:host1'
    vol1 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol2 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
    vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
    # enforce periodic tasks run in short time to avoid wait for 60s.
    self._restart_compute_service(periodic_interval=0.3)

    kwargs = {'image_id': 'ami-1',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1,
              'block_device_mapping': [{'device_name': '/dev/vdb',
                                        'volume_id': vol1_uuid,
                                        'delete_on_termination': True}]}
    ec2_instance_id = self._run_instance(**kwargs)
    instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
    instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                     ec2_instance_id)

    # Only vol1 should be attached at boot.
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(vols), 1)
    for vol in vols:
        self.assertEqual(vol['id'], vol1_uuid)
        self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')

    vol = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_detached(vol)

    # Attach vol2 at runtime; both volumes now attached.
    instance = db.instance_get(self.context, instance_id)
    self.cloud.compute_api.attach_volume(self.context,
                                         instance,
                                         volume_id=vol2_uuid,
                                         device='/dev/vdc')
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_attached(vol1, instance_uuid, '/dev/vdb')
    vol2 = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol2, instance_uuid, '/dev/vdc')

    self.cloud.compute_api.detach_volume(self.context,
                                         volume_id=vol1_uuid)
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_detached(vol1)

    result = self.cloud.stop_instances(self.context, [ec2_instance_id])
    self.assertTrue(result)

    # vol2 stays attached across the stop.
    vol2 = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol2, instance_uuid, '/dev/vdc')

    self.cloud.start_instances(self.context, [ec2_instance_id])
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(vols), 1)

    # Re-fetch vol1 before asserting: the object held from before the
    # stop/start cycle is stale.  (The original also asserted on the
    # stale object first — that redundant assertion is removed.)
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_detached(vol1)

    self.cloud.terminate_instances(self.context, [ec2_instance_id])
def test_stop_start_with_volume(self):
    """Make sure run instance with block device mapping works.

    Boots with two mapped volumes, attaches them, and checks they stay
    attached across a stop/start cycle and are handled correctly on
    terminate (delete_on_termination honored).
    """
    availability_zone = 'zone1:host1'
    vol1 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol2 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
    vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
    # enforce periodic tasks run in short time to avoid wait for 60s.
    self._restart_compute_service(periodic_interval=0.3)

    kwargs = {'image_id': 'ami-1',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1,
              'block_device_mapping': [{'device_name': '/dev/vdb',
                                        'volume_id': vol1_uuid,
                                        'delete_on_termination': False},
                                       {'device_name': '/dev/vdc',
                                        'volume_id': vol2_uuid,
                                        'delete_on_termination': True}]}
    ec2_instance_id = self._run_instance(**kwargs)
    instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                     ec2_instance_id)

    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(vols), 2)
    for vol in vols:
        self.assertTrue(str(vol['id']) == str(vol1_uuid) or
                        str(vol['id']) == str(vol2_uuid))
        if str(vol['id']) == str(vol1_uuid):
            self.volume_api.attach(self.context, vol, instance_uuid,
                                   '/dev/vdb')
        elif str(vol['id']) == str(vol2_uuid):
            self.volume_api.attach(self.context, vol, instance_uuid,
                                   '/dev/vdc')

    vol = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
    vol = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol, instance_uuid, '/dev/vdc')

    result = self.cloud.stop_instances(self.context, [ec2_instance_id])
    self.assertTrue(result)

    # Both volumes must remain attached across the stop.
    # (A copy-pasted duplicate re-fetch/assert of vol1 was removed.)
    vol = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
    vol = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol, instance_uuid, '/dev/vdc')

    self.cloud.start_instances(self.context, [ec2_instance_id])
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(vols), 2)
    for vol in vols:
        self.assertTrue(str(vol['id']) == str(vol1_uuid) or
                        str(vol['id']) == str(vol2_uuid))
        self.assertTrue(vol['mountpoint'] == '/dev/vdb' or
                        vol['mountpoint'] == '/dev/vdc')
        self.assertEqual(vol['instance_uuid'], instance_uuid)
        self.assertEqual(vol['status'], "in-use")
        self.assertEqual(vol['attach_status'], "attached")

    # Here we puke...
    self.cloud.terminate_instances(self.context, [ec2_instance_id])

    # vol2 had delete_on_termination=True but must not be hard-deleted
    # from the admin (read_deleted="no") view yet.
    admin_ctxt = context.get_admin_context(read_deleted="no")
    vol = self.volume_api.get(admin_ctxt, vol2_uuid)
    self.assertFalse(vol['deleted'])
    self.cloud.delete_volume(self.context, vol1['volumeId'])
    self._restart_compute_service()
def test_run_with_snapshot(self):
    """Makes sure run/stop/start instance with snapshot works."""
    availability_zone = 'zone1:host1'
    vol1 = self.cloud.create_volume(self.context, size=1,
                                    availability_zone=availability_zone)

    description = 'test snap of vol %s' % vol1['volumeId']
    snap1 = self.cloud.create_snapshot(self.context, vol1['volumeId'],
                                       name='snap-1',
                                       description=description)
    snap2 = self.cloud.create_snapshot(self.context, vol1['volumeId'],
                                       name='snap-2',
                                       description=description)
    snap1_uuid = ec2utils.ec2_snap_id_to_uuid(snap1['snapshotId'])
    snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])

    mapping = [{'device_name': '/dev/vdb',
                'snapshot_id': snap1_uuid,
                'delete_on_termination': False},
               {'device_name': '/dev/vdc',
                'snapshot_id': snap2_uuid,
                'delete_on_termination': True}]
    ec2_instance_id = self._run_instance(
        image_id='ami-1',
        instance_type=FLAGS.default_instance_type,
        max_count=1,
        block_device_mapping=mapping)
    instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                     ec2_instance_id)

    vols = [v for v in self.volume_api.get_all(self.context)
            if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(vols), 2)

    # Identify each volume by its source snapshot and verify the
    # mountpoint it was attached at.
    mounts = {snap1_uuid: '/dev/vdb', snap2_uuid: '/dev/vdc'}
    ids_by_snap = {}
    for vol in vols:
        snapshot_uuid = vol['snapshot_id']
        if snapshot_uuid not in mounts:
            self.fail()
        ids_by_snap[snapshot_uuid] = vol['id']
        self._assert_volume_attached(vol, instance_uuid,
                                     mounts[snapshot_uuid])

    # Just make sure we found them
    self.assertTrue(ids_by_snap.get(snap1_uuid))
    self.assertTrue(ids_by_snap.get(snap2_uuid))

    self.cloud.terminate_instances(self.context, [ec2_instance_id])

    # The volume that was not marked delete_on_termination survives:
    # detached, not deleted.
    admin_ctxt = context.get_admin_context(read_deleted="no")
    vol = self.volume_api.get(admin_ctxt, ids_by_snap[snap1_uuid])
    self._assert_volume_detached(vol)
    self.assertFalse(vol['deleted'])
def test_stop_with_attached_volume(self):
    """Make sure attach info is reflected to block device mapping.

    vol1 is attached via the boot-time block device mapping, vol2 via a
    runtime attach; after detaching vol1 the attachment state must be
    preserved across stop/start.
    """
    availability_zone = 'zone1:host1'
    vol1 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol2 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
    vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
    # enforce periodic tasks run in short time to avoid wait for 60s.
    self._restart_compute_service(periodic_interval=0.3)

    kwargs = {'image_id': 'ami-1',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1,
              'block_device_mapping': [{'device_name': '/dev/vdb',
                                        'volume_id': vol1_uuid,
                                        'delete_on_termination': True}]}
    ec2_instance_id = self._run_instance(**kwargs)
    instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
    instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                     ec2_instance_id)

    # Boot-time mapping: only vol1 is attached.
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(vols), 1)
    for vol in vols:
        self.assertEqual(vol['id'], vol1_uuid)
        self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')

    vol = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_detached(vol)

    instance = db.instance_get(self.context, instance_id)
    self.cloud.compute_api.attach_volume(self.context,
                                         instance,
                                         volume_id=vol2_uuid,
                                         device='/dev/vdc')
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_attached(vol1, instance_uuid, '/dev/vdb')
    vol2 = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol2, instance_uuid, '/dev/vdc')

    self.cloud.compute_api.detach_volume(self.context,
                                         volume_id=vol1_uuid)
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_detached(vol1)

    result = self.cloud.stop_instances(self.context, [ec2_instance_id])
    self.assertTrue(result)

    # vol2 remains attached while the instance is stopped.
    vol2 = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol2, instance_uuid, '/dev/vdc')

    self.cloud.start_instances(self.context, [ec2_instance_id])
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(vols), 1)

    # Assert on a fresh fetch only; the pre-stop vol1 object is stale
    # (the original redundantly asserted on it before re-fetching).
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_detached(vol1)

    self.cloud.terminate_instances(self.context, [ec2_instance_id])
def test_stop_start_with_volume(self):
    """Make sure run instance with block device mapping works.

    Two mapped volumes are attached, the instance is stopped and
    started again, and both volumes must still be attached afterwards;
    terminate then honors delete_on_termination.
    """
    availability_zone = 'zone1:host1'
    vol1 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol2 = self.cloud.create_volume(self.context,
                                    size=1,
                                    availability_zone=availability_zone)
    vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
    vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
    # enforce periodic tasks run in short time to avoid wait for 60s.
    self._restart_compute_service(periodic_interval=0.3)

    kwargs = {'image_id': 'ami-1',
              'instance_type': FLAGS.default_instance_type,
              'max_count': 1,
              'block_device_mapping': [{'device_name': '/dev/vdb',
                                        'volume_id': vol1_uuid,
                                        'delete_on_termination': False},
                                       {'device_name': '/dev/vdc',
                                        'volume_id': vol2_uuid,
                                        'delete_on_termination': True}]}
    ec2_instance_id = self._run_instance(**kwargs)
    instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                     ec2_instance_id)

    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(vols), 2)
    for vol in vols:
        self.assertTrue(str(vol['id']) == str(vol1_uuid) or
                        str(vol['id']) == str(vol2_uuid))
        if str(vol['id']) == str(vol1_uuid):
            self.volume_api.attach(self.context, vol, instance_uuid,
                                   '/dev/vdb')
        elif str(vol['id']) == str(vol2_uuid):
            self.volume_api.attach(self.context, vol, instance_uuid,
                                   '/dev/vdc')

    vol = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
    vol = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol, instance_uuid, '/dev/vdc')

    result = self.cloud.stop_instances(self.context, [ec2_instance_id])
    self.assertTrue(result)

    # After the stop both volumes stay attached.  (The original
    # re-fetched and asserted vol1 twice in a row — copy-paste
    # duplication, removed.)
    vol = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_attached(vol, instance_uuid, '/dev/vdb')
    vol = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol, instance_uuid, '/dev/vdc')

    self.cloud.start_instances(self.context, [ec2_instance_id])
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
    self.assertEqual(len(vols), 2)
    for vol in vols:
        self.assertTrue(str(vol['id']) == str(vol1_uuid) or
                        str(vol['id']) == str(vol2_uuid))
        self.assertTrue(vol['mountpoint'] == '/dev/vdb' or
                        vol['mountpoint'] == '/dev/vdc')
        self.assertEqual(vol['instance_uuid'], instance_uuid)
        self.assertEqual(vol['status'], "in-use")
        self.assertEqual(vol['attach_status'], "attached")

    # Here we puke...
    self.cloud.terminate_instances(self.context, [ec2_instance_id])

    # vol2 must not be hard-deleted from the read_deleted="no" view.
    admin_ctxt = context.get_admin_context(read_deleted="no")
    vol = self.volume_api.get(admin_ctxt, vol2_uuid)
    self.assertFalse(vol['deleted'])
    self.cloud.delete_volume(self.context, vol1['volumeId'])
    self._restart_compute_service()
def test_stop_with_attached_volume(self):
    """Make sure attach info is reflected to block device mapping.

    Boot with vol1 mapped, attach vol2 at runtime, detach vol1, then
    verify the attach/detach state is preserved across stop/start.
    """
    availability_zone = "zone1:host1"
    vol1 = self.cloud.create_volume(self.context, size=1,
                                    availability_zone=availability_zone)
    vol2 = self.cloud.create_volume(self.context, size=1,
                                    availability_zone=availability_zone)
    vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1["volumeId"])
    vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2["volumeId"])
    # enforce periodic tasks run in short time to avoid wait for 60s.
    self._restart_compute_service(periodic_interval=0.3)

    kwargs = {
        "image_id": "ami-1",
        "instance_type": FLAGS.default_instance_type,
        "max_count": 1,
        "block_device_mapping": [
            {"device_name": "/dev/vdb",
             "volume_id": vol1_uuid,
             "delete_on_termination": True}
        ],
    }
    ec2_instance_id = self._run_instance(**kwargs)
    instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
    instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                     ec2_instance_id)

    # Only the mapped volume (vol1) is attached at boot.
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v["instance_uuid"] == instance_uuid]
    self.assertEqual(len(vols), 1)
    for vol in vols:
        self.assertEqual(vol["id"], vol1_uuid)
        self._assert_volume_attached(vol, instance_uuid, "/dev/vdb")

    vol = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_detached(vol)

    instance = db.instance_get(self.context, instance_id)
    self.cloud.compute_api.attach_volume(self.context, instance,
                                         volume_id=vol2_uuid,
                                         device="/dev/vdc")
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_attached(vol1, instance_uuid, "/dev/vdb")
    vol2 = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol2, instance_uuid, "/dev/vdc")

    self.cloud.compute_api.detach_volume(self.context,
                                         volume_id=vol1_uuid)
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_detached(vol1)

    result = self.cloud.stop_instances(self.context, [ec2_instance_id])
    self.assertTrue(result)

    # vol2 stays attached across the stop.
    vol2 = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol2, instance_uuid, "/dev/vdc")

    self.cloud.start_instances(self.context, [ec2_instance_id])
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v["instance_uuid"] == instance_uuid]
    self.assertEqual(len(vols), 1)

    # Re-fetch before asserting: the vol1 object held from before the
    # stop/start is stale (the original's extra assert on it removed).
    vol1 = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_detached(vol1)

    self.cloud.terminate_instances(self.context, [ec2_instance_id])
def test_stop_start_with_volume(self):
    """Make sure run instance with block device mapping works.

    Attaches two mapped volumes, cycles the instance through
    stop/start, and checks both remain attached; terminate then honors
    delete_on_termination.
    """
    availability_zone = "zone1:host1"
    vol1 = self.cloud.create_volume(self.context, size=1,
                                    availability_zone=availability_zone)
    vol2 = self.cloud.create_volume(self.context, size=1,
                                    availability_zone=availability_zone)
    vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1["volumeId"])
    vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2["volumeId"])
    # enforce periodic tasks run in short time to avoid wait for 60s.
    self._restart_compute_service(periodic_interval=0.3)

    kwargs = {
        "image_id": "ami-1",
        "instance_type": FLAGS.default_instance_type,
        "max_count": 1,
        "block_device_mapping": [
            {"device_name": "/dev/vdb",
             "volume_id": vol1_uuid,
             "delete_on_termination": False},
            {"device_name": "/dev/vdc",
             "volume_id": vol2_uuid,
             "delete_on_termination": True},
        ],
    }
    ec2_instance_id = self._run_instance(**kwargs)
    instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
                                                     ec2_instance_id)

    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v["instance_uuid"] == instance_uuid]
    self.assertEqual(len(vols), 2)
    for vol in vols:
        self.assertTrue(str(vol["id"]) == str(vol1_uuid) or
                        str(vol["id"]) == str(vol2_uuid))
        if str(vol["id"]) == str(vol1_uuid):
            self.volume_api.attach(self.context, vol, instance_uuid,
                                   "/dev/vdb")
        elif str(vol["id"]) == str(vol2_uuid):
            self.volume_api.attach(self.context, vol, instance_uuid,
                                   "/dev/vdc")

    vol = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_attached(vol, instance_uuid, "/dev/vdb")
    vol = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol, instance_uuid, "/dev/vdc")

    result = self.cloud.stop_instances(self.context, [ec2_instance_id])
    self.assertTrue(result)

    # Both volumes stay attached while stopped.  (Removed the original's
    # copy-pasted duplicate re-fetch/assert of vol1.)
    vol = self.volume_api.get(self.context, vol1_uuid)
    self._assert_volume_attached(vol, instance_uuid, "/dev/vdb")
    vol = self.volume_api.get(self.context, vol2_uuid)
    self._assert_volume_attached(vol, instance_uuid, "/dev/vdc")

    self.cloud.start_instances(self.context, [ec2_instance_id])
    vols = self.volume_api.get_all(self.context)
    vols = [v for v in vols if v["instance_uuid"] == instance_uuid]
    self.assertEqual(len(vols), 2)
    for vol in vols:
        self.assertTrue(str(vol["id"]) == str(vol1_uuid) or
                        str(vol["id"]) == str(vol2_uuid))
        self.assertTrue(vol["mountpoint"] == "/dev/vdb" or
                        vol["mountpoint"] == "/dev/vdc")
        self.assertEqual(vol["instance_uuid"], instance_uuid)
        self.assertEqual(vol["status"], "in-use")
        self.assertEqual(vol["attach_status"], "attached")

    # Here we puke...
    self.cloud.terminate_instances(self.context, [ec2_instance_id])

    # vol2 must not yet be hard-deleted from the read_deleted="no" view.
    admin_ctxt = context.get_admin_context(read_deleted="no")
    vol = self.volume_api.get(admin_ctxt, vol2_uuid)
    self.assertFalse(vol["deleted"])
    self.cloud.delete_volume(self.context, vol1["volumeId"])
    self._restart_compute_service()