def test_cinder(self, cinder):
    # test normal flow
    context = mock.NonCallableMock(session=mock.sentinel.session)
    res = clients.cinder(context)
    self.assertEqual(cinder.return_value, res)
    cinder.assert_called_with('2', service_type='volumev2',
                              session=mock.sentinel.session)

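# NOTE: for context, a minimal sketch of the clients.cinder helper that the
# test above pins down. It is inferred from the assertions (version '2',
# service_type='volumev2', reuse of the request's keystone session), not
# copied from the real module; treat names and structure as illustrative.
def cinder_sketch(context):
    from cinderclient import client as cinderclient
    # Hypothetical: build a v2 client bound to the context's keystone
    # session, matching cinder.assert_called_with(...) in the test above.
    return cinderclient.Client('2', service_type='volumev2',
                               session=context.session)
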
def create_snapshot(context, volume_id, description=None):
    volume = ec2utils.get_db_item(context, volume_id)
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume['os_id'])
    # NOTE(ft): Easy fix to allow snapshot creation in statuses other than
    # AVAILABLE without cinder modifications. Potential race condition
    # though. Seems arguably non-fatal.
    if os_volume.status not in ['available', 'in-use',
                                'attaching', 'detaching']:
        msg = (_("'%s' is not in a state where snapshots are allowed.") %
               volume_id)
        raise exception.IncorrectState(reason=msg)
    with common.OnCrashCleaner() as cleaner:
        os_snapshot = cinder.volume_snapshots.create(
            os_volume.id, force=True,
            display_description=description)
        cleaner.addCleanup(os_snapshot.delete)
        snapshot = db_api.add_item(context, 'snap',
                                   {'os_id': os_snapshot.id})
        cleaner.addCleanup(db_api.delete_item, context, snapshot['id'])
        os_snapshot.update(display_name=snapshot['id'])

    return _format_snapshot(context, snapshot, os_snapshot,
                            volume_id=volume_id)

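# NOTE: create_snapshot above leans on common.OnCrashCleaner for rollback of
# partial work. The class below is only a sketch of the assumed contract
# (cleanups run in reverse registration order, and only if the with-block
# raises); it is not the project's actual implementation.
class OnCrashCleanerSketch(object):
    def __init__(self):
        self._cleanups = []

    def addCleanup(self, func, *args, **kwargs):
        self._cleanups.append((func, args, kwargs))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is not None:
            # Undo partial work in reverse order; swallow cleanup errors so
            # the original exception is the one that propagates.
            for func, args, kwargs in reversed(self._cleanups):
                try:
                    func(*args, **kwargs)
                except Exception:
                    pass
        return False
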
def create_volume(context, availability_zone=None, size=None,
                  snapshot_id=None, volume_type=None, iops=None,
                  encrypted=None, kms_key_id=None):
    if snapshot_id is not None:
        snapshot = ec2utils.get_db_item(context, snapshot_id)
        os_snapshot_id = snapshot['os_id']
    else:
        os_snapshot_id = None

    cinder = clients.cinder(context)
    with common.OnCrashCleaner() as cleaner:
        os_volume = cinder.volumes.create(size, snapshot_id=os_snapshot_id,
                                          volume_type=volume_type,
                                          availability_zone=availability_zone)
        cleaner.addCleanup(os_volume.delete)
        #volume = db_api.add_item(context, 'vol', {'os_id': os_volume.id})
        #cleaner.addCleanup(db_api.delete_item, context, os_volume.id)
        #os_volume.update(display_name=volume['id'])

    return _format_volume(context, os_volume, snapshot_id=snapshot_id)

def detach_volume(context, volume_id, instance_id=None, device=None,
                  force=None):
    #volume = ec2utils.get_db_item(context, volume_id)
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume_id)

    os_instance_id = next(iter(os_volume.attachments), {}).get('server_id')
    if not os_instance_id:
        # TODO(ft): Change the message to the real AWS message
        reason = _('Volume %(vol_id)s is not attached to anything')
        raise exception.IncorrectState(reason=reason % {'vol_id': volume_id})

    nova = clients.nova(context)
    nova.volumes.delete_server_volume(os_instance_id, os_volume.id)
    os_volume.get()
    instance_id = next((i['id'] for i in db_api.get_items(context, 'i')
                        if i['os_id'] == os_instance_id), None)
    # [varun]: Sending delete_on_termination as False (last param below):
    # the delete-on-termination flag makes no sense for a detached volume,
    # so we send False to stay consistent with AWS.
    return _format_attachment(context, os_volume, instance_id=instance_id,
                              delete_on_termination_flag=False)

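# NOTE: the server lookup in detach_volume assumes os_volume.attachments is
# a list of dicts such as [{'server_id': ..., 'device': ...}], so
# next(iter(...), {}) yields the first attachment, or an empty dict for a
# detached volume. A standalone illustration (values are hypothetical):
attachments = [{'server_id': '0c5a1c2d-hypothetical', 'device': '/dev/vdb'}]
assert next(iter(attachments), {}).get('server_id') == '0c5a1c2d-hypothetical'
assert next(iter([]), {}).get('server_id') is None
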
def delete_snapshot(context, snapshot_id):
    snapshot = ec2utils.get_db_item(context, snapshot_id)
    cinder = clients.cinder(context)
    try:
        cinder.volume_snapshots.delete(snapshot['os_id'])
    except cinder_exception.NotFound:
        pass
    # NOTE(andrey-mp) Don't delete item from DB until it disappears from Cloud
    # It will be deleted by describer in the future
    return True

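# NOTE: delete_snapshot is deliberately idempotent at the Cinder layer: a
# NotFound from volume_snapshots.delete is swallowed and True is still
# returned, so repeated DeleteSnapshot requests succeed. A hypothetical
# helper showing the same pattern in isolation:
from cinderclient import exceptions as cinder_exception

def delete_snapshot_quietly(cinder_client, os_snapshot_id):
    try:
        cinder_client.volume_snapshots.delete(os_snapshot_id)
    except cinder_exception.NotFound:
        pass  # already gone in the cloud; treat as success
    return True
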
def test_cinder(self, cinder):
    context = mock.Mock(
        auth_token='fake_token',
        service_catalog=[{'type': 'volume',
                          'endpoints': [{'publicURL': 'cinder_url'}]}])
    res = clients.cinder(context)
    self.assertEqual(cinder.return_value, res)
    cinder.assert_called_with(
        '1', auth_url='keystone_url', cacert=None, insecure=False,
        service_type='volume', username=None, api_key=None)
    self.assertEqual('fake_token', res.client.auth_token)
    self.assertEqual('cinder_url', res.client.management_url)

def delete_volume(context, volume_id):
    #volume = ec2utils.get_db_item(context, volume_id)
    cinder = clients.cinder(context)
    try:
        cinder.volumes.delete(volume_id)
    except cinder_exception.BadRequest:
        # TODO(andrey-mp): raise correct errors for different cases
        raise exception.UnsupportedOperation()
    except cinder_exception.NotFound:
        pass
    # NOTE(andrey-mp) Don't delete item from DB until it disappears from Cloud
    # It will be deleted by describer in the future
    return True

def attach_volume(context, volume_id, instance_id, device):
    #volume = ec2utils.get_db_item(context, volume_id)
    ec2utils.validate_device_name(str(device).lower())
    instance = ec2utils.get_db_item(context, instance_id)

    nova = clients.nova(context)
    try:
        nova.volumes.create_server_volume(instance['os_id'], volume_id,
                                          device)
    except (nova_exception.Conflict, nova_exception.BadRequest):
        # TODO(andrey-mp): raise correct errors for different cases
        raise exception.UnsupportedOperation()
    cinder = clients.cinder(context)
    os_volume = cinder.volumes.get(volume_id)
    # [varun]: Sending delete_on_termination as False (last param below):
    # when a volume is attached, the delete-on-termination flag defaults to
    # false, so we send False to stay consistent with AWS.
    return _format_attachment(context, os_volume, instance_id=instance_id,
                              delete_on_termination_flag=False)

def register_image(context, name=None, image_location=None,
                   description=None, architecture=None,
                   root_device_name=None, block_device_mapping=None,
                   virtualization_type=None, kernel_id=None,
                   ramdisk_id=None, sriov_net_support=None):
    if not image_location and not root_device_name:
        # NOTE(ft): for backward compatibility with a hypothetical code
        # which uses name as image_location
        image_location = name
    if not image_location and not root_device_name:
        msg = _("Either imageLocation or rootDeviceName must be set.")
        raise exception.InvalidParameterCombination(msg)
    if not image_location and not name:
        msg = _('The request must contain the parameter name')
        raise exception.MissingParameter(msg)

    # TODO(ft): check parameters
    properties = {}
    metadata = {'properties': properties}
    if name:
        # TODO(ft): check the name is unique (at least for EBS image case)
        metadata['name'] = name
    if image_location:
        properties['image_location'] = image_location
        if 'name' not in metadata:
            # NOTE(ft): it's needed for backward compatibility
            metadata['name'] = image_location
    if root_device_name:
        properties['root_device_name'] = root_device_name
    cinder = clients.cinder(context)
    if block_device_mapping:
        mappings = instance_api._parse_block_device_mapping(
            context, block_device_mapping)
        # TODO(ft): merge with image manifest's virtual device mappings
        short_root_device_name = (
            ec2utils.block_device_strip_dev(root_device_name))
        for bdm in mappings:
            instance_api._populate_parsed_bdm_parameter(
                bdm, short_root_device_name)
            if 'volume_size' in bdm:
                continue
            try:
                if bdm['source_type'] == 'snapshot':
                    snapshot = cinder.volume_snapshots.get(bdm['snapshot_id'])
                    bdm['volume_size'] = snapshot.size
                elif bdm['source_type'] == 'volume':
                    volume = cinder.volumes.get(bdm['volume_id'])
                    bdm['volume_size'] = volume.size
            except cinder_exception.NotFound:
                pass
        properties['bdm_v2'] = True
        properties['block_device_mapping'] = json.dumps(mappings)
    if architecture is not None:
        properties['architecture'] = architecture
    if kernel_id:
        properties['kernel_id'] = ec2utils.get_os_image(context,
                                                        kernel_id).id
    if ramdisk_id:
        properties['ramdisk_id'] = ec2utils.get_os_image(context,
                                                         ramdisk_id).id

    with common.OnCrashCleaner() as cleaner:
        if 'image_location' in properties:
            os_image = _s3_create(context, metadata)
        else:
            metadata.update({'size': 0, 'is_public': False})
            # TODO(ft): set default values of image properties
            glance = clients.glance(context)
            os_image = glance.images.create(**metadata)
        cleaner.addCleanup(os_image.delete)
        kind = _get_os_image_kind(os_image)
        image = db_api.add_item(context, kind,
                                {'os_id': os_image.id,
                                 'is_public': False,
                                 'description': description})
    return {'imageId': image['id']}

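# NOTE: a hypothetical EC2-style block_device_mapping argument for
# register_image above; device and snapshot ids are illustrative only.
example_block_device_mapping = [
    {'device_name': '/dev/sda1',
     'ebs': {'snapshot_id': 'snap-12345678',
             'delete_on_termination': True}},
]
# After _parse_block_device_mapping and _populate_parsed_bdm_parameter each
# entry is assumed to carry bdm_v2-style keys, roughly:
#   {'source_type': 'snapshot', 'snapshot_id': '<os snapshot uuid>',
#    'device_name': '/dev/sda1', 'volume_size': <snapshot.size from cinder>}
# which register_image stores as properties['block_device_mapping'] via
# json.dumps(mappings), together with properties['bdm_v2'] = True.
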
def get_os_items(self):
    return clients.cinder(self.context).volume_snapshots.list()