def _build_block_device_mappings(context, ec2_instance, os_instance_id):
    mappings = {'root': ec2_instance.get('rootDeviceName', ''),
                'ami': ec2utils.block_device_strip_dev(
                    ec2_instance.get('rootDeviceName', ''))}
    if 'blockDeviceMapping' in ec2_instance:
        # NOTE(yamahata): I'm not sure how ebs device should be numbered.
        #                 Right now sort by device name for deterministic
        #                 result.
        ebs_devices = [ebs['deviceName']
                       for ebs in ec2_instance['blockDeviceMapping']]
        ebs_devices.sort()
        ebs_devices = {'ebs%d' % num: ebs
                       for num, ebs in enumerate(ebs_devices)}
        mappings.update(ebs_devices)

    # TODO(ft): extend Nova API to get ephemerals and swap
    return mappings
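# Illustrative sketch only (the instance data below is made up): for a Nova
# instance dict shaped like the one _build_block_device_mappings() consumes,
# the helper returns the root device under both 'root' and 'ami' keys (the
# latter with its '/dev/' prefix stripped) plus 'ebs<N>' entries numbered in
# sorted device-name order:
#
#   ec2_instance = {'rootDeviceName': '/dev/vda',
#                   'blockDeviceMapping': [{'deviceName': '/dev/vdc'},
#                                          {'deviceName': '/dev/vdb'}]}
#   _build_block_device_mappings(context, ec2_instance, 'fake-os-id')
#   # => {'root': '/dev/vda', 'ami': 'vda',
#   #     'ebs0': '/dev/vdb', 'ebs1': '/dev/vdc'}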
def _format_image(context, image, os_image, images_dict, ids_dict,
                  snapshot_ids=None):
    ec2_image = {'imageId': image['id'],
                 'imageOwnerId': os_image.owner,
                 'imageType': IMAGE_TYPES[
                     ec2utils.get_ec2_id_kind(image['id'])],
                 'isPublic': os_image.is_public,
                 'architecture': os_image.properties.get('architecture'),
                 'creationDate': os_image.created_at}
    if 'description' in image:
        ec2_image['description'] = image['description']
    if 'state' in image:
        state = image['state']
    else:
        state = GLANCE_STATUS_TO_EC2.get(os_image.status, 'error')
    if state in ('available', 'pending'):
        state = _s3_image_state_map.get(
            os_image.properties.get('image_state'), state)
    ec2_image['imageState'] = state

    kernel_id = os_image.properties.get('kernel_id')
    if kernel_id:
        ec2_image['kernelId'] = ec2utils.os_id_to_ec2_id(
            context, 'aki', kernel_id,
            items_by_os_id=images_dict, ids_by_os_id=ids_dict)
    ramdisk_id = os_image.properties.get('ramdisk_id')
    if ramdisk_id:
        ec2_image['ramdiskId'] = ec2utils.os_id_to_ec2_id(
            context, 'ari', ramdisk_id,
            items_by_os_id=images_dict, ids_by_os_id=ids_dict)

    name = os_image.name
    img_loc = os_image.properties.get('image_location')
    if img_loc:
        ec2_image['imageLocation'] = img_loc
    else:
        ec2_image['imageLocation'] = "%s (%s)" % (img_loc, name)

    if not name and img_loc:
        # This should only occur for images registered with ec2 api
        # prior to that api populating the glance name
        ec2_image['name'] = img_loc
    else:
        ec2_image['name'] = name

    properties = ec2utils.deserialize_os_image_properties(os_image)
    root_device_name = (
        ec2utils.block_device_properties_root_device_name(properties))
    mappings = _format_mappings(context, properties, root_device_name,
                                snapshot_ids, os_image.owner)
    if mappings:
        ec2_image['blockDeviceMapping'] = mappings

    root_device_type = 'instance-store'
    if root_device_name:
        ec2_image['rootDeviceName'] = root_device_name
        short_root_device_name = ec2utils.block_device_strip_dev(
            root_device_name)
        if any((short_root_device_name ==
                ec2utils.block_device_strip_dev(bdm.get('deviceName')))
               for bdm in mappings):
            root_device_type = 'ebs'
    ec2_image['rootDeviceType'] = root_device_type

    return ec2_image
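# Rough shape of the dict _format_image() returns for an EBS-backed image;
# the concrete ids, owner, date, and name below are hypothetical, and
# 'machine' assumes IMAGE_TYPES maps the 'ami' kind to that string:
#
#   {'imageId': 'ami-00000001', 'imageOwnerId': 'fake_project_id',
#    'imageType': 'machine', 'isPublic': False, 'architecture': 'x86_64',
#    'creationDate': '2015-01-01T00:00:00.000000',
#    'imageState': 'available', 'imageLocation': 'None (fake-name)',
#    'name': 'fake-name', 'rootDeviceName': '/dev/vda',
#    'rootDeviceType': 'ebs', 'blockDeviceMapping': [...]}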
def register_image(context, name=None, image_location=None,
                   description=None, architecture=None,
                   root_device_name=None, block_device_mapping=None,
                   virtualization_type=None, kernel_id=None,
                   ramdisk_id=None, sriov_net_support=None):

    # Setup default flags
    is_s3_import = False
    is_url_import = False

    # Process the input arguments
    if not image_location and not root_device_name:
        # NOTE(ft): for backward compatibility with a hypothetical code
        # which uses name as image_location
        image_location = name
    if not image_location and not root_device_name:
        msg = _("Either imageLocation or rootDeviceName must be set.")
        raise exception.InvalidParameterCombination(msg)
    if not image_location and not name:
        msg = _('The request must contain the parameter name')
        raise exception.MissingParameter(msg)

    # TODO(ft): check parameters
    metadata = {}
    if name:
        # TODO(ft): check the name is unique (at least for EBS image case)
        metadata['name'] = name
    if image_location:
        # Resolve the import type
        metadata['image_location'] = image_location
        parsed_url = six.moves.urllib.parse.urlparse(image_location)
        is_s3_import = (parsed_url.scheme == '') or (parsed_url.scheme == 's3')
        is_url_import = not is_s3_import

        # Check if the name is in the metadata
        if 'name' not in metadata:
            # NOTE(ft): it's needed for backward compatibility
            metadata['name'] = image_location
    if root_device_name:
        metadata['root_device_name'] = root_device_name
    cinder = clients.cinder(context)
    if block_device_mapping:
        mappings = instance_api._parse_block_device_mapping(
            context, block_device_mapping)
        # TODO(ft): merge with image manifest's virtual device mappings
        short_root_device_name = (
            ec2utils.block_device_strip_dev(root_device_name))
        for bdm in mappings:
            instance_api._populate_parsed_bdm_parameter(
                bdm, short_root_device_name)
            if 'volume_size' in bdm:
                continue
            try:
                if bdm['source_type'] == 'snapshot':
                    snapshot = cinder.volume_snapshots.get(bdm['snapshot_id'])
                    bdm['volume_size'] = snapshot.size
                elif bdm['source_type'] == 'volume':
                    volume = cinder.volumes.get(bdm['volume_id'])
                    bdm['volume_size'] = volume.size
            except cinder_exception.NotFound:
                pass
        metadata['bdm_v2'] = 'True'
        metadata['block_device_mapping'] = json.dumps(mappings)
    if architecture is not None:
        metadata['architecture'] = architecture
    if kernel_id:
        metadata['kernel_id'] = ec2utils.get_os_image(context, kernel_id).id
    if ramdisk_id:
        metadata['ramdisk_id'] = ec2utils.get_os_image(context, ramdisk_id).id

    # Begin the import/registration process
    with common.OnCrashCleaner() as cleaner:
        # Setup the glance client
        glance = clients.glance(context)

        # Check if this is an S3 import
        if is_s3_import:
            os_image = _s3_create(context, metadata)
        # Condition for all non-S3 imports
        else:
            # Create the image in glance
            metadata.update({'visibility': 'private',
                             'container_format': 'bare',
                             'disk_format': 'raw'})
            os_image = glance.images.create(**metadata)

            # Kick-off the URL image import if from URL
            if is_url_import:
                glance.images.image_import(os_image.id, method='web-download',
                                           uri=metadata['image_location'])
            # Otherwise, use the default method
            else:
                glance.images.upload(os_image.id, '', image_size=0)

        # Add cleanups and complete the registration process
        cleaner.addCleanup(glance.images.delete, os_image.id)
        kind = _get_os_image_kind(os_image)
        image = db_api.add_item(context, kind, {'os_id': os_image.id,
                                                'is_public': False,
                                                'description': description})

    # Return the image ID for the registration process
    return {'imageId': image['id']}
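# Illustrative usage only (names and the URL are hypothetical). An
# imageLocation without a URL scheme, or with an 's3' scheme, takes the
# _s3_create() path above; any other scheme takes the glance web-download
# import path:
#
#   register_image(context, name='example-image',
#                  image_location='http://example.com/disk.raw',
#                  architecture='x86_64')
#   # => {'imageId': 'ami-...'}   (EC2 id allocated by db_api.add_item)
#
#   register_image(context, name='example-s3-image',
#                  image_location='mybucket/image.manifest.xml')
#   # scheme is '' -> is_s3_import, handled by _s3_create()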
def register_image(context, name=None, image_location=None,
                   description=None, architecture=None,
                   root_device_name=None, block_device_mapping=None,
                   virtualization_type=None, kernel_id=None,
                   ramdisk_id=None, sriov_net_support=None):
    if not image_location and not root_device_name:
        # NOTE(ft): for backward compatibility with a hypothetical code
        # which uses name as image_location
        image_location = name
    if not image_location and not root_device_name:
        msg = _("Either imageLocation or rootDeviceName must be set.")
        raise exception.InvalidParameterCombination(msg)
    if not image_location and not name:
        msg = _('The request must contain the parameter name')
        raise exception.MissingParameter(msg)

    # TODO(ft): check parameters
    properties = {}
    metadata = {'properties': properties}
    if name:
        # TODO(ft): check the name is unique (at least for EBS image case)
        metadata['name'] = name
    if image_location:
        properties['image_location'] = image_location
        if 'name' not in metadata:
            # NOTE(ft): it's needed for backward compatibility
            metadata['name'] = image_location
    if root_device_name:
        properties['root_device_name'] = root_device_name
    cinder = clients.cinder(context)
    if block_device_mapping:
        mappings = instance_api._parse_block_device_mapping(
            context, block_device_mapping)
        # TODO(ft): merge with image manifest's virtual device mappings
        short_root_device_name = (
            ec2utils.block_device_strip_dev(root_device_name))
        for bdm in mappings:
            instance_api._populate_parsed_bdm_parameter(
                bdm, short_root_device_name)
            if 'volume_size' in bdm:
                continue
            try:
                if bdm['source_type'] == 'snapshot':
                    snapshot = cinder.volume_snapshots.get(bdm['snapshot_id'])
                    bdm['volume_size'] = snapshot.size
                elif bdm['source_type'] == 'volume':
                    volume = cinder.volumes.get(bdm['volume_id'])
                    bdm['volume_size'] = volume.size
            except cinder_exception.NotFound:
                pass
        properties['bdm_v2'] = True
        properties['block_device_mapping'] = json.dumps(mappings)
    if architecture is not None:
        properties['architecture'] = architecture
    if kernel_id:
        properties['kernel_id'] = ec2utils.get_os_image(context, kernel_id).id
    if ramdisk_id:
        properties['ramdisk_id'] = ec2utils.get_os_image(context,
                                                         ramdisk_id).id

    with common.OnCrashCleaner() as cleaner:
        if 'image_location' in properties:
            os_image = _s3_create(context, metadata)
        else:
            metadata.update({'size': 0, 'is_public': False})
            # TODO(ft): set default values of image properties
            glance = clients.glance(context)
            os_image = glance.images.create(**metadata)
        cleaner.addCleanup(os_image.delete)
        kind = _get_os_image_kind(os_image)
        image = db_api.add_item(context, kind, {'os_id': os_image.id,
                                                'is_public': False,
                                                'description': description})
    return {'imageId': image['id']}
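# Hedged sketch of an EBS-backed registration through this older variant.
# The block_device_mapping wire format below ('device_name' plus a nested
# 'ebs' dict) is an assumption about what
# instance_api._parse_block_device_mapping() accepts, and the ids are made up:
#
#   register_image(context, name='ebs-example',
#                  root_device_name='/dev/vda',
#                  block_device_mapping=[
#                      {'device_name': '/dev/vda',
#                       'ebs': {'snapshot_id': 'snap-12345678',
#                               'delete_on_termination': True}}])
#   # no image_location -> glance.images.create() path with size 0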
def test_block_device_strip_dev(self):
    self.assertEqual(ec2utils.block_device_strip_dev('/dev/sda'), 'sda')
    self.assertEqual(ec2utils.block_device_strip_dev('sda'), 'sda')
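    # Extra illustrative cases, assuming the helper only strips a leading
    # '/dev/' prefix (which is all the two assertions above exercise);
    # these are not part of the original test:
    # self.assertEqual(ec2utils.block_device_strip_dev('/dev/vdb'), 'vdb')
    # self.assertEqual(ec2utils.block_device_strip_dev('xvda'), 'xvda')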