Code example #1
File: image.py Project: jpoley/ec2-api
def get_os_items(self):
    os_images = list(clients.glance(self.context).images.list())
    self.ec2_created_os_images = {
        os_image.properties['ec2_id']: os_image
        for os_image in os_images
        if (os_image.properties.get('ec2_id') and
            self.context.project_id == os_image.owner)}
    return os_images
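
The dictionary built above keys Glance images by their ec2_id property, so
later code can match EC2 database items with the OpenStack images they were
created from. A minimal, hypothetical consumer of that mapping (the describer
object and the ec2_item shape are assumptions made for illustration):

def find_os_image_for_ec2_item(describer, ec2_item):
    # Prefer the ec2_id -> image mapping built by get_os_items();
    # fall back to matching on the stored os_id.
    os_image = describer.ec2_created_os_images.get(ec2_item['id'])
    if os_image is None:
        os_image = next((image for image in describer.get_os_items()
                         if image.id == ec2_item.get('os_id')), None)
    return os_image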
Code example #2
File: image.py Project: cybojanek/ec2-api
def get_os_items(self):
    os_images = list(clients.glance(self.context).images.list())
    self.ec2_created_os_images = {
        os_image.properties['ec2_id']: os_image
        for os_image in os_images
        if (os_image.properties.get('ec2_id') and
            self.context.project_id == os_image.owner)}
    return os_images
Code example #3
def reset_image_attribute(context, image_id, attribute):
    if attribute != 'launchPermission':
        raise exception.InvalidRequest()

    os_image = ec2utils.get_os_image(context, image_id)
    _check_owner(context, os_image)
    glance = clients.glance(context)
    glance.images.update(os_image.id, visibility='private')
    return True
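
_check_owner is called here but not included in these snippets. A plausible
sketch, assuming it only verifies that the requesting project owns the Glance
image (the exact exception type used upstream is an assumption):

def _check_owner(context, os_image):
    # Hypothetical ownership check: only the owning project may change
    # attributes of the image.
    if os_image.owner != context.project_id:
        raise exception.AuthFailure(
            _('Not authorized for image: %s') % os_image.id)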
Code example #4
File: image.py Project: openstack/ec2-api
def reset_image_attribute(context, image_id, attribute):
    if attribute != 'launchPermission':
        raise exception.InvalidRequest()

    os_image = ec2utils.get_os_image(context, image_id)
    _check_owner(context, os_image)
    glance = clients.glance(context)
    glance.images.update(os_image.id, visibility='private')
    return True
Code example #5
File: ec2utils.py Project: zeus911/ec2-api
def get_os_image(context, ec2_image_id):
    kind = get_ec2_id_kind(ec2_image_id)
    ids = db_api.get_items_ids(context, kind, item_ids=(ec2_image_id,))
    if not ids:
        raise exception.InvalidAMIIDNotFound(id=ec2_image_id)
    _id, os_id = ids[0]
    if not os_id:
        return None
    glance = clients.glance(context)
    try:
        return glance.images.get(os_id)
    except glance_exception.HTTPNotFound:
        raise exception.InvalidAMIIDNotFound(id=ec2_image_id)
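
A short usage sketch for get_os_image: resolving an EC2 image id to a Glance
image and handling the case where the backing image does not exist yet (the
id is illustrative):

os_image = get_os_image(context, 'ami-00000001')
if os_image is None:
    # The DB item exists, but no Glance image has been created for it yet.
    LOG.debug('Image ami-00000001 is still being created')
else:
    LOG.debug('Resolved to Glance image %s owned by %s',
              os_image.id, os_image.owner)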
Code example #6
def deregister_image(context, image_id):
    os_image = ec2utils.get_os_image(context, image_id)
    if not os_image:
        image = db_api.get_item_by_id(context, image_id)
        if image.get('state') != 'failed':
            # TODO(ft): figure out corresponding AWS error
            raise exception.IncorrectState(
                reason='Image is still being created')
    else:
        _check_owner(context, os_image)

        glance = clients.glance(context)
        try:
            glance.images.delete(os_image.id)
        except glance_exception.HTTPNotFound:
            pass
    db_api.delete_item(context, image_id)
    return True
Code example #7
File: image.py Project: jpoley/ec2-api
def deregister_image(context, image_id):
    os_image = ec2utils.get_os_image(context, image_id)
    if not os_image:
        image = db_api.get_item_by_id(context, image_id)
        if image.get('state') != 'failed':
            # TODO(ft): figure out corresponding AWS error
            raise exception.IncorrectState(
                reason='Image is still being created')
    else:
        _check_owner(context, os_image)

        glance = clients.glance(context)
        try:
            glance.images.delete(os_image.id)
        except glance_exception.HTTPNotFound:
            pass
    db_api.delete_item(context, image_id)
    return True
Code example #8
def _s3_create(context, metadata):
    """Gets a manifest from s3 and makes an image."""

    # Parse the metadata into bucket and manifest path
    parsed_url = six.moves.urllib.parse.urlparse(metadata['image_location'])
    if parsed_url.hostname is not None:
        # Handle s3://<BUCKET_NAME>/<KEY_PATH> case
        bucket_name = parsed_url.hostname
        manifest_path = parsed_url.path[1:]
    else:
        # Handle <BUCKET_NAME>/<KEY_PATH> case
        bucket_name = parsed_url.path.split('/')[0]
        manifest_path = '/'.join(parsed_url.path.split('/')[1:])

    # Continue with S3 import
    s3_client = _s3_conn(context)
    image_location = '/'.join([bucket_name, manifest_path])
    key = s3_client.get_object(Bucket=bucket_name, Key=manifest_path)
    body = key['Body']
    if isinstance(body, six.string_types):
        manifest = body
    else:
        # TODO(andrey-mp): check big objects
        manifest = body.read()

    (image_metadata, image_parts, encrypted_key,
     encrypted_iv) = _s3_parse_manifest(context, manifest)
    metadata.update(image_metadata)
    metadata.update({'image_state': 'pending', 'visibility': 'private'})

    # TODO(bcwaldon): right now, this removes user-defined ids
    # We need to re-enable this.
    metadata.pop('id', None)

    glance = clients.glance(context)
    image = glance.images.create(**metadata)

    def _update_image_state(image_state):
        glance.images.update(image.id, image_state=image_state)

    def delayed_create():
        """This handles the fetching and decrypting of the part files."""
        context.update_store()
        try:
            image_path = tempfile.mkdtemp(dir=CONF.image_decryption_dir)
            log_vars = {
                'image_location': image_location,
                'image_path': image_path
            }

            _update_image_state('downloading')
            try:
                parts = []
                for part_name in image_parts:
                    part = _s3_download_file(s3_client, bucket_name, part_name,
                                             image_path)
                    parts.append(part)

                # NOTE(vish): this may be suboptimal, should we use cat?
                enc_filename = os.path.join(image_path, 'image.encrypted')
                with open(enc_filename, 'w') as combined:
                    for filename in parts:
                        with open(filename) as part:
                            shutil.copyfileobj(part, combined)

            except Exception:
                LOG.exception(
                    'Failed to download %(image_location)s '
                    'to %(image_path)s', log_vars)
                _update_image_state('failed_download')
                return

            _update_image_state('decrypting')
            try:
                dec_filename = os.path.join(image_path, 'image.tar.gz')
                _s3_decrypt_image(context, enc_filename, encrypted_key,
                                  encrypted_iv, dec_filename)
            except Exception:
                LOG.exception(
                    'Failed to decrypt %(image_location)s '
                    'to %(image_path)s', log_vars)
                _update_image_state('failed_decrypt')
                return

            _update_image_state('untarring')
            try:
                unz_filename = _s3_untarzip_image(image_path, dec_filename)
            except Exception:
                LOG.exception(
                    'Failed to untar %(image_location)s '
                    'to %(image_path)s', log_vars)
                _update_image_state('failed_untar')
                return

            _update_image_state('uploading')
            try:
                with open(unz_filename) as image_file:
                    glance.images.upload(image.id, image_file)
            except Exception:
                LOG.exception(
                    'Failed to upload %(image_location)s '
                    'to %(image_path)s', log_vars)
                _update_image_state('failed_upload')
                return

            _update_image_state('available')

            shutil.rmtree(image_path)
        except glance_exception.HTTPNotFound:
            LOG.info('Image %s was deleted underneath us', image.id)
        except Exception:
            LOG.exception('Failed to complete image %s creation', image.id)

    eventlet.spawn_n(delayed_create)

    return image
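
_s3_conn is not shown above. Because this variant calls
get_object(Bucket=..., Key=...), it expects a botocore/boto3-style S3 client.
A hypothetical sketch of such a helper is below; the configuration option
names and the source of the credentials are assumptions, not the project's
actual implementation:

import boto3

def _s3_conn(context):
    # Hypothetical: the real helper likely derives credentials from the
    # request context (for example Keystone EC2 credentials).
    return boto3.client('s3',
                        endpoint_url=CONF.s3_url,
                        region_name=CONF.s3_region,
                        aws_access_key_id=context.access_key,
                        aws_secret_access_key=context.secret_key)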
Code example #9
def modify_image_attribute(context,
                           image_id,
                           attribute=None,
                           user_group=None,
                           operation_type=None,
                           description=None,
                           launch_permission=None,
                           product_code=None,
                           user_id=None,
                           value=None):
    os_image = ec2utils.get_os_image(context, image_id)
    if not os_image:
        # TODO(ft): figure out corresponding AWS error
        raise exception.IncorrectState(
            reason='Image is still being created or failed')

    attributes = set()

    # NOTE(andrey-mp): launchPermission structure is converted here
    # to plain parameters: attribute, user_group, operation_type, user_id
    if launch_permission is not None:
        attributes.add('launchPermission')
        user_group = list()
        user_id = list()
        if len(launch_permission) == 0:
            msg = _('No operation specified for launchPermission attribute.')
            raise exception.InvalidParameterCombination(msg)
        if len(launch_permission) > 1:
            msg = _('Only one operation can be specified.')
            raise exception.InvalidParameterCombination(msg)
        operation_type, permissions = launch_permission.popitem()
        for index_key in permissions:
            permission = permissions[index_key]
            if 'group' in permission:
                user_group.append(permission['group'])
            if 'user_id' in permission:
                user_id.append(permission['user_id'])
    if attribute == 'launchPermission':
        attributes.add('launchPermission')

    if description is not None:
        attributes.add('description')
        value = description
    if attribute == 'description':
        attributes.add('description')

    # check attributes
    if len(attributes) == 0:
        if product_code is not None:
            attribute = 'productCodes'
        if attribute in [
                'kernel', 'ramdisk', 'productCodes', 'blockDeviceMapping'
        ]:
            raise exception.InvalidParameter(
                _('Parameter %s is invalid. '
                  'The attribute is not supported.') % attribute)
        raise exception.InvalidParameterCombination('No attributes specified.')
    if len(attributes) > 1:
        raise exception.InvalidParameterCombination(
            _('Fields for multiple attribute types specified: %s') %
            str(attributes))

    if 'launchPermission' in attributes:
        if not user_group:
            msg = _('No operation specified for launchPermission attribute.')
            raise exception.InvalidParameterCombination(msg)
        if len(user_group) != 1 and user_group[0] != 'all':
            msg = _('only group "all" is supported')
            raise exception.InvalidParameterValue(parameter='UserGroup',
                                                  value=user_group,
                                                  reason=msg)
        if operation_type not in ['add', 'remove']:
            msg = _('operation_type must be add or remove')
            raise exception.InvalidParameterValue(parameter='OperationType',
                                                  value='operation_type',
                                                  reason=msg)

        _check_owner(context, os_image)
        glance = clients.glance(context)
        visibility = 'public' if operation_type == 'add' else 'private'
        glance.images.update(os_image.id, visibility=visibility)
        return True

    if 'description' in attributes:
        if not value:
            raise exception.MissingParameter(
                'The request must contain the parameter description')

        _check_owner(context, os_image)
        image = ec2utils.get_db_item(context, image_id)
        image['description'] = value
        db_api.update_item(context, image)
        return True
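
The launch_permission argument is a nested structure that the code above
flattens into operation_type, user_group and user_id. A hypothetical call
that makes an image public, with the parameter shape inferred from that
parsing code:

# Equivalent of the EC2 request parameter LaunchPermission.Add.1.Group=all
modify_image_attribute(
    context, 'ami-00000001',
    launch_permission={'add': {'1': {'group': 'all'}}})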
Code example #10
def register_image(context,
                   name=None,
                   image_location=None,
                   description=None,
                   architecture=None,
                   root_device_name=None,
                   block_device_mapping=None,
                   virtualization_type=None,
                   kernel_id=None,
                   ramdisk_id=None,
                   sriov_net_support=None):

    # Setup default flags
    is_s3_import = False
    is_url_import = False

    # Process the input arguments
    if not image_location and not root_device_name:
        # NOTE(ft): for backward compatibility with a hypothetical code
        # which uses name as image_location
        image_location = name
    if not image_location and not root_device_name:
        msg = _("Either imageLocation or rootDeviceName must be set.")
        raise exception.InvalidParameterCombination(msg)
    if not image_location and not name:
        msg = _('The request must contain the parameter name')
        raise exception.MissingParameter(msg)

    # TODO(ft): check parameters
    metadata = {}
    if name:
        # TODO(ft): check the name is unique (at least for EBS image case)
        metadata['name'] = name
    if image_location:

        # Resolve the import type
        metadata['image_location'] = image_location
        parsed_url = six.moves.urllib.parse.urlparse(image_location)
        is_s3_import = (parsed_url.scheme == '') or (parsed_url.scheme == 's3')
        is_url_import = not is_s3_import

        # Check if the name is in the metadata
        if 'name' not in metadata:
            # NOTE(ft): it's needed for backward compatibility
            metadata['name'] = image_location
    if root_device_name:
        metadata['root_device_name'] = root_device_name
    cinder = clients.cinder(context)
    if block_device_mapping:
        mappings = instance_api._parse_block_device_mapping(
            context, block_device_mapping)
        # TODO(ft): merge with image manifest's virtual device mappings
        short_root_device_name = (
            ec2utils.block_device_strip_dev(root_device_name))
        for bdm in mappings:
            instance_api._populate_parsed_bdm_parameter(
                bdm, short_root_device_name)
            if 'volume_size' in bdm:
                continue
            try:
                if bdm['source_type'] == 'snapshot':
                    snapshot = cinder.volume_snapshots.get(bdm['snapshot_id'])
                    bdm['volume_size'] = snapshot.size
                elif bdm['source_type'] == 'volume':
                    volume = cinder.volumes.get(bdm['volume_id'])
                    bdm['volume_size'] = volume.size
            except cinder_exception.NotFound:
                pass
        metadata['bdm_v2'] = 'True'
        metadata['block_device_mapping'] = json.dumps(mappings)
    if architecture is not None:
        metadata['architecture'] = architecture
    if kernel_id:
        metadata['kernel_id'] = ec2utils.get_os_image(context, kernel_id).id
    if ramdisk_id:
        metadata['ramdisk_id'] = ec2utils.get_os_image(context, ramdisk_id).id

    # Begin the import/registration process
    with common.OnCrashCleaner() as cleaner:

        # Setup the glance client
        glance = clients.glance(context)

        # Check if this is an S3 import
        if is_s3_import:
            os_image = _s3_create(context, metadata)

        # Condition for all non-S3 imports
        else:

            # Create the image in glance
            metadata.update({
                'visibility': 'private',
                'container_format': 'bare',
                'disk_format': 'raw'
            })
            os_image = glance.images.create(**metadata)

            # Kick-off the URL image import if from URL
            if is_url_import:
                glance.images.image_import(os_image.id,
                                           method='web-download',
                                           uri=metadata['image_location'])

            # Otherwise, use the default method
            else:
                glance.images.upload(os_image.id, '', image_size=0)

        # Add cleanups and complete the registration process
        cleaner.addCleanup(glance.images.delete, os_image.id)
        kind = _get_os_image_kind(os_image)
        image = db_api.add_item(context, kind, {
            'os_id': os_image.id,
            'is_public': False,
            'description': description
        })

    # Return the image ID for the registration process
    return {'imageId': image['id']}
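
For the EBS-backed path (no image_location), block_device_mapping is parsed
by the instance API helpers into Nova-style mappings before being stored on
the image. A hypothetical snapshot-backed registration; the exact request
shape expected by _parse_block_device_mapping and the ids are assumptions:

register_image(
    context,
    name='my-ebs-image',
    root_device_name='/dev/vda',
    block_device_mapping=[{'device_name': '/dev/vda',
                           'ebs': {'snapshot_id': 'snap-00000001',
                                   'delete_on_termination': True}}])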
Code example #11
File: image.py Project: jpoley/ec2-api
def register_image(context, name=None, image_location=None,
                   description=None, architecture=None,
                   root_device_name=None, block_device_mapping=None,
                   virtualization_type=None, kernel_id=None,
                   ramdisk_id=None, sriov_net_support=None):
    if not image_location and not root_device_name:
        # NOTE(ft): for backward compatibility with a hypothetical code
        # which uses name as image_location
        image_location = name
    if not image_location and not root_device_name:
        msg = _("Either imageLocation or rootDeviceName must be set.")
        raise exception.InvalidParameterCombination(msg)
    if not image_location and not name:
        msg = _('The request must contain the parameter name')
        raise exception.MissingParameter(msg)

    # TODO(ft): check parameters
    properties = {}
    metadata = {'properties': properties}
    if name:
        # TODO(ft): check the name is unique (at least for EBS image case)
        metadata['name'] = name
    if image_location:
        properties['image_location'] = image_location
        if 'name' not in metadata:
            # NOTE(ft): it's needed for backward compatibility
            metadata['name'] = image_location
    if root_device_name:
        properties['root_device_name'] = root_device_name
    cinder = clients.cinder(context)
    if block_device_mapping:
        mappings = instance_api._parse_block_device_mapping(
            context, block_device_mapping)
        # TODO(ft): merge with image manifest's virtual device mappings
        short_root_device_name = (
            ec2utils.block_device_strip_dev(root_device_name))
        for bdm in mappings:
            instance_api._populate_parsed_bdm_parameter(
                bdm, short_root_device_name)
            if 'volume_size' in bdm:
                continue
            try:
                if bdm['source_type'] == 'snapshot':
                    snapshot = cinder.volume_snapshots.get(bdm['snapshot_id'])
                    bdm['volume_size'] = snapshot.size
                elif bdm['source_type'] == 'volume':
                    volume = cinder.volumes.get(bdm['volume_id'])
                    bdm['volume_size'] = volume.size
            except cinder_exception.NotFound:
                pass
        properties['bdm_v2'] = True
        properties['block_device_mapping'] = json.dumps(mappings)
    if architecture is not None:
        properties['architecture'] = architecture
    if kernel_id:
        properties['kernel_id'] = ec2utils.get_os_image(context,
                                                        kernel_id).id
    if ramdisk_id:
        properties['ramdisk_id'] = ec2utils.get_os_image(context,
                                                         ramdisk_id).id

    with common.OnCrashCleaner() as cleaner:
        if 'image_location' in properties:
            os_image = _s3_create(context, metadata)
        else:
            metadata.update({'size': 0,
                             'is_public': False})
            # TODO(ft): set default values of image properties
            glance = clients.glance(context)
            os_image = glance.images.create(**metadata)
        cleaner.addCleanup(os_image.delete)
        kind = _get_os_image_kind(os_image)
        image = db_api.add_item(context, kind, {'os_id': os_image.id,
                                                'is_public': False,
                                                'description': description})
    return {'imageId': image['id']}
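
This older variant targets the Glance v1 API, where custom fields are nested
under a properties dict and visibility is expressed with is_public, while the
newer variant in code example #10 passes flat attributes for Glance v2. A
minimal sketch of the two metadata payload shapes (values are illustrative):

# Glance v1 style (this example): custom fields nested under 'properties'
metadata_v1 = {'name': 'my-image', 'size': 0, 'is_public': False,
               'properties': {'root_device_name': '/dev/vda',
                              'bdm_v2': True}}

# Glance v2 style (code example #10): flat attributes, string values
metadata_v2 = {'name': 'my-image', 'visibility': 'private',
               'root_device_name': '/dev/vda', 'bdm_v2': 'True'}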
Code example #12
File: image.py Project: jpoley/ec2-api
def create_image(context, instance_id, name=None, description=None,
                 no_reboot=False, block_device_mapping=None):
    instance = ec2utils.get_db_item(context, instance_id)

    if not instance_api._is_ebs_instance(context, instance['os_id']):
        msg = _('Instance does not have a volume attached at root (null).')
        raise exception.InvalidParameterValue(value=instance_id,
                                              parameter='InstanceId',
                                              reason=msg)

    nova = clients.nova(context)
    os_instance = nova.servers.get(instance['os_id'])
    restart_instance = False
    if not no_reboot and os_instance.status != 'SHUTOFF':
        if os_instance.status != 'ACTIVE':
            # TODO(ft): Change the error code and message with the real AWS
            # ones
            msg = _('Instance must be run or stopped')
            raise exception.IncorrectState(reason=msg)

        restart_instance = True

    # meaningful image name
    name_map = dict(instance=instance['os_id'], now=timeutils.isotime())
    name = name or _('image of %(instance)s at %(now)s') % name_map

    def delayed_create(context, image, name, os_instance):
        try:
            os_instance.stop()

            # wait for the instance to be really stopped
            start_time = time.time()
            while os_instance.status != 'SHUTOFF':
                time.sleep(1)
                os_instance.get()
                # NOTE(yamahata): timeout and error. 1 hour for now for safety.
                #                 Is it too short/long?
                #                 Or is there any better way?
                timeout = 1 * 60 * 60
                if time.time() > start_time + timeout:
                    err = (_("Couldn't stop instance within %d sec") % timeout)
                    raise exception.EC2Exception(message=err)

            # NOTE(ft): create an image with ec2_id metadata to let other code
            # link os and db objects in race conditions
            os_image_id = os_instance.create_image(
                name, metadata={'ec2_id': image['id']})
            image['os_id'] = os_image_id
            db_api.update_item(context, image)
        except Exception:
            LOG.exception(_LE('Failed to complete image %s creation'),
                          image.id)
            try:
                image['state'] = 'failed'
                db_api.update_item(context, image)
            except Exception:
                LOG.warning(_LW("Couldn't set 'failed' state for db image %s"),
                            image.id, exc_info=True)

        try:
            os_instance.start()
        except Exception:
            LOG.warning(_LW('Failed to start instance %(i_id)s after '
                            'completed creation of image %(image_id)s'),
                        {'i_id': instance['id'],
                         'image_id': image['id']},
                        exc_info=True)

    image = {'is_public': False,
             'description': description}
    if restart_instance:
        # NOTE(ft): image type is hardcoded, because we don't know it now,
        # but cannot change it later. But Nova doesn't specify container format
        # for snapshots of volume backed instances, so that it is 'ami' in fact
        image = db_api.add_item(context, 'ami', image)
        eventlet.spawn_n(delayed_create, context, image, name, os_instance)
    else:
        glance = clients.glance(context)
        with common.OnCrashCleaner() as cleaner:
            os_image_id = os_instance.create_image(name)
            cleaner.addCleanup(glance.images.delete, os_image_id)
            # TODO(andrey-mp): snapshot and volume also must be deleted in case
            # of error
            os_image = glance.images.get(os_image_id)
            image['os_id'] = os_image_id
            image = db_api.add_item(context, _get_os_image_kind(os_image),
                                    image)
    return {'imageId': image['id']}
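
A hypothetical usage sketch for create_image on an EBS-backed instance, with
and without the stop/start cycle (ids are illustrative):

# Stop the instance, snapshot it in a background greenthread, then restart it.
result = create_image(context, 'i-00000001', name='backup-image')

# Snapshot without stopping the instance first.
result = create_image(context, 'i-00000001', name='live-backup',
                      no_reboot=True)
print(result['imageId'])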
Code example #13
File: image.py Project: openstack/ec2-api
def _s3_create(context, metadata):
    """Gets a manifest from s3 and makes an image."""

    # Parse the metadata into bucket and manifest path
    parsed_url = six.moves.urllib.parse.urlparse(metadata['image_location'])
    if parsed_url.hostname is not None:
        # Handle s3://<BUCKET_NAME>/<KEY_PATH> case
        bucket_name = parsed_url.hostname
        manifest_path = parsed_url.path[1:]
    else:
        # Handle <BUCKET_NAME>/<KEY_PATH> case
        bucket_name = parsed_url.path.split('/')[0]
        manifest_path = '/'.join(parsed_url.path.split('/')[1:])

    # Continue with S3 import
    s3_client = _s3_conn(context)
    image_location = '/'.join([bucket_name, manifest_path])
    key = s3_client.get_object(Bucket=bucket_name, Key=manifest_path)
    body = key['Body']
    if isinstance(body, six.string_types):
        manifest = body
    else:
        # TODO(andrey-mp): check big objects
        manifest = body.read()

    (image_metadata, image_parts,
     encrypted_key, encrypted_iv) = _s3_parse_manifest(context, manifest)
    metadata.update(image_metadata)
    metadata.update({'image_state': 'pending',
                     'visibility': 'private'})

    # TODO(bcwaldon): right now, this removes user-defined ids
    # We need to re-enable this.
    metadata.pop('id', None)

    glance = clients.glance(context)
    image = glance.images.create(**metadata)

    def _update_image_state(image_state):
        glance.images.update(image.id, image_state=image_state)

    def delayed_create():
        """This handles the fetching and decrypting of the part files."""
        context.update_store()
        try:
            image_path = tempfile.mkdtemp(dir=CONF.image_decryption_dir)
            log_vars = {'image_location': image_location,
                        'image_path': image_path}

            _update_image_state('downloading')
            try:
                parts = []
                for part_name in image_parts:
                    part = _s3_download_file(s3_client, bucket_name,
                                             part_name, image_path)
                    parts.append(part)

                # NOTE(vish): this may be suboptimal, should we use cat?
                enc_filename = os.path.join(image_path, 'image.encrypted')
                with open(enc_filename, 'w') as combined:
                    for filename in parts:
                        with open(filename) as part:
                            shutil.copyfileobj(part, combined)

            except Exception:
                LOG.exception('Failed to download %(image_location)s '
                              'to %(image_path)s', log_vars)
                _update_image_state('failed_download')
                return

            _update_image_state('decrypting')
            try:
                dec_filename = os.path.join(image_path, 'image.tar.gz')
                _s3_decrypt_image(context, enc_filename, encrypted_key,
                                  encrypted_iv, dec_filename)
            except Exception:
                LOG.exception('Failed to decrypt %(image_location)s '
                              'to %(image_path)s', log_vars)
                _update_image_state('failed_decrypt')
                return

            _update_image_state('untarring')
            try:
                unz_filename = _s3_untarzip_image(image_path, dec_filename)
            except Exception:
                LOG.exception('Failed to untar %(image_location)s '
                              'to %(image_path)s', log_vars)
                _update_image_state('failed_untar')
                return

            _update_image_state('uploading')
            try:
                with open(unz_filename) as image_file:
                    glance.images.upload(image.id, image_file)
            except Exception:
                LOG.exception('Failed to upload %(image_location)s '
                              'to %(image_path)s', log_vars)
                _update_image_state('failed_upload')
                return

            _update_image_state('available')

            shutil.rmtree(image_path)
        except glance_exception.HTTPNotFound:
            LOG.info('Image %s was deleted underneath us', image.id)
        except Exception:
            LOG.exception('Failed to complete image %s creation', image.id)

    eventlet.spawn_n(delayed_create)

    return image
Code example #14
File: image.py Project: openstack/ec2-api
def modify_image_attribute(context, image_id, attribute=None,
                           user_group=None, operation_type=None,
                           description=None, launch_permission=None,
                           product_code=None, user_id=None, value=None):
    os_image = ec2utils.get_os_image(context, image_id)
    if not os_image:
        # TODO(ft): figure out corresponding AWS error
        raise exception.IncorrectState(
            reason='Image is still being created or failed')

    attributes = set()

    # NOTE(andrey-mp): launchPermission structure is converted here
    # to plain parameters: attribute, user_group, operation_type, user_id
    if launch_permission is not None:
        attributes.add('launchPermission')
        user_group = list()
        user_id = list()
        if len(launch_permission) == 0:
            msg = _('No operation specified for launchPermission attribute.')
            raise exception.InvalidParameterCombination(msg)
        if len(launch_permission) > 1:
            msg = _('Only one operation can be specified.')
            raise exception.InvalidParameterCombination(msg)
        operation_type, permissions = launch_permission.popitem()
        for index_key in permissions:
            permission = permissions[index_key]
            if 'group' in permission:
                user_group.append(permission['group'])
            if 'user_id' in permission:
                user_id.append(permission['user_id'])
    if attribute == 'launchPermission':
        attributes.add('launchPermission')

    if description is not None:
        attributes.add('description')
        value = description
    if attribute == 'description':
        attributes.add('description')

    # check attributes
    if len(attributes) == 0:
        if product_code is not None:
            attribute = 'productCodes'
        if attribute in ['kernel', 'ramdisk', 'productCodes',
                         'blockDeviceMapping']:
            raise exception.InvalidParameter(
                _('Parameter %s is invalid. '
                  'The attribute is not supported.') % attribute)
        raise exception.InvalidParameterCombination('No attributes specified.')
    if len(attributes) > 1:
        raise exception.InvalidParameterCombination(
            _('Fields for multiple attribute types specified: %s')
            % str(attributes))

    if 'launchPermission' in attributes:
        if not user_group:
            msg = _('No operation specified for launchPermission attribute.')
            raise exception.InvalidParameterCombination(msg)
        if len(user_group) != 1 and user_group[0] != 'all':
            msg = _('only group "all" is supported')
            raise exception.InvalidParameterValue(parameter='UserGroup',
                                                  value=user_group,
                                                  reason=msg)
        if operation_type not in ['add', 'remove']:
            msg = _('operation_type must be add or remove')
            raise exception.InvalidParameterValue(parameter='OperationType',
                                                  value='operation_type',
                                                  reason=msg)

        _check_owner(context, os_image)
        glance = clients.glance(context)
        visibility = 'public' if operation_type == 'add' else 'private'
        glance.images.update(os_image.id, visibility=visibility)
        return True

    if 'description' in attributes:
        if not value:
            raise exception.MissingParameter(
                'The request must contain the parameter description')

        _check_owner(context, os_image)
        image = ec2utils.get_db_item(context, image_id)
        image['description'] = value
        db_api.update_item(context, image)
        return True
Code example #15
File: image.py Project: openstack/ec2-api
def register_image(context, name=None, image_location=None,
                   description=None, architecture=None,
                   root_device_name=None, block_device_mapping=None,
                   virtualization_type=None, kernel_id=None,
                   ramdisk_id=None, sriov_net_support=None):

    # Setup default flags
    is_s3_import = False
    is_url_import = False

    # Process the input arguments
    if not image_location and not root_device_name:
        # NOTE(ft): for backward compatibility with a hypothetical code
        # which uses name as image_location
        image_location = name
    if not image_location and not root_device_name:
        msg = _("Either imageLocation or rootDeviceName must be set.")
        raise exception.InvalidParameterCombination(msg)
    if not image_location and not name:
        msg = _('The request must contain the parameter name')
        raise exception.MissingParameter(msg)

    # TODO(ft): check parameters
    metadata = {}
    if name:
        # TODO(ft): check the name is unique (at least for EBS image case)
        metadata['name'] = name
    if image_location:

        # Resolve the import type
        metadata['image_location'] = image_location
        parsed_url = six.moves.urllib.parse.urlparse(image_location)
        is_s3_import = (parsed_url.scheme == '') or (parsed_url.scheme == 's3')
        is_url_import = not is_s3_import

        # Check if the name is in the metadata
        if 'name' not in metadata:
            # NOTE(ft): it's needed for backward compatibility
            metadata['name'] = image_location
    if root_device_name:
        metadata['root_device_name'] = root_device_name
    cinder = clients.cinder(context)
    if block_device_mapping:
        mappings = instance_api._parse_block_device_mapping(
            context, block_device_mapping)
        # TODO(ft): merge with image manifest's virtual device mappings
        short_root_device_name = (
            ec2utils.block_device_strip_dev(root_device_name))
        for bdm in mappings:
            instance_api._populate_parsed_bdm_parameter(
                bdm, short_root_device_name)
            if 'volume_size' in bdm:
                continue
            try:
                if bdm['source_type'] == 'snapshot':
                    snapshot = cinder.volume_snapshots.get(bdm['snapshot_id'])
                    bdm['volume_size'] = snapshot.size
                elif bdm['source_type'] == 'volume':
                    volume = cinder.volumes.get(bdm['volume_id'])
                    bdm['volume_size'] = volume.size
            except cinder_exception.NotFound:
                pass
        metadata['bdm_v2'] = 'True'
        metadata['block_device_mapping'] = json.dumps(mappings)
    if architecture is not None:
        metadata['architecture'] = architecture
    if kernel_id:
        metadata['kernel_id'] = ec2utils.get_os_image(context, kernel_id).id
    if ramdisk_id:
        metadata['ramdisk_id'] = ec2utils.get_os_image(context, ramdisk_id).id

    # Begin the import/registration process
    with common.OnCrashCleaner() as cleaner:

        # Setup the glance client
        glance = clients.glance(context)

        # Check if this is an S3 import
        if is_s3_import:
            os_image = _s3_create(context, metadata)

        # Condition for all non-S3 imports
        else:

            # Create the image in glance
            metadata.update({'visibility': 'private',
                             'container_format': 'bare',
                             'disk_format': 'raw'})
            os_image = glance.images.create(**metadata)

            # Kick-off the URL image import if from URL
            if is_url_import:
                glance.images.image_import(os_image.id, method='web-download',
                                           uri=metadata['image_location'])

            # Otherwise, use the default method
            else:
                glance.images.upload(os_image.id, '', image_size=0)

        # Add cleanups and complete the registration process
        cleaner.addCleanup(glance.images.delete, os_image.id)
        kind = _get_os_image_kind(os_image)
        image = db_api.add_item(context, kind, {'os_id': os_image.id,
                                                'is_public': False,
                                                'description': description})

    # Return the image ID for the registration process
    return {'imageId': image['id']}
Code example #16
def create_image(context,
                 instance_id,
                 name=None,
                 description=None,
                 no_reboot=False,
                 block_device_mapping=None):
    instance = ec2utils.get_db_item(context, instance_id)

    if not instance_api._is_ebs_instance(context, instance['os_id']):
        msg = _('Instance does not have a volume attached at root (null).')
        raise exception.InvalidParameterValue(value=instance_id,
                                              parameter='InstanceId',
                                              reason=msg)

    nova = clients.nova(context)
    os_instance = nova.servers.get(instance['os_id'])
    restart_instance = False
    if not no_reboot and os_instance.status != 'SHUTOFF':
        if os_instance.status != 'ACTIVE':
            # TODO(ft): Change the error code and message with the real AWS
            # ones
            msg = _('Instance must be run or stopped')
            raise exception.IncorrectState(reason=msg)

        restart_instance = True

    # meaningful image name
    name_map = dict(instance=instance['os_id'], now=timeutils.isotime())
    name = name or _('image of %(instance)s at %(now)s') % name_map

    def delayed_create(context, image, name, os_instance):
        try:
            os_instance.stop()

            # wait for the instance to be really stopped
            start_time = time.time()
            while os_instance.status != 'SHUTOFF':
                time.sleep(1)
                os_instance.get()
                # NOTE(yamahata): timeout and error. 1 hour for now for safety.
                #                 Is it too short/long?
                #                 Or is there any better way?
                timeout = 1 * 60 * 60
                if time.time() > start_time + timeout:
                    err = (_("Couldn't stop instance within %d sec") % timeout)
                    raise exception.EC2Exception(message=err)

            # NOTE(ft): create an image with ec2_id metadata to let other code
            # link os and db objects in race conditions
            os_image_id = os_instance.create_image(
                name, metadata={'ec2_id': image['id']})
            image['os_id'] = os_image_id
            db_api.update_item(context, image)
        except Exception:
            LOG.exception(_LE('Failed to complete image %s creation'),
                          image.id)
            try:
                image['state'] = 'failed'
                db_api.update_item(context, image)
            except Exception:
                LOG.warning(_LW("Couldn't set 'failed' state for db image %s"),
                            image.id,
                            exc_info=True)

        try:
            os_instance.start()
        except Exception:
            LOG.warning(_LW('Failed to start instance %(i_id)s after '
                            'completed creation of image %(image_id)s'), {
                                'i_id': instance['id'],
                                'image_id': image['id']
                            },
                        exc_info=True)

    image = {'is_public': False, 'description': description}
    if restart_instance:
        # NOTE(ft): image type is hardcoded, because we don't know it now,
        # but cannot change it later. But Nova doesn't specify container format
        # for snapshots of volume backed instances, so that it is 'ami' in fact
        image = db_api.add_item(context, 'ami', image)
        eventlet.spawn_n(delayed_create, context, image, name, os_instance)
    else:
        glance = clients.glance(context)
        with common.OnCrashCleaner() as cleaner:
            os_image_id = os_instance.create_image(name)
            cleaner.addCleanup(glance.images.delete, os_image_id)
            # TODO(andrey-mp): snapshot and volume also must be deleted in case
            # of error
            os_image = glance.images.get(os_image_id)
            image['os_id'] = os_image_id
            image = db_api.add_item(context, _get_os_image_kind(os_image),
                                    image)
    return {'imageId': image['id']}
Code example #17
def register_image(context,
                   name=None,
                   image_location=None,
                   description=None,
                   architecture=None,
                   root_device_name=None,
                   block_device_mapping=None,
                   virtualization_type=None,
                   kernel_id=None,
                   ramdisk_id=None,
                   sriov_net_support=None):
    if not image_location and not root_device_name:
        # NOTE(ft): for backward compatibility with a hypothetical code
        # which uses name as image_location
        image_location = name
    if not image_location and not root_device_name:
        msg = _("Either imageLocation or rootDeviceName must be set.")
        raise exception.InvalidParameterCombination(msg)
    if not image_location and not name:
        msg = _('The request must contain the parameter name')
        raise exception.MissingParameter(msg)

    # TODO(ft): check parameters
    properties = {}
    metadata = {'properties': properties}
    if name:
        # TODO(ft): check the name is unique (at least for EBS image case)
        metadata['name'] = name
    if image_location:
        properties['image_location'] = image_location
        if 'name' not in metadata:
            # NOTE(ft): it's needed for backward compatibility
            metadata['name'] = image_location
    if root_device_name:
        properties['root_device_name'] = root_device_name
    cinder = clients.cinder(context)
    if block_device_mapping:
        mappings = instance_api._parse_block_device_mapping(
            context, block_device_mapping)
        # TODO(ft): merge with image manifest's virtual device mappings
        short_root_device_name = (
            ec2utils.block_device_strip_dev(root_device_name))
        for bdm in mappings:
            instance_api._populate_parsed_bdm_parameter(
                bdm, short_root_device_name)
            if 'volume_size' in bdm:
                continue
            try:
                if bdm['source_type'] == 'snapshot':
                    snapshot = cinder.volume_snapshots.get(bdm['snapshot_id'])
                    bdm['volume_size'] = snapshot.size
                elif bdm['source_type'] == 'volume':
                    volume = cinder.volumes.get(bdm['volume_id'])
                    bdm['volume_size'] = volume.size
            except cinder_exception.NotFound:
                pass
        properties['bdm_v2'] = True
        properties['block_device_mapping'] = json.dumps(mappings)
    if architecture is not None:
        properties['architecture'] = architecture
    if kernel_id:
        properties['kernel_id'] = ec2utils.get_os_image(context, kernel_id).id
    if ramdisk_id:
        properties['ramdisk_id'] = ec2utils.get_os_image(context,
                                                         ramdisk_id).id

    with common.OnCrashCleaner() as cleaner:
        if 'image_location' in properties:
            os_image = _s3_create(context, metadata)
        else:
            metadata.update({'size': 0, 'is_public': False})
            # TODO(ft): set default values of image properties
            glance = clients.glance(context)
            os_image = glance.images.create(**metadata)
        cleaner.addCleanup(os_image.delete)
        kind = _get_os_image_kind(os_image)
        image = db_api.add_item(context, kind, {
            'os_id': os_image.id,
            'is_public': False,
            'description': description
        })
    return {'imageId': image['id']}
Code example #18
File: image.py Project: jpoley/ec2-api
def _s3_create(context, metadata):
    """Gets a manifest from s3 and makes an image."""
    image_location = metadata['properties']['image_location'].lstrip('/')
    bucket_name = image_location.split('/')[0]
    manifest_path = image_location[len(bucket_name) + 1:]
    bucket = _s3_conn(context).get_bucket(bucket_name)
    key = bucket.get_key(manifest_path)
    manifest = key.get_contents_as_string()

    (image_metadata, image_parts,
     encrypted_key, encrypted_iv) = _s3_parse_manifest(context, manifest)
    properties = metadata['properties']
    properties.update(image_metadata['properties'])
    properties['image_state'] = 'pending'
    metadata.update(image_metadata)
    metadata.update({'properties': properties,
                     'is_public': False})

    # TODO(bcwaldon): right now, this removes user-defined ids
    # We need to re-enable this.
    metadata.pop('id', None)

    glance = clients.glance(context)
    image = glance.images.create(**metadata)

    def _update_image_state(image_state):
        image.update(properties={'image_state': image_state})

    def delayed_create():
        """This handles the fetching and decrypting of the part files."""
        context.update_store()
        try:
            image_path = tempfile.mkdtemp(dir=CONF.image_decryption_dir)
            log_vars = {'image_location': image_location,
                        'image_path': image_path}

            _update_image_state('downloading')
            try:
                parts = []
                for part_name in image_parts:
                    part = _s3_download_file(bucket, part_name, image_path)
                    parts.append(part)

                # NOTE(vish): this may be suboptimal, should we use cat?
                enc_filename = os.path.join(image_path, 'image.encrypted')
                with open(enc_filename, 'w') as combined:
                    for filename in parts:
                        with open(filename) as part:
                            shutil.copyfileobj(part, combined)

            except Exception:
                LOG.exception(_LE('Failed to download %(image_location)s '
                                  'to %(image_path)s'), log_vars)
                _update_image_state('failed_download')
                return

            _update_image_state('decrypting')
            try:
                dec_filename = os.path.join(image_path, 'image.tar.gz')
                _s3_decrypt_image(context, enc_filename, encrypted_key,
                                  encrypted_iv, dec_filename)
            except Exception:
                LOG.exception(_LE('Failed to decrypt %(image_location)s '
                                  'to %(image_path)s'), log_vars)
                _update_image_state('failed_decrypt')
                return

            _update_image_state('untarring')
            try:
                unz_filename = _s3_untarzip_image(image_path, dec_filename)
            except Exception:
                LOG.exception(_LE('Failed to untar %(image_location)s '
                                  'to %(image_path)s'), log_vars)
                _update_image_state('failed_untar')
                return

            _update_image_state('uploading')
            try:
                with open(unz_filename) as image_file:
                    image.update(data=image_file)
            except Exception:
                LOG.exception(_LE('Failed to upload %(image_location)s '
                                  'to %(image_path)s'), log_vars)
                _update_image_state('failed_upload')
                return

            _update_image_state('available')

            shutil.rmtree(image_path)
        except glance_exception.HTTPNotFound:
            LOG.info(_LI('Image %s was deleted underneath us'), image.id)
        except Exception:
            LOG.exception(_LE('Failed to complete image %s creation'),
                          image.id)

    eventlet.spawn_n(delayed_create)

    return image
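
This older variant uses a boto 2 style bucket object (get_bucket/get_key) and
hands it to _s3_download_file, which is not shown. A plausible sketch of that
helper, under the boto 2 assumption:

def _s3_download_file(bucket, filename, local_dir):
    # Hypothetical helper: fetch a single image part into local_dir and
    # return the local path.
    key = bucket.get_key(filename)
    local_filename = os.path.join(local_dir, os.path.basename(filename))
    key.get_contents_to_filename(local_filename)
    return local_filename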
Code example #19
def _s3_create(context, metadata):
    """Gets a manifest from s3 and makes an image."""
    image_location = metadata['properties']['image_location'].lstrip('/')
    bucket_name = image_location.split('/')[0]
    manifest_path = image_location[len(bucket_name) + 1:]
    bucket = _s3_conn(context).get_bucket(bucket_name)
    key = bucket.get_key(manifest_path)
    manifest = key.get_contents_as_string()

    (image_metadata, image_parts, encrypted_key,
     encrypted_iv) = _s3_parse_manifest(context, manifest)
    properties = metadata['properties']
    properties.update(image_metadata['properties'])
    properties['image_state'] = 'pending'
    metadata.update(image_metadata)
    metadata.update({'properties': properties, 'is_public': False})

    # TODO(bcwaldon): right now, this removes user-defined ids
    # We need to re-enable this.
    metadata.pop('id', None)

    glance = clients.glance(context)
    image = glance.images.create(**metadata)

    def _update_image_state(image_state):
        image.update(properties={'image_state': image_state})

    def delayed_create():
        """This handles the fetching and decrypting of the part files."""
        context.update_store()
        try:
            image_path = tempfile.mkdtemp(dir=CONF.image_decryption_dir)
            log_vars = {
                'image_location': image_location,
                'image_path': image_path
            }

            _update_image_state('downloading')
            try:
                parts = []
                for part_name in image_parts:
                    part = _s3_download_file(bucket, part_name, image_path)
                    parts.append(part)

                # NOTE(vish): this may be suboptimal, should we use cat?
                enc_filename = os.path.join(image_path, 'image.encrypted')
                with open(enc_filename, 'w') as combined:
                    for filename in parts:
                        with open(filename) as part:
                            shutil.copyfileobj(part, combined)

            except Exception:
                LOG.exception(
                    _LE('Failed to download %(image_location)s '
                        'to %(image_path)s'), log_vars)
                _update_image_state('failed_download')
                return

            _update_image_state('decrypting')
            try:
                dec_filename = os.path.join(image_path, 'image.tar.gz')
                _s3_decrypt_image(context, enc_filename, encrypted_key,
                                  encrypted_iv, dec_filename)
            except Exception:
                LOG.exception(
                    _LE('Failed to decrypt %(image_location)s '
                        'to %(image_path)s'), log_vars)
                _update_image_state('failed_decrypt')
                return

            _update_image_state('untarring')
            try:
                unz_filename = _s3_untarzip_image(image_path, dec_filename)
            except Exception:
                LOG.exception(
                    _LE('Failed to untar %(image_location)s '
                        'to %(image_path)s'), log_vars)
                _update_image_state('failed_untar')
                return

            _update_image_state('uploading')
            try:
                with open(unz_filename) as image_file:
                    image.update(data=image_file)
            except Exception:
                LOG.exception(
                    _LE('Failed to upload %(image_location)s '
                        'to %(image_path)s'), log_vars)
                _update_image_state('failed_upload')
                return

            _update_image_state('available')

            shutil.rmtree(image_path)
        except glance_exception.HTTPNotFound:
            LOG.info(_LI('Image %s was deleted underneath us'), image.id)
        except Exception:
            LOG.exception(_LE('Failed to complete image %s creation'),
                          image.id)

    eventlet.spawn_n(delayed_create)

    return image
Code example #20
File: image.py Project: cybojanek/ec2-api
def import_image(context, architecture=None, client_data=None,
                 client_token=None, description=None, disk_container=None,
                 hypervisor=None, license_type=None, platform=None,
                 role_name=None):
    # Check architecture
    validate_enum(architecture, ('i386', 'x86_64'), 'architecture',
                  allow_empty=True)

    # Ignore client_data...?
    if client_data is not None:
        raise exception.Unsupported(reason='Client data is not supported')

    # Ignore client_token...?
    if client_token is not None:
        raise exception.Unsupported(reason='Client token is not supported')

    # Description retrieved below

    # Check disk_container
    disk = disk_container[0]

    url = disk.get('url')
    disk_format = disk.get('format', 'RAW')

    if disk.get('snapshotid') is not None:
        raise exception.Unsupported(reason='Snapshot IDs not supported')

    if url is None or disk.get('userbucket') is not None:
        raise exception.Unsupported(reason='Buckets not implemented. Need URL')

    # disk_container description overrides the default description
    description = disk.get('description') or description

    # hypervisor set below

    # Ignore license_type
    validate_enum(license_type, ('AWS', 'BYOL'), 'license_type',
                  allow_empty=True)

    # Check platform
    validate_enum(platform, ('Linux', 'Windows'), 'platform', allow_empty=True)

    if role_name is not None:
        raise exception.Unsupported(reason='Buckets not implemented. Need URL')

    # Create EC2 image
    ec2_image = {'is_public': False}
    if description is not None:
        ec2_image['description'] = description
    ec2_image = db_api.add_item(context, 'ami', ec2_image)

    # Create properties for openstack
    properties = {}
    if architecture is not None:
        properties['architecture'] = architecture
    if hypervisor is not None:
        properties['hypervisor_type'] = hypervisor
    if license_type is not None:
        properties['license_type'] = license_type
    if platform is not None:
        properties['os_type'] = platform
    if description is not None:
        properties['description'] = description

    # Set EC2 id for retrieval on the way back
    properties['ec2_id'] = ec2_image['id']

    # Connect to glance
    glance = clients.glance(context)

    # NOTE: container_format is not currently used, so we can always default
    #       to bare. Read more here:
    #       http://docs.openstack.org/developer/glance/formats.html
    os_image = glance.images.create(
            name=ec2_image['id'], copy_from=url, disk_format=disk_format,
            container_format='bare', properties=properties, is_public=False)

    # Update EC2 id
    ec2_image['os_id'] = os_image.id
    db_api.update_item(context, ec2_image)

    return {'imageId': ec2_image['id']}
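
validate_enum is used above but not part of the snippet. A minimal sketch of
what such a helper presumably does; the exception choices mirror the ones
used elsewhere in these examples and are assumptions:

def validate_enum(value, allowed, parameter, allow_empty=False):
    # Hypothetical validator: accept a missing value when allow_empty is set,
    # otherwise require one of the allowed strings.
    if value is None:
        if allow_empty:
            return
        raise exception.MissingParameter(
            'The request must contain the parameter %s' % parameter)
    if value not in allowed:
        raise exception.InvalidParameterValue(
            parameter=parameter, value=value,
            reason='must be one of: %s' % ', '.join(allowed))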