    def attach_volume(self, blockdevice, tag=True):

        context = self._config.context
        if "volume_id" in context.ami:
            volumes = self._connection.get_all_volumes(
                volume_ids=[context.ami.volume_id])
            if not volumes:
                raise VolumeException('Failed to find volume: {0}'.format(
                    context.ami.volume_id))
            self._volume = volumes[0]
            return

        self.allocate_base_volume(tag=tag)
        # must do this as amazon still wants /dev/sd*
        ec2_device_name = blockdevice.replace('xvd', 'sd')
        log.debug('Attaching volume {0} to {1}:{2}({3})'.format(
            self._volume.id, self._instance.id, ec2_device_name, blockdevice))
        self._volume.attach(self._instance.id, ec2_device_name)
        if not self.is_volume_attached(blockdevice):
            log.debug('{0} attachment to {1}:{2}({3}) timed out'.format(
                self._volume.id, self._instance.id, ec2_device_name,
                blockdevice))
            self._volume.add_tag('status', 'used')
            # trigger a retry
            raise VolumeException(
                'Timed out waiting for {0} to attach to {1}:{2}'.format(
                    self._volume.id, self._instance.id, blockdevice))
        log.debug('Volume {0} attached to {1}:{2}'.format(
            self._volume.id, self._instance.id, blockdevice))
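The is_volume_attached helper called above is not among these snippets; a minimal sketch, assuming it simply retries the _volume_attached check shown in the next example until a timeout (the attempt count and interval here are illustrative, not the project's actual implementation):

    def is_volume_attached(self, blockdevice, attempts=30, interval=2):
        # Hypothetical polling wrapper (assumes "import time" at module level):
        # re-check until the volume reports 'in-use' and its device node
        # exists, or the attempt budget runs out.
        for _ in range(attempts):
            try:
                return self._volume_attached(blockdevice)
            except VolumeException:
                time.sleep(interval)
        return False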
Example 2
    def _volume_attached(self, blockdevice):
        status = self._volume.update()
        if status != 'in-use':
            raise VolumeException('Volume {0} not yet attached to {1}:{2}'.format(
                self._volume.id, self._instance.id, blockdevice))
        elif not os_node_exists(blockdevice):
            raise VolumeException('{0} does not exist yet.'.format(blockdevice))
        else:
            return True
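os_node_exists, used above, is not defined in these snippets; a plausible stand-in, assuming all it needs to do is confirm that the attached volume's device node has appeared as a block device:

import os
import stat

def os_node_exists(dev):
    # Illustrative helper: True once udev has created the block-device
    # node (e.g. /dev/xvdf) for the newly attached volume.
    try:
        mode = os.stat(dev).st_mode
    except OSError:
        return False
    return stat.S_ISBLK(mode)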
Example 3
    def _volume_detached(self, blockdevice):
        status = self._volume.update()
        if status != 'available':
            raise VolumeException('Volume {0} not yet detached from {1}'.format(
                self._volume.id, self._instance.id))
        elif os_node_exists(blockdevice):
            raise VolumeException('Device node {0} still exists'.format(blockdevice))
        else:
            return True
Example 4
    def _unmount(self):
        if mounted(self._mountpoint):
            if busy_mount(self._mountpoint).success:
                raise VolumeException('Unable to unmount {0} from {1}'.format(
                    self._dev, self._mountpoint))
            result = unmount(self._mountpoint)
            if not result.success:
                raise VolumeException('Unable to unmount {0} from {1}: {2}'.format(
                    self._dev, self._mountpoint, result.result.std_err))
Example 5
    def _resize(self):
        log.info('Checking and repairing root volume as necessary')
        fsck_op = fsck(self.context.volume.dev)
        if not fsck_op.success:
            raise VolumeException('fsck of {} failed: {}'.format(
                self.context.volume.dev, fsck_op.result.std_err))
        log.info('Attempting to resize root fs to fill volume')
        resize_op = resize2fs(self.context.volume.dev)
        if not resize_op.success:
            raise VolumeException('resize of {} failed: {}'.format(
                self.context.volume.dev, resize_op.result.std_err))
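fsck and resize2fs above return objects exposing .success and .result.std_err; a minimal sketch of that command-wrapper pattern, assuming a plain subprocess call (the project's real wrappers and their signatures may differ):

import subprocess
from collections import namedtuple

CommandOutput = namedtuple('CommandOutput', 'std_out std_err')
CommandResult = namedtuple('CommandResult', 'success result')

def run_command(args):
    # Capture stdout/stderr and report success via the exit code,
    # mirroring the .success / .result.std_err access pattern above.
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return CommandResult(proc.returncode == 0, CommandOutput(out, err))

def fsck(dev):
    # -y: repair without prompting, so the run never blocks on input
    return run_command(['fsck', '-y', dev])

def resize2fs(dev):
    return run_command(['resize2fs', dev])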
Example 6
    def __enter__(self):
        context = self._config.context
        volume_size = context.ami.get('root_volume_size', None)
        if volume_size is None:
            volume_size = self._cloud.plugin_config.get(
                'root_volume_size', None)
        if volume_size is not None:
            volume_size = int(volume_size)
            if volume_size > int(self.plugin_config.max_root_volume_size):
                raise VolumeException(
                    'Requested root volume size {} exceeds the {} maximum '
                    'for S3-backed AMIs'.format(
                        volume_size, self.plugin_config.max_root_volume_size))

        environ["AMINATOR_STORE_TYPE"] = "s3"
        if context.ami.get("name", None):
            environ["AMINATOR_AMI_NAME"] = context.ami.name
        if context.ami.get("cert", None):
            environ["AMINATOR_CERT"] = context.ami.cert
        if context.ami.get("privatekey", None):
            environ["AMINATOR_PRIVATEKEY"] = context.ami.privatekey
        if context.ami.get("ec2_user", None):
            environ["AMINATOR_EC2_USER"] = context.ami.ec2_user
        if context.ami.get("tmpdir", None):
            environ["AMINATOR_TMPDIR"] = context.ami.tmpdir
        if context.ami.get("bucket", None):
            environ["AMINATOR_BUCKET"] = context.ami.bucket

        return super(TaggingS3FinalizerPlugin, self).__enter__()
Example 7
    def __exit__(self, exc_type, exc_value, trace):
        if exc_type:
            log.exception("Exception: {0}: {1}".format(exc_type.__name__, exc_value))
        if exc_type and self._config.context.get("preserve_on_error", False):
            return False
        if not self._teardown_chroot():
            raise VolumeException('Error tearing down chroot')
        return False
Example 8
    def _mount(self):
        if self._config.volume_dir.startswith(('~', '/')):
            self._volume_root = os.path.expanduser(self._config.volume_dir)
        else:
            self._volume_root = os.path.join(self._config.aminator_root,
                                             self._config.volume_dir)
        self._mountpoint = os.path.join(self._volume_root,
                                        os.path.basename(self._dev))
        if not os.path.exists(self._mountpoint):
            os.makedirs(self._mountpoint)

        # Handle optional partition
        dev = self._dev
        if self._blockdevice.partition is not None:
            dev = '{0}{1}'.format(dev, self._blockdevice.partition)

        mountspec = MountSpec(dev, None, self._mountpoint, None)

        if not mounted(self._mountpoint):
            result = mount(mountspec)
            if not result.success:
                msg = 'Unable to mount {0.dev} at {0.mountpoint}: {1}'.format(
                    mountspec, result.result.std_err)
                log.critical(msg)
                raise VolumeException(msg)
        log.debug(
            'Mounted {0.dev} at {0.mountpoint} successfully'.format(mountspec))
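MountSpec above behaves like a simple record of device, filesystem type, mountpoint, and options (field names inferred from the {0.dev}/{0.mountpoint} format strings and the MountSpec(dev, None, mountpoint, None) call); a rough sketch of it and a thin mount(8) wrapper, reusing the run_command helper sketched earlier:

from collections import namedtuple

MountSpec = namedtuple('MountSpec', 'dev fstype mountpoint options')

def mount(mountspec):
    # Illustrative: only add -t/-o when the optional fields are set,
    # then shell out via the run_command sketch above.
    args = ['mount']
    if mountspec.fstype:
        args.extend(['-t', mountspec.fstype])
    if mountspec.options:
        args.extend(['-o', mountspec.options])
    args.extend([mountspec.dev, mountspec.mountpoint])
    return run_command(args)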
    def allocate_base_volume(self, tag=True):
        cloud_config = self._config.plugins[self.full_name]
        context = self._config.context

        self._volume = Volume(connection=self._connection)

        rootdev = context.base_ami.block_device_mapping[
            context.base_ami.root_device_name]
        volume_type = context.cloud.get(
            'provisioner_ebs_type',
            cloud_config.get('provisioner_ebs_type', 'standard'))
        volume_size = context.ami.get('root_volume_size', None)
        if volume_size is None:
            volume_size = cloud_config.get('root_volume_size', None)
            if volume_size is None:
                volume_size = rootdev.size
        volume_size = int(volume_size)
        if volume_size < 1:
            raise VolumeException(
                'root_volume_size must be a positive integer, '
                'received {}'.format(volume_size))
        if volume_size < rootdev.size:
            raise VolumeException(
                'root_volume_size ({}) must be at least as large as the root '
                'volume of the base AMI ({})'.format(volume_size,
                                                     rootdev.size))
        self._volume.id = self._connection.create_volume(
            size=volume_size,
            zone=self._instance.placement,
            volume_type=volume_type,
            snapshot=rootdev.snapshot_id).id
        if not self._volume_available():
            log.critical('Volume {0} is unavailable'.format(self._volume.id))
            return False

        if tag:
            tags = {
                'purpose': cloud_config.get('tag_ami_purpose', 'amination'),
                'status': 'busy',
                'ami': context.base_ami.id,
                'ami-name': context.base_ami.name,
                'arch': context.base_ami.architecture,
            }
            self._connection.create_tags([self._volume.id], tags)
        self._volume.update()
        log.debug('Volume {0} created'.format(self._volume.id))
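_volume_available is checked right after create_volume but is not among these snippets; a minimal sketch, assuming a simple poll of the boto volume status (attempt count and interval are illustrative):

    def _volume_available(self, attempts=30, interval=2):
        # Hypothetical check (assumes "import time" at module level): the
        # new volume must leave 'creating' and report 'available' before
        # attach() can succeed.
        for _ in range(attempts):
            if self._volume.update() == 'available':
                return True
            time.sleep(interval)
        return False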
    def _wait_for_state(self, resource, state):
        if self._state_check(resource, state):
            log.debug('{0} reached state {1}'.format(
                resource.__class__.__name__, state))
            return True
        else:
            raise VolumeException(
                'Timed out waiting for {0} to get to {1}({2})'.format(
                    resource.id, state, resource.status))
Example 11
    def __exit__(self, exc_type, exc_value, trace):
        if exc_type:
            log.debug('Exception encountered in Linux distro plugin context manager',
                      exc_info=(exc_type, exc_value, trace))
        if exc_type and self._config.context.get("preserve_on_error", False):
            return False
        if not self._teardown_chroot():
            raise VolumeException('Error tearing down chroot')
        return False
Example 12
    def detach_volume(self, blockdevice):
        context = self._config.context
        if "volume_id" in context.ami:
            return

        log.debug('Detaching volume {0} from {1}'.format(
            self._volume.id, self._instance.id))
        self._volume.detach()
        if not self._volume_detached(blockdevice):
            raise VolumeException(
                'Timed out waiting for {0} to detach from {1}'.format(
                    self._volume.id, self._instance.id))
        log.debug('Successfully detached volume {0} from {1}'.format(
            self._volume.id, self._instance.id))
Example 13
    def _resize(self):
        log.info('Checking and repairing root volume as necessary')
        fsck_op = fsck(self.context.volume.dev)
        if not fsck_op.success:
            raise VolumeException('fsck of {} failed: {}'.format(
                self.context.volume.dev, fsck_op.result.std_err))
        log.info('Attempting to resize root fs to fill volume')
        if self._blockdevice.partition is not None:
            log.info('Growing partition if necessary')
            growpart_op = growpart(self._dev, self._blockdevice.partition)
            if not growpart_op.success:
                volmsg = 'growpart of {} partition {} failed: {}'
                raise VolumeException(
                    volmsg.format(self._dev, self._blockdevice.partition,
                                  growpart_op.result.std_err))
        resize_op = resize2fs(self.context.volume.dev)
        if not resize_op.success:
            raise VolumeException('resize of {} failed: {}'.format(
                self.context.volume.dev, resize_op.result.std_err))

    def _unmount(self, mountspec):
        recursive_unmount = self.plugin_config.get('recursive_unmount', False)
        if mounted(mountspec):
            result = unmount(mountspec, recursive=recursive_unmount)
            if not result.success:
                err = 'Failed to unmount {0}: {1}'
                err = err.format(mountspec.mountpoint, result.result.std_err)
                open_files = busy_mount(mountspec.mountpoint)
                if open_files.success:
                    err = '{0}. Device has open files:\n{1}'.format(
                        err, open_files.result.std_out)
                raise VolumeException(err)
        log.debug('Unmounted {0.mountpoint}'.format(mountspec))

    def __enter__(self):
        if self._config.volume_dir.startswith(('~', '/')):
            root_base = os.path.expanduser(self._config.volume_dir)
        else:
            root_base = os.path.join(self._config.aminator_root,
                                     self._config.volume_dir)
        root_mountpoint = os.path.join(
            root_base, os.path.basename(self.context.volume.dev))
        self._root_mountspec = MountSpec(self.context.volume.dev, None,
                                         root_mountpoint, None)

        try:
            chroot_setup = self._configure_chroot()
        except Exception as e:
            chroot_setup = False
            log.critical(
                'Error encountered during chroot setup. Attempting to clean up volumes.'
            )
            self._teardown_chroot_mounts()
        if not chroot_setup:
            raise VolumeException('Error configuring chroot')
        return self
Example 16
    def __exit__(self, exc_type, exc_value, trace):
        if exc_type and self._config.context.get("preserve_on_error", False):
            return False
        if not self._teardown_chroot():
            raise VolumeException('Error tearing down chroot')
        return False
Example 17
    def __enter__(self):
        if not self._configure_chroot():
            raise VolumeException('Error configuring chroot')
        return self
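_configure_chroot and _teardown_chroot are referenced throughout these context managers but not shown; a rough sketch of the setup side, assuming the usual bind mounts a chrooted provisioner needs (paths and helpers are illustrative, reusing MountSpec/mount from the earlier sketches):

    def _configure_chroot(self):
        # Hypothetical: bind-mount the pseudo-filesystems on top of the
        # already-mounted volume root so tools inside the chroot work.
        for fs in ('/proc', '/sys', '/dev'):
            spec = MountSpec(fs, None, self._mountpoint + fs, 'bind')
            if not mounted(spec.mountpoint):
                if not mount(spec).success:
                    log.critical('Failed to bind-mount {0}'.format(spec.mountpoint))
                    return False
        return True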
Example 18
    def __exit__(self, exc_type, exc_value, trace):
        if not self._teardown_chroot():
            raise VolumeException('Error tearing down chroot')
        return False