def test_console_path(self, is_snap_lxd):
    ctx = context.get_admin_context()
    instance = fake_instance.fake_instance_obj(
        ctx, name='test', memory_mb=0)

    is_snap_lxd.return_value = False
    attributes = common.InstanceAttributes(instance)
    self.assertEqual(
        '/var/log/lxd/instance-00000001/console.log',
        attributes.console_path)

    is_snap_lxd.return_value = True
    attributes = common.InstanceAttributes(instance)
    self.assertEqual(
        '/var/snap/lxd/common/lxd/logs/instance-00000001/console.log',
        attributes.console_path)
def test_cleanup(self, execute, rmtree, getpwuid, _):
    mock_profile = mock.Mock()
    self.client.profiles.get.return_value = mock_profile
    pwuid = mock.Mock()
    pwuid.pw_name = 'user'
    getpwuid.return_value = pwuid

    ctx = context.get_admin_context()
    instance = fake_instance.fake_instance_obj(
        ctx, name='test', memory_mb=0)
    network_info = [_VIF]
    instance_dir = common.InstanceAttributes(instance).instance_dir
    block_device_info = mock.Mock()

    lxd_driver = driver.LXDDriver(None)
    lxd_driver.init_host(None)
    lxd_driver.firewall_driver = mock.Mock()

    lxd_driver.cleanup(ctx, instance, network_info, block_device_info)

    self.vif_driver.unplug.assert_called_once_with(
        instance, network_info[0])
    lxd_driver.firewall_driver.unfilter_instance.assert_called_once_with(
        instance, network_info)
    execute.assert_called_once_with(
        'chown', '-R', 'user:user', instance_dir, run_as_root=True)
    rmtree.assert_called_once_with(instance_dir)
    mock_profile.delete.assert_called_once_with()
def finish_migration(self, context, migration, instance, disk_info,
                     network_info, image_meta, resize_instance,
                     block_device_info=None, power_on=True):
    # Ensure that the instance directory exists
    instance_dir = common.InstanceAttributes(instance).instance_dir
    if not os.path.exists(instance_dir):
        fileutils.ensure_tree(instance_dir)

    # Step 1 - Setup the profile on the dest host
    flavor.to_profile(
        self.client, instance, network_info, block_device_info)

    # Step 2 - Open a websocket on the source host and
    # generate the container config
    self._migrate(migration['source_compute'], instance)

    # Step 3 - Start the network and container
    self.plug_vifs(instance, network_info)
    self.client.containers.get(instance.name).start(wait=True)
def cleanup(self, context, instance, network_info, block_device_info=None,
            destroy_disks=True, migrate_data=None, destroy_vifs=True):
    """Clean up the filesystem around the container.

    See `nova.virt.driver.ComputeDriver.cleanup` for more
    information.
    """
    if destroy_vifs:
        self.unplug_vifs(instance, network_info)
        self.firewall_driver.unfilter_instance(instance, network_info)

    lxd_config = self.client.host_info
    storage.detach_ephemeral(block_device_info, lxd_config, instance)

    name = pwd.getpwuid(os.getuid()).pw_name

    container_dir = common.InstanceAttributes(instance).instance_dir
    if os.path.exists(container_dir):
        utils.execute(
            'chown', '-R', '{}:{}'.format(name, name),
            container_dir, run_as_root=True)
        shutil.rmtree(container_dir)

    try:
        self.client.profiles.get(instance.name).delete()
    except lxd_exceptions.LXDAPIException as e:
        if e.response.status_code == 404:
            LOG.warning('Failed to delete profile: profile does not '
                        'exist for %(instance)s.',
                        {'instance': instance.name})
        else:
            raise
def _base_config(instance, _):
    instance_attributes = common.InstanceAttributes(instance)
    return {
        'environment.product_name': 'OpenStack Nova',
        'raw.lxc': 'lxc.console.logfile={}\n'.format(
            instance_attributes.console_path),
    }
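# Illustrative sketch (not driver code): for an instance whose console
# path resolves to '/var/log/lxd/instance-00000001/console.log' (the
# value asserted in the tests above), _base_config returns a config
# fragment shaped like this:
expected_config = {
    'environment.product_name': 'OpenStack Nova',
    'raw.lxc': ('lxc.console.logfile='
                '/var/log/lxd/instance-00000001/console.log\n'),
}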
def test_instance_dir(self):
    ctx = context.get_admin_context()
    instance = fake_instance.fake_instance_obj(
        ctx, name='test', memory_mb=0)

    attributes = common.InstanceAttributes(instance)

    self.assertEqual('/i/instance-00000001', attributes.instance_dir)
def test_console_path(self):
    ctx = context.get_admin_context()
    instance = fake_instance.fake_instance_obj(
        ctx, name='test', memory_mb=0)

    attributes = common.InstanceAttributes(instance)

    self.assertEqual(
        '/var/log/lxd/instance-00000001/console.log',
        attributes.console_path)
def test_instance_dir(self, is_snap_lxd):
    ctx = context.get_admin_context()
    instance = fake_instance.fake_instance_obj(
        ctx, name='test', memory_mb=0)

    is_snap_lxd.return_value = False
    attributes = common.InstanceAttributes(instance)
    self.assertEqual(
        '/i/instance-00000001', attributes.instance_dir)
def _ephemeral_storage(instance, _, __, block_info):
    instance_attributes = common.InstanceAttributes(instance)
    ephemeral_storage = driver.block_device_info_get_ephemerals(block_info)
    if ephemeral_storage:
        devices = {}
        for ephemeral in ephemeral_storage:
            ephemeral_src = os.path.join(
                instance_attributes.storage_path,
                ephemeral['virtual_name'])
            devices[ephemeral['virtual_name']] = {
                'path': '/mnt',
                'source': ephemeral_src,
                'type': 'disk',
            }
        return devices
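# Illustrative sketch (not driver code): with one ephemeral disk named
# 'ephemeral0' and a hypothetical storage_path of
# '/var/lib/lxd-nova/storage', _ephemeral_storage returns an LXD
# disk-device mapping shaped like this:
expected_devices = {
    'ephemeral0': {
        'path': '/mnt',
        'source': '/var/lib/lxd-nova/storage/ephemeral0',
        'type': 'disk',
    },
}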
def get_console_output(self, context, instance):
    """Get the output of the container console.

    See `nova.virt.driver.ComputeDriver.get_console_output` for more
    information.
    """
    instance_attrs = common.InstanceAttributes(instance)
    console_path = instance_attrs.console_path
    if not os.path.exists(console_path):
        return ''
    uid = pwd.getpwuid(os.getuid()).pw_uid
    utils.execute(
        'chown', '%s:%s' % (uid, uid), console_path, run_as_root=True)
    utils.execute(
        'chmod', '755', instance_attrs.container_path, run_as_root=True)
    with open(console_path, 'rb') as f:
        log_data, _ = _last_bytes(f, MAX_CONSOLE_BYTES)
        return log_data
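# _last_bytes is not shown in this excerpt; a minimal sketch of such a
# helper, assuming the (data, bytes_remaining) return shape used by
# get_console_output above (Nova's libvirt driver carries a helper with
# the same name and shape):
import os


def _last_bytes(file_like_object, num_bytes):
    """Return the last num_bytes of a file and how many bytes were skipped."""
    file_like_object.seek(0, os.SEEK_END)
    file_size = file_like_object.tell()
    # Read at most num_bytes from the tail of the file.
    seek_pos = max(0, file_size - num_bytes)
    file_like_object.seek(seek_pos, os.SEEK_SET)
    remaining = seek_pos  # bytes before the returned window
    return (file_like_object.read(), remaining)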
def _ephemeral_storage(instance, client, __, block_info):
    instance_attributes = common.InstanceAttributes(instance)
    ephemeral_storage = driver.block_device_info_get_ephemerals(block_info)
    if ephemeral_storage:
        devices = {}
        for ephemeral in ephemeral_storage:
            ephemeral_src = os.path.join(
                instance_attributes.storage_path,
                ephemeral['virtual_name'])
            device = {
                'path': '/mnt',
                'source': ephemeral_src,
                'type': 'disk',
            }
            if CONF.lxd.pool:
                extensions = client.host_info.get('api_extensions', [])
                if 'storage' in extensions:
                    device['pool'] = CONF.lxd.pool
                else:
                    msg = _("Host does not have storage pool support")
                    raise exception.NovaException(msg)
            devices[ephemeral['virtual_name']] = device
        return devices
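# Hedged usage sketch: probing a host for the 'storage' API extension
# with pylxd before relying on CONF.lxd.pool, mirroring the check in the
# pool-aware _ephemeral_storage above. Connecting over the local unix
# socket is an assumption for this example.
from pylxd import Client

client = Client()  # local unix-socket connection
if 'storage' in client.host_info.get('api_extensions', []):
    print('host supports storage pools')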
def attach_ephemeral(client, block_device_info, lxd_config, instance):
    """Attach ephemeral storage to an instance."""
    ephemeral_storage = driver.block_device_info_get_ephemerals(
        block_device_info)
    if ephemeral_storage:
        storage_driver = lxd_config['environment']['storage']

        container = client.containers.get(instance.name)
        container_id_map = container.config[
            'volatile.last_state.idmap'].split(',')
        storage_id = container_id_map[2].split(':')[1]

        instance_attrs = common.InstanceAttributes(instance)
        for ephemeral in ephemeral_storage:
            storage_dir = os.path.join(
                instance_attrs.storage_path, ephemeral['virtual_name'])
            if storage_driver == 'zfs':
                # NOTE(ajkavanagh) - BUG/1782329 - this is temporary until
                # storage pools is implemented. LXD 3 removed the
                # storage.zfs_pool_name key from the config. So, if it
                # fails, we need to grab the configured storage pool and
                # use that as the name instead.
                try:
                    zfs_pool = lxd_config['config']['storage.zfs_pool_name']
                except KeyError:
                    zfs_pool = CONF.lxd.pool

                utils.execute(
                    'zfs', 'create',
                    '-o', 'mountpoint=%s' % storage_dir,
                    '-o', 'quota=%sG' % instance.ephemeral_gb,
                    '%s/%s-ephemeral' % (zfs_pool, instance.name),
                    run_as_root=True)
            elif storage_driver == 'btrfs':
                # We re-use the same btrfs subvolumes that LXD uses,
                # so the ephemeral storage path is updated in the profile
                # before the container starts.
                storage_dir = os.path.join(
                    instance_attrs.container_path,
                    ephemeral['virtual_name'])
                profile = client.profiles.get(instance.name)
                storage_name = ephemeral['virtual_name']
                profile.devices[storage_name]['source'] = storage_dir
                profile.save()
                utils.execute(
                    'btrfs', 'subvolume', 'create', storage_dir,
                    run_as_root=True)
                utils.execute(
                    'btrfs', 'qgroup', 'limit',
                    '%sg' % instance.ephemeral_gb, storage_dir,
                    run_as_root=True)
            elif storage_driver == 'lvm':
                fileutils.ensure_tree(storage_dir)

                lvm_pool = lxd_config['config']['storage.lvm_vg_name']
                lvm_volume = '%s-%s' % (
                    instance.name, ephemeral['virtual_name'])
                lvm_path = '/dev/%s/%s' % (lvm_pool, lvm_volume)

                cmd = (
                    'lvcreate', '-L', '%sG' % instance.ephemeral_gb,
                    '-n', lvm_volume, lvm_pool)
                utils.execute(*cmd, run_as_root=True, attempts=3)

                utils.execute('mkfs', '-t', 'ext4', lvm_path,
                              run_as_root=True)
                cmd = ('mount', '-t', 'ext4', lvm_path, storage_dir)
                utils.execute(*cmd, run_as_root=True)
            else:
                reason = _("Unsupported LXD storage detected. Supported "
                           "storage drivers are zfs, btrfs and lvm.")
                raise exception.NovaException(reason)

            utils.execute(
                'chown', storage_id, storage_dir, run_as_root=True)
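# Illustrative sketch only: the minimal block_device_info shape that
# drives attach_ephemeral and _ephemeral_storage. The 'ephemerals' key
# and 'virtual_name' field follow Nova's block_device_info conventions;
# the device_name and size values here are hypothetical placeholders.
block_device_info = {
    'ephemerals': [
        {
            'virtual_name': 'ephemeral0',
            'device_name': '/dev/vdb',  # hypothetical
            'size': 10,                 # GiB, hypothetical
        },
    ],
}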
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None):
    """Create a new lxd container as a nova instance.

    Creating a new container requires a number of steps. First, the
    image is fetched from glance, if needed. Next, the network is
    connected. A profile is created in LXD, and then the container
    is created and started.

    See `nova.virt.driver.ComputeDriver.spawn` for more
    information.
    """
    try:
        self.client.containers.get(instance.name)
        raise exception.InstanceExists(name=instance.name)
    except lxd_exceptions.LXDAPIException as e:
        if e.response.status_code != 404:
            raise  # Re-raise the exception if it wasn't NotFound

    instance_dir = common.InstanceAttributes(instance).instance_dir
    if not os.path.exists(instance_dir):
        fileutils.ensure_tree(instance_dir)

    # Check to see if LXD already has a copy of the image. If not,
    # fetch it.
    try:
        self.client.images.get_by_alias(instance.image_ref)
    except lxd_exceptions.LXDAPIException as e:
        if e.response.status_code != 404:
            raise
        _sync_glance_image_to_lxd(
            self.client, context, instance.image_ref)

    # Plug in the network
    if network_info:
        timeout = CONF.vif_plugging_timeout
        if (utils.is_neutron() and timeout):
            events = [('network-vif-plugged', vif['id'])
                      for vif in network_info
                      if not vif.get('active', True)]
        else:
            events = []

        try:
            with self.virtapi.wait_for_instance_event(
                    instance, events, deadline=timeout,
                    error_callback=_neutron_failed_callback):
                self.plug_vifs(instance, network_info)
        except eventlet.timeout.Timeout:
            LOG.warning('Timeout waiting for vif plugging callback '
                        'for instance %(uuid)s',
                        {'uuid': instance['name']})
            if CONF.vif_plugging_is_fatal:
                self.destroy(
                    context, instance, network_info, block_device_info)
                raise exception.InstanceDeployFailure(
                    'Timeout waiting for vif plugging',
                    instance_id=instance['name'])

    # Create the profile
    try:
        profile = flavor.to_profile(
            self.client, instance, network_info, block_device_info)
    except lxd_exceptions.LXDAPIException as e:
        with excutils.save_and_reraise_exception():
            self.cleanup(
                context, instance, network_info, block_device_info)

    # Create the container
    container_config = {
        'name': instance.name,
        'profiles': [profile.name],
        'source': {
            'type': 'image',
            'alias': instance.image_ref,
        },
    }
    try:
        container = self.client.containers.create(
            container_config, wait=True)
    except lxd_exceptions.LXDAPIException as e:
        with excutils.save_and_reraise_exception():
            self.cleanup(
                context, instance, network_info, block_device_info)

    lxd_config = self.client.host_info
    storage.attach_ephemeral(
        self.client, block_device_info, lxd_config, instance)

    if configdrive.required_by(instance):
        configdrive_path = self._add_configdrive(
            context, instance, injected_files, admin_password,
            network_info)

        profile = self.client.profiles.get(instance.name)
        config_drive = {
            'configdrive': {
                'path': '/config-drive',
                'source': configdrive_path,
                'type': 'disk',
                'readonly': 'True',
            }
        }
        profile.devices.update(config_drive)
        profile.save()

    try:
        self.firewall_driver.setup_basic_filtering(
            instance, network_info)
        self.firewall_driver.instance_filter(
            instance, network_info)

        container.start(wait=True)

        self.firewall_driver.apply_instance_filter(
            instance, network_info)
    except lxd_exceptions.LXDAPIException as e:
        with excutils.save_and_reraise_exception():
            self.cleanup(
                context, instance, network_info, block_device_info)
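# Standalone sketch of the container-create call used by spawn above,
# assuming an image already imported under an alias. The instance name,
# profile name and alias are placeholders; spawn derives them from
# instance.name and instance.image_ref.
from pylxd import Client

client = Client()
config = {
    'name': 'instance-00000001',        # placeholder instance name
    'profiles': ['instance-00000001'],  # per-instance profile, as above
    'source': {'type': 'image', 'alias': 'ubuntu-16.04'},
}
container = client.containers.create(config, wait=True)
container.start(wait=True)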
def _add_configdrive(self, context, instance,
                     injected_files, admin_password, network_info):
    """Create configdrive for the instance."""
    if CONF.config_drive_format != 'iso9660':
        raise exception.ConfigDriveUnsupportedFormat(
            format=CONF.config_drive_format)

    container = self.client.containers.get(instance.name)
    container_id_map = container.config[
        'volatile.last_state.idmap'].split(',')
    storage_id = container_id_map[2].split(':')[1]

    extra_md = {}
    if admin_password:
        extra_md['admin_pass'] = admin_password

    inst_md = instance_metadata.InstanceMetadata(
        instance, content=injected_files, extra_md=extra_md,
        network_info=network_info, request_context=context)

    iso_path = os.path.join(
        common.InstanceAttributes(instance).instance_dir,
        'configdrive.iso')

    with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
        try:
            cdb.make_drive(iso_path)
        except processutils.ProcessExecutionError as e:
            with excutils.save_and_reraise_exception():
                LOG.error('Creating config drive failed with '
                          'error: %s', e, instance=instance)

    configdrive_dir = os.path.join(
        nova.conf.CONF.instances_path, instance.name, 'configdrive')
    if not os.path.exists(configdrive_dir):
        fileutils.ensure_tree(configdrive_dir)

    with utils.tempdir() as tmpdir:
        mounted = False
        try:
            _, err = utils.execute(
                'mount', '-o',
                'loop,uid=%d,gid=%d' % (os.getuid(), os.getgid()),
                iso_path, tmpdir, run_as_root=True)
            mounted = True

            # Copy and adjust the files from the ISO so that we
            # don't have the ISO mounted during the life cycle of the
            # instance and the directory can be removed once the
            # instance is terminated.
            for ent in os.listdir(tmpdir):
                shutil.copytree(
                    os.path.join(tmpdir, ent),
                    os.path.join(configdrive_dir, ent))

            utils.execute('chmod', '-R', '775', configdrive_dir,
                          run_as_root=True)
            utils.execute('chown', '-R', storage_id, configdrive_dir,
                          run_as_root=True)
        finally:
            if mounted:
                utils.execute('umount', tmpdir, run_as_root=True)

    return configdrive_dir
def _add_configdrive(self, context, instance,
                     injected_files, admin_password, network_info):
    """Create configdrive for the instance."""
    if CONF.config_drive_format != 'iso9660':
        raise exception.ConfigDriveUnsupportedFormat(
            format=CONF.config_drive_format)

    container = self.client.containers.get(instance.name)
    storage_id = 0
    # Determine the UID shift used for the container uid mapping.
    # Sample JSON config from LXD:
    #
    # {
    #     "volatile.apply_template": "create",
    #     ...
    #     "volatile.last_state.idmap": "[
    #         {\"Isuid\":true, \"Isgid\":false, \"Hostid\":100000,
    #          \"Nsid\":0, \"Maprange\":65536},
    #         {\"Isuid\":false, \"Isgid\":true, \"Hostid\":100000,
    #          \"Nsid\":0, \"Maprange\":65536}]",
    #     "volatile.tap5fd6808a-7b.name": "eth0"
    # }
    container_id_map = json.loads(
        container.config['volatile.last_state.idmap'])
    uid_map = list(filter(lambda id_map: id_map.get("Isuid"),
                          container_id_map))
    if uid_map:
        storage_id = uid_map[0].get("Hostid", 0)
    else:
        # Privileged containers do not have a uid/gid mapping, so the
        # LXD API returns an empty idmap; keep storage_id = 0.
        pass

    extra_md = {}
    if admin_password:
        extra_md['admin_pass'] = admin_password

    inst_md = instance_metadata.InstanceMetadata(
        instance, content=injected_files, extra_md=extra_md,
        network_info=network_info, request_context=context)

    iso_path = os.path.join(
        common.InstanceAttributes(instance).instance_dir,
        'configdrive.iso')

    with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
        try:
            cdb.make_drive(iso_path)
        except processutils.ProcessExecutionError as e:
            with excutils.save_and_reraise_exception():
                LOG.error('Creating config drive failed with '
                          'error: %s', e, instance=instance)

    configdrive_dir = os.path.join(
        nova.conf.CONF.instances_path, instance.name, 'configdrive')
    if not os.path.exists(configdrive_dir):
        fileutils.ensure_tree(configdrive_dir)

    with utils.tempdir() as tmpdir:
        mounted = False
        try:
            _, err = utils.execute(
                'mount', '-o',
                'loop,uid=%d,gid=%d' % (os.getuid(), os.getgid()),
                iso_path, tmpdir, run_as_root=True)
            mounted = True

            # Copy and adjust the files from the ISO so that we
            # don't have the ISO mounted during the life cycle of the
            # instance and the directory can be removed once the
            # instance is terminated.
            for ent in os.listdir(tmpdir):
                shutil.copytree(
                    os.path.join(tmpdir, ent),
                    os.path.join(configdrive_dir, ent))

            utils.execute('chmod', '-R', '775', configdrive_dir,
                          run_as_root=True)
            utils.execute('chown', '-R',
                          '%s:%s' % (storage_id, storage_id),
                          configdrive_dir, run_as_root=True)
        finally:
            if mounted:
                utils.execute('umount', tmpdir, run_as_root=True)

    return configdrive_dir
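# Standalone sketch of the idmap parse above, run against the sample
# value documented in the comment (no LXD connection needed):
import json

raw = ('[{"Isuid":true,"Isgid":false,"Hostid":100000,'
       '"Nsid":0,"Maprange":65536},'
       '{"Isuid":false,"Isgid":true,"Hostid":100000,'
       '"Nsid":0,"Maprange":65536}]')
uid_maps = [m for m in json.loads(raw) if m.get('Isuid')]
storage_id = uid_maps[0].get('Hostid', 0) if uid_maps else 0
print(storage_id)  # -> 100000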