Example #1
 def attach(self, context, volmap):
     mountpoint = mount.get_mountpoint(volmap.volume.uuid)
     fileutils.ensure_tree(mountpoint)
     filename = '/'.join([mountpoint, volmap.volume.uuid])
     with open(filename, 'wb') as fd:
         content = utils.decode_file_data(volmap.contents)
         fd.write(content)
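
Every example on this page leans on fileutils.ensure_tree from oslo.utils. As a rough sketch of the behavior they rely on (semantics assumed from the library), it is essentially os.makedirs that tolerates an already-existing directory:

import errno
import os

def ensure_tree(path, mode=0o777):
    """Create a directory (and missing parents), ignoring the error
    when the path already exists as a directory."""
    try:
        os.makedirs(path, mode)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            if not os.path.isdir(path):
                raise
        else:
            raise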
Example #2
    def _ensure_share_mounted(self, share=None):
        """Mount SOFS if need be."""
        fileutils.ensure_tree(self.sofs_mount_point)

        if not self._sofs_is_mounted():
            self._execute('mount', '-t', 'sofs', self.sofs_config,
                          self.sofs_mount_point, run_as_root=True)
            # Check whether the mount command succeeded
            if not self._sofs_is_mounted():
                msg = _("Cannot mount Scality SOFS, check syslog for errors")
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

        fileutils.ensure_tree(self.sofs_abs_volume_dir)

        # We symlink the '00' subdir to its parent dir to maintain
        # compatibility with previous version of this driver.
        try:
            os.symlink(".", self._get_mount_point_for_share())
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                if not os.path.islink(self._get_mount_point_for_share()):
                    raise
            else:
                raise
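
The EEXIST handling around os.symlink above generalizes to a small helper; a hypothetical refactoring, not part of the driver itself:

import errno
import os

def ensure_symlink(source, link_path):
    """Create a symlink idempotently: tolerate EEXIST only when the
    existing path is itself a symlink."""
    try:
        os.symlink(source, link_path)
    except OSError as exc:
        if exc.errno != errno.EEXIST or not os.path.islink(link_path):
            raise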
Example #3
def write_stored_info(target, field=None, value=None):
    """Write information about an image."""

    if not field:
        return

    info_file = get_info_filename(target)
    LOG.info(_LI('Writing stored info to %s'), info_file)
    fileutils.ensure_tree(os.path.dirname(info_file))

    lock_name = 'info-%s' % os.path.split(target)[-1]
    lock_path = os.path.join(CONF.instances_path, 'locks')

    @utils.synchronized(lock_name, external=True, lock_path=lock_path)
    def write_file(info_file, field, value):
        d = {}

        if os.path.exists(info_file):
            with open(info_file, 'r') as f:
                d = _read_possible_json(f.read(), info_file)

        d[field] = value
        d['%s-timestamp' % field] = time.time()

        with open(info_file, 'w') as f:
            f.write(jsonutils.dumps(d))

    write_file(info_file, field, value)
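
A hypothetical call site (path and checksum invented for illustration); ensure_tree guarantees the info file's directory exists before the lock-protected write runs:

write_stored_info('/var/lib/nova/instances/_base/abc123',
                  field='sha1',
                  value='da39a3ee5e6b4b0d3255bfef95601890afd80709')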
Example #4
 def do_setup(self, context):
     """Any initialization the volume driver does while starting."""
     self._check_prerequisites()
     self._mount_sofs()
     voldir = os.path.join(self.configuration.scality_sofs_mount_point,
                           self.configuration.scality_sofs_volume_dir)
     fileutils.ensure_tree(voldir)
Example #5
def _cache_tftp_images(ctx, node, pxe_info):
    """Fetch the necessary kernels and ramdisks for the instance."""
    fileutils.ensure_tree(
        os.path.join(CONF.pxe.tftp_root, node.uuid))
    LOG.debug("Fetching kernel and ramdisk for node %s",
              node.uuid)
    deploy_utils.fetch_images(ctx, AgentTFTPImageCache(), pxe_info.values())
Example #6
 def put(self, bucket_name):
     path = os.path.abspath(os.path.join(self.application.directory, bucket_name))
     if not path.startswith(self.application.directory) or os.path.exists(path):
         self.set_status(403)
         return
     fileutils.ensure_tree(path)
     self.finish()
Example #7
 def __init__(self, host=None, conf=None):
     super(DhcpAgent, self).__init__(host=host)
     self.needs_resync_reasons = collections.defaultdict(list)
     self.dhcp_ready_ports = set()
     self.conf = conf or cfg.CONF
      # If 'resync_throttle' is mistakenly configured to be greater than
      # 'resync_interval', raise an exception and log a message.
     if self.conf.resync_throttle > self.conf.resync_interval:
         msg = _("DHCP agent must have resync_throttle <= resync_interval")
         LOG.exception(msg)
         raise exceptions.InvalidConfigurationOption(
             opt_name='resync_throttle',
             opt_value=self.conf.resync_throttle)
     self._periodic_resync_event = threading.Event()
     self.cache = NetworkCache()
     self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
     self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, self.conf.host)
     # create dhcp dir to store dhcp info
     dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
     fileutils.ensure_tree(dhcp_dir, mode=0o755)
     self.dhcp_version = self.dhcp_driver_cls.check_version()
     self._populate_networks_cache()
     # keep track of mappings between networks and routers for
     # metadata processing
     self._metadata_routers = {}  # {network_id: router_id}
     self._process_monitor = external_process.ProcessMonitor(
         config=self.conf,
         resource_type='dhcp')
     self._pool_size = DHCP_PROCESS_GREENLET_MIN
     self._pool = eventlet.GreenPool(size=self._pool_size)
     self._queue = queue.ResourceProcessingQueue()
Example #8
    def cache(self, fetch_func, filename, size=None, *args, **kwargs):
        """Creates image from template.

        Ensures that the template and image do not already exist.
        Ensures that the base directory exists.
        Synchronizes on template fetching.

        :fetch_func: Function that creates the base image
                     Should accept `target` argument.
        :filename: Name of the file in the image directory
        :size: Size of created image in bytes (optional)
        """

        @utils.synchronized(filename, external=True, lock_path=self.lock_path)
        def fetch_func_sync(target, *args, **kwargs):
            # The image may have been fetched by another call while
            # this one was waiting to obtain the lock.
            if not os.path.exists(target):
                fetch_func(target=target, *args, **kwargs)

        base_dir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name)
        if not os.path.exists(base_dir):
            fileutils.ensure_tree(base_dir)
        base = os.path.join(base_dir, filename)

        if not self.exists() or not os.path.exists(base):
            self.create_image(fetch_func_sync, base, size, *args, **kwargs)

        if size:
            if size > self.get_disk_size(base):
                self.resize_image(size)

            if self.preallocate and self._can_fallocate() and os.access(self.path, os.W_OK):
                utils.execute("fallocate", "-n", "-l", size, self.path)
Example #9
 def _start(self):
     super(FileDriver, self)._start()
     for a_dir in self._reserved_dirs:
         try:
             fileutils.ensure_tree(a_dir)
         except OSError as e:
             raise coordination.ToozConnectionError(e)
Example #10
    def __enter__(self):
        try:
            context = q_context.get_admin_context()
            db_storage_driver = cert_utils.DbCertificateStorageDriver(
                context)
            with client_cert.ClientCertificateManager(
                cert_utils.NSX_OPENSTACK_IDENTITY,
                None,
                db_storage_driver) as cert_manager:
                if not cert_manager.exists():
                    msg = _("Unable to load from nsx-db")
                    raise nsx_exc.ClientCertificateException(err_msg=msg)

                filename = self._filename
                if not os.path.exists(os.path.dirname(filename)):
                    if len(os.path.dirname(filename)) > 0:
                        fileutils.ensure_tree(os.path.dirname(filename))
                cert_manager.export_pem(filename)

                expires_in_days = cert_manager.expires_in_days()
                self._check_expiration(expires_in_days)
        except Exception as e:
            self._on_exit()
            raise e

        return self
Example #11
def _sign_csr(csr_text, ca_folder):
    with utils.tempdir() as tmpdir:
        inbound = os.path.join(tmpdir, 'inbound.csr')
        outbound = os.path.join(tmpdir, 'outbound.csr')

        try:
            with open(inbound, 'w') as csrfile:
                csrfile.write(csr_text)
        except IOError:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to write inbound.csr'))

        LOG.debug('Flags path: %s', ca_folder)
        start = os.getcwd()

        # Change working dir to CA
        fileutils.ensure_tree(ca_folder)
        os.chdir(ca_folder)
        utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
                      './openssl.cnf', '-infiles', inbound)
        out, _err = utils.execute('openssl', 'x509', '-in', outbound,
                                  '-serial', '-noout')
        serial = out.rpartition('=')[2].strip()
        os.chdir(start)

        with open(outbound, 'r') as crtfile:
            return (serial, crtfile.read())
Example #12
    def cache(self, fetch_func, filename, size=None, *args, **kwargs):
        """Creates image from template.

        Ensures that the template and image do not already exist.
        Ensures that the base directory exists.
        Synchronizes on template fetching.

        :fetch_func: Function that creates the base image
                     Should accept `target` argument.
        :filename: Name of the file in the image directory
        :size: Size of created image in bytes (optional)
        """
        print('whr [img back cache] start %s' % time.time())

        time_ref = time.time()
        time_prev = time_ref

        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        if not os.path.exists(base_dir):
            fileutils.ensure_tree(base_dir)
        base = os.path.join(base_dir, filename)

        @utils.synchronized(filename, external=True, lock_path=self.lock_path)
        def fetch_func_sync(target, *args, **kwargs):
            # NOTE(mdbooth): This method is called as a callback by the
            # create_image() method of a specific backend. It assumes that
            # target will be in the image cache, which is why it holds a
            # lock, and does not overwrite an existing file. However,
            # this is not true for all backends. Specifically Lvm writes
            # directly to the target rather than to the image cache,
            # and additionally it creates the target in advance.
            # This guard is only relevant in the context of the lock if the
            # target is in the image cache. If it isn't, we should
            # call fetch_func. The lock we're holding is also unnecessary in
            # that case, but it will not result in incorrect behaviour.
            if target != base or not os.path.exists(target):
                fetch_func(target=target, *args, **kwargs)
        print('whr [img back cache] %s %s' % (filename, base))
        print('whr [img back cache] end  %0.06f' % (time.time() - time_prev))
        time_prev = time.time()
        if not self.exists() or not os.path.exists(base):
            print('whr [img back cache] path does not exist')
            self.create_image(fetch_func_sync, base, size,
                              *args, **kwargs)
        print('whr [img back cache] end  %0.06f' % (time.time() - time_prev))
        time_prev = time.time()
        if size:
            print('whr [img back cache] size given')
            # create_image() only creates the base image if needed, so
            # we cannot rely on it to exist here
            if os.path.exists(base) and size > self.get_disk_size(base):
                self.resize_image(size)

            if (self.preallocate and self._can_fallocate() and
                    os.access(self.path, os.W_OK)):
                utils.execute('fallocate', '-n', '-l', size, self.path)
        print('whr [img back cache] end  %0.06f' % (time.time() - time_prev))
        time_prev = time.time()
Example #13
 def _get_state_file_path(self, loadbalancer_id, kind,
                          ensure_state_dir=True):
     """Returns the file name for a given kind of config file."""
     confs_dir = os.path.abspath(os.path.normpath(self.state_path))
     conf_dir = os.path.join(confs_dir, loadbalancer_id)
     if ensure_state_dir:
         fileutils.ensure_tree(conf_dir, 0o755)
     return os.path.join(conf_dir, kind)
Example #14
def _get_conf_base(cfg_root, uuid, ensure_conf_dir):
    #TODO(mangelajo): separate responsibilities here, ensure_conf_dir
    #                 should be a separate function
    conf_dir = os.path.abspath(os.path.normpath(cfg_root))
    conf_base = os.path.join(conf_dir, uuid)
    if ensure_conf_dir:
        fileutils.ensure_tree(conf_dir, mode=0o755)
    return conf_base
Example #15
 def _setup_allocation_data(self):
     if not os.path.exists(self._alloc_info_file_path):
         fileutils.ensure_tree(os.path.dirname(self._alloc_info_file_path))
         self._allocation_data = {}
         self._update_allocation_data_file()
     else:
         with open(self._alloc_info_file_path, "r") as f:
             self._allocation_data = json.load(f)
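
The writer this method calls is not shown; a minimal sketch consistent with the call site (method name from the code above, body assumed) could be:

import json

def _update_allocation_data_file(self):
    # Persist the in-memory allocation data back to the info file.
    with open(self._alloc_info_file_path, 'w') as f:
        json.dump(self._allocation_data, f)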
Example #16
    def do_setup(self, context):
        """Setup the Windows Volume driver.

        Called one time by the manager after the driver is loaded.
        Validate the flags we care about
        """
        fileutils.ensure_tree(self.configuration.windows_iscsi_lun_path)
        fileutils.ensure_tree(CONF.image_conversion_dir)
Example #17
def _cache_ramdisk_kernel(ctx, node, pxe_info):
    """Fetch the necessary kernels and ramdisks for the instance."""
    fileutils.ensure_tree(
        os.path.join(pxe_utils.get_root_dir(), node.uuid))
    LOG.debug("Fetching necessary kernel and ramdisk for node %s",
              node.uuid)
    deploy_utils.fetch_images(ctx, TFTPImageCache(), list(pxe_info.values()),
                              CONF.force_raw_images)
Example #18
def setup_test_logging(config_opts, log_dir, log_file_path_template):
    # Have each test log into its own log file
    config_opts.set_override("debug", True)
    fileutils.ensure_tree(log_dir, mode=0o755)
    log_file = sanitize_log_path(os.path.join(log_dir, log_file_path_template))
    config_opts.set_override("log_file", log_file)
    config_opts.set_override("use_stderr", False)
    config.setup_logging()
Example #19
def ensure_ca_filesystem():
    """Ensure the CA filesystem exists."""
    ca_dir = ca_folder()
    if not os.path.exists(ca_path()):
        genrootca_sh_path = os.path.abspath(
                os.path.join(os.path.dirname(__file__), 'CA', 'genrootca.sh'))

        fileutils.ensure_tree(ca_dir)
        utils.execute("sh", genrootca_sh_path, cwd=ca_dir)
Example #20
def _ensure_config_dirs_exist(node_uuid):
    """Ensure that the node's and PXE configuration directories exist.

    :param node_uuid: the UUID of the node.

    """
    root_dir = get_root_dir()
    fileutils.ensure_tree(os.path.join(root_dir, node_uuid))
    fileutils.ensure_tree(os.path.join(root_dir, PXE_CFG_DIR_NAME))
Example #21
 def _create_instance_file(self, id, name, data):
     file_dir = os.path.join(CONF.instances_path, id)
     fileutils.ensure_tree(file_dir)
     file = os.path.join(file_dir, name)
     with open(file, 'a') as f:
         f.write(data)
     os.chmod(file_dir, 0o700)
     os.chmod(file, 0o600)
     return file
Example #22
    def resolve_driver_format(self):
        """Return the driver format for self.path.

        First checks self.disk_info_path for an entry.
        If it's not there, calls self._get_driver_format(), and then
        stores the result in self.disk_info_path

        See https://bugs.launchpad.net/nova/+bug/1221190
        """
        def _dict_from_line(line):
            if not line:
                return {}
            try:
                return jsonutils.loads(line)
            except (TypeError, ValueError) as e:
                msg = (_("Could not load line %(line)s, got error "
                        "%(error)s") %
                        {'line': line, 'error': e})
                raise exception.InvalidDiskInfo(reason=msg)

        @utils.synchronized(self.disk_info_path, external=False,
                            lock_path=self.lock_path)
        def write_to_disk_info_file():
            # Use os.open to create it without group or world write permission.
            fd = os.open(self.disk_info_path, os.O_RDONLY | os.O_CREAT, 0o644)
            with os.fdopen(fd, "r") as disk_info_file:
                line = disk_info_file.read().rstrip()
                dct = _dict_from_line(line)

            if self.path in dct:
                msg = _("Attempted overwrite of an existing value.")
                raise exception.InvalidDiskInfo(reason=msg)
            dct.update({self.path: driver_format})

            tmp_path = self.disk_info_path + ".tmp"
            fd = os.open(tmp_path, os.O_WRONLY | os.O_CREAT, 0o644)
            with os.fdopen(fd, "w") as tmp_file:
                tmp_file.write('%s\n' % jsonutils.dumps(dct))
            os.rename(tmp_path, self.disk_info_path)

        try:
            if (self.disk_info_path is not None and
                        os.path.exists(self.disk_info_path)):
                with open(self.disk_info_path) as disk_info_file:
                    line = disk_info_file.read().rstrip()
                    dct = _dict_from_line(line)
                    for path, driver_format in six.iteritems(dct):
                        if path == self.path:
                            return driver_format
            driver_format = self._get_driver_format()
            if self.disk_info_path is not None:
                fileutils.ensure_tree(os.path.dirname(self.disk_info_path))
                write_to_disk_info_file()
        except OSError as e:
            raise exception.DiskInfoReadWriteFail(reason=six.text_type(e))
        return driver_format
Example #23
 def test_ensure_tree(self):
     tmpdir = tempfile.mkdtemp()
     try:
         testdir = "%s/foo/bar/baz" % (tmpdir,)
         fileutils.ensure_tree(testdir, TEST_PERMISSIONS)
         self.assertTrue(os.path.isdir(testdir))
         self.assertEqual(os.stat(testdir).st_mode, TEST_PERMISSIONS | stat.S_IFDIR)
     finally:
         if os.path.exists(tmpdir):
             shutil.rmtree(tmpdir)
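
The test relies on a module-level TEST_PERMISSIONS constant that is not shown here; any fixed directory mode would do, for example:

TEST_PERMISSIONS = 0o755  # assumed value; the test only needs a fixed mode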
Example #24
 def _add_file(self, basedir, path, data):
     filepath = os.path.join(basedir, path)
     dirname = os.path.dirname(filepath)
     fileutils.ensure_tree(dirname)
     with open(filepath, 'wb') as f:
         # the given data can be either text or bytes. we can only write
         # bytes into files.
         if isinstance(data, six.text_type):
             data = data.encode('utf-8')
         f.write(data)
Example #25
 def _copy_ploop_image(base, target, size):
     # Ploop disk is a directory with data file(root.hds) and
     # DiskDescriptor.xml, so create this dir
     fileutils.ensure_tree(target)
     image_path = os.path.join(target, "root.hds")
     libvirt_utils.copy_image(base, image_path)
     utils.execute('ploop', 'restore-descriptor', '-f', self.pcs_format,
                   target, image_path)
     if size:
         self.resize_image(size)
Example #26
    def _inject_userdata(self, id, i_meta):
        if isinstance(id, dict):
            id = id.get('id')

        metadata_directory = os.path.join(CONF.docker.metadata_directory, id, 'userdata')
        fileutils.ensure_tree(metadata_directory)
        with novadocker_utils.ConfigDriveBuilder(instance_md=i_meta) as cdb:
            cdb.make_drive(metadata_directory)
        os.chmod(metadata_directory, 0o700)

        return metadata_directory
Example #27
def ensure_directory_exists_without_file(path):
    dirname = os.path.dirname(path)
    if os.path.isdir(dirname):
        try:
            os.unlink(path)
        except OSError:
            with excutils.save_and_reraise_exception() as ctxt:
                if not os.path.exists(path):
                    ctxt.reraise = False
    else:
        fileutils.ensure_tree(dirname, mode=0o755)
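
A hypothetical call (path invented for illustration): guarantee the parent directory for a pid file exists while clearing any stale file at that exact path:

ensure_directory_exists_without_file('/var/lib/neutron/external/pids/r1.pid')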
Example #28
 def _copy_ploop_image(base, target, size):
     # Ploop disk is a directory with data file(root.hds) and
     # DiskDescriptor.xml, so create this dir
     fileutils.ensure_tree(target)
     image_path = os.path.join(target, "root.hds")
     libvirt_utils.copy_image(base, image_path)
     nova.privsep.libvirt.ploop_restore_descriptor(target,
                                                   image_path,
                                                   self.pcs_format)
     if size:
         self.resize_image(size)
Example #29
    def test__cache_tftp_images_master_path(self, mock_fetch_image):
        temp_dir = tempfile.mkdtemp()
        self.config(tftp_root=temp_dir, group="pxe")
        self.config(tftp_master_path=os.path.join(temp_dir, "tftp_master_path"), group="pxe")
        image_path = os.path.join(temp_dir, self.node.uuid, "deploy_kernel")
        image_info = {"deploy_kernel": ("deploy_kernel", image_path)}
        fileutils.ensure_tree(CONF.pxe.tftp_master_path)

        pxe._cache_ramdisk_kernel(None, self.node, image_info)

        mock_fetch_image.assert_called_once_with(None, mock.ANY, [("deploy_kernel", image_path)], True)
Example #30
 def _do_create_group():
     if os.path.exists(os.path.join(group_dir, ".metadata")):
         # NOTE(sileht): We update the group metadata even when
         # it is already correct, to ensure dict keys are converted
         # to unicode in case the file was written with
         # tooz < 1.36
         self._update_group_metadata(group_meta_path, group_id)
         raise coordination.GroupAlreadyExist(group_id)
     else:
         fileutils.ensure_tree(group_dir)
         self._update_group_metadata(group_meta_path, group_id)
Example #31
 def _create_temp_file(self, *args, **kwargs):
     fileutils.ensure_tree(self._tmp_dir)
     fd, tmp = tempfile.mkstemp(dir=self._tmp_dir, *args, **kwargs)
     os.close(fd)
     return tmp
Example #32
    def create_iscsi_target(self,
                            name,
                            tid,
                            lun,
                            path,
                            chap_auth=None,
                            **kwargs):

        (out, err) = utils.execute('iscsictl',
                                   '-c',
                                   'target=ALL',
                                   run_as_root=True)
        LOG.debug("Targets prior to update: %s", out)
        volumes_dir = self._get_volumes_dir()
        fileutils.ensure_tree(volumes_dir)

        vol_id = name.split(':')[1]

        cfg_port = kwargs.get('portals_port')
        cfg_ips = kwargs.get('portals_ips')

        portals = ','.join(
            map(lambda ip: self._get_portal(ip, cfg_port), cfg_ips))

        if chap_auth is None:
            volume_conf = self.TARGET_FMT % (name, path, portals)
        else:
            volume_conf = self.TARGET_FMT_WITH_CHAP % (name, path, portals,
                                                       '"%s":"%s"' % chap_auth)
        LOG.debug('Creating iscsi_target for: %s', vol_id)
        volume_path = os.path.join(volumes_dir, vol_id)

        if os.path.exists(volume_path):
            LOG.warning(
                _LW('Persistence file already exists for volume, '
                    'found file at: %s'), volume_path)
        utils.robust_file_write(volumes_dir, vol_id, volume_conf)
        LOG.debug('Created volume path %(vp)s,\n'
                  'content: %(vc)s', {
                      'vp': volume_path,
                      'vc': volume_conf
                  })

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name:
            LOG.debug(
                'Detected old persistence file for volume '
                '%(vol)s at %(old_name)s', {
                    'vol': vol_id,
                    'old_name': old_name
                })
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # With the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.
            (out, err) = utils.execute('iscsictl',
                                       '-S',
                                       'target=%s' % name,
                                       '-f',
                                       volume_path,
                                       '-x',
                                       self.config,
                                       run_as_root=True)
        except putils.ProcessExecutionError as e:
            LOG.error(
                _LE("Failed to create iscsi target for volume "
                    "id:%(vol_id)s: %(e)s"), {
                        'vol_id': vol_id,
                        'e': e
                    })

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
        finally:
            LOG.debug("StdOut from iscsictl -S: %s", out)
            LOG.debug("StdErr from iscsictl -S: %s", err)

        # Grab targets list for debug
        (out, err) = utils.execute('iscsictl',
                                   '-c',
                                   'target=ALL',
                                   run_as_root=True)
        LOG.debug("Targets after update: %s", out)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(
                _LE("Failed to create iscsi target for volume "
                    "id:%(vol_id)s. Please verify your configuration "
                    "in %(volumes_dir)s'"), {
                        'vol_id': vol_id,
                        'volumes_dir': volumes_dir,
                    })
            raise exception.NotFound()

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
Example #33
def _cache_ramdisk_kernel(ctx, node, pxe_info):
    """Fetch the necessary kernels and ramdisks for the instance."""
    fileutils.ensure_tree(os.path.join(pxe_utils.get_root_dir(), node.uuid))
    LOG.debug("Fetching necessary kernel and ramdisk for node %s", node.uuid)
    deploy_utils.fetch_images(ctx, TFTPImageCache(), list(pxe_info.values()),
                              CONF.force_raw_images)
Example #34
 def _mount_device(self, volmap, devpath):
     mountpoint = mount.get_mountpoint(volmap.volume.uuid)
     fileutils.ensure_tree(mountpoint)
     mount.do_mount(devpath, mountpoint, CONF.volume.fstype)
Example #35
def _check_bootstrap_new_branch(branch, version_path, addn_kwargs):
    addn_kwargs['version_path'] = version_path
    addn_kwargs['head'] = _get_branch_head(branch)
    if not os.path.exists(version_path):
        # Bootstrap initial directory structure
        fileutils.ensure_tree(version_path, mode=0o755)
Example #36
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, allocations, network_info=None,
              block_device_info=None):
        """Create a new lxd container as a nova instance.

        Creating a new container requires a number of steps. First, the
        image is fetched from glance, if needed. Next, the network is
        connected. A profile is created in LXD, and then the container
        is created and started.

        See `nova.virt.driver.ComputeDriver.spawn` for more
        information.
        """
        try:
            self.client.containers.get(instance.name)
            raise exception.InstanceExists(name=instance.name)
        except lxd_exceptions.LXDAPIException as e:
            if e.response.status_code != 404:
                raise  # Re-raise the exception if it wasn't NotFound

        instance_dir = common.InstanceAttributes(instance).instance_dir
        if not os.path.exists(instance_dir):
            fileutils.ensure_tree(instance_dir)

        # Check to see if LXD already has a copy of the image. If not,
        # fetch it.
        try:
            self.client.images.get_by_alias(instance.image_ref)
        except lxd_exceptions.LXDAPIException as e:
            if e.response.status_code != 404:
                raise
            _sync_glance_image_to_lxd(
                self.client, context, instance.image_ref)

        # Plug in the network
        if network_info:
            timeout = CONF.vif_plugging_timeout
            if (utils.is_neutron() and timeout):
                events = [('network-vif-plugged', vif['id'])
                          for vif in network_info if not vif.get(
                    'active', True)]
            else:
                events = []

            try:
                with self.virtapi.wait_for_instance_event(
                        instance, events, deadline=timeout,
                        error_callback=_neutron_failed_callback):
                    self.plug_vifs(instance, network_info)
            except eventlet.timeout.Timeout:
                LOG.warn('Timeout waiting for vif plugging callback for '
                         'instance %(uuid)s', {'uuid': instance['name']})
                if CONF.vif_plugging_is_fatal:
                    self.destroy(
                        context, instance, network_info, block_device_info)
                    raise exception.InstanceDeployFailure(
                        'Timeout waiting for vif plugging',
                        instance_id=instance['name'])

        # Create the profile
        try:
            profile = flavor.to_profile(
                self.client, instance, network_info, block_device_info)
        except lxd_exceptions.LXDAPIException as e:
            with excutils.save_and_reraise_exception():
                self.cleanup(
                    context, instance, network_info, block_device_info)

        # Create the container
        container_config = {
            'name': instance.name,
            'profiles': [profile.name],
            'source': {
                'type': 'image',
                'alias': instance.image_ref,
            },
        }
        try:
            container = self.client.containers.create(
                container_config, wait=True)
        except lxd_exceptions.LXDAPIException as e:
            with excutils.save_and_reraise_exception():
                self.cleanup(
                    context, instance, network_info, block_device_info)

        lxd_config = self.client.host_info
        storage.attach_ephemeral(
            self.client, block_device_info, lxd_config, instance)
        if configdrive.required_by(instance):
            configdrive_path = self._add_configdrive(
                context, instance,
                injected_files, admin_password,
                network_info)

            profile = self.client.profiles.get(instance.name)
            config_drive = {
                'configdrive': {
                    'path': '/config-drive',
                    'source': configdrive_path,
                    'type': 'disk',
                    'readonly': 'True',
                }
            }
            profile.devices.update(config_drive)
            profile.save()

        try:
            self.firewall_driver.setup_basic_filtering(
                instance, network_info)
            self.firewall_driver.instance_filter(
                instance, network_info)

            container.start(wait=True)

            self.firewall_driver.apply_instance_filter(
                instance, network_info)
        except lxd_exceptions.LXDAPIException as e:
            with excutils.save_and_reraise_exception():
                self.cleanup(
                    context, instance, network_info, block_device_info)
Example #37
    def create_iscsi_target(self,
                            name,
                            tid,
                            lun,
                            path,
                            chap_auth=None,
                            **kwargs):

        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        # NOTE(jdg): Remove this when we get to the bottom of bug: #1398078
        # for now, since we intermittently hit target already exists we're
        # adding some debug info to try and pinpoint what's going on
        (out, err) = utils.execute('tgtadm',
                                   '--lld',
                                   'iscsi',
                                   '--op',
                                   'show',
                                   '--mode',
                                   'target',
                                   run_as_root=True)
        LOG.debug("Targets prior to update: %s", out)
        fileutils.ensure_tree(self.volumes_dir)

        vol_id = name.split(':')[1]
        write_cache = self.configuration.get('iscsi_write_cache', 'on')
        driver = self.iscsi_protocol
        chap_str = ''

        if chap_auth is not None:
            chap_str = 'incominguser %s %s' % chap_auth

        target_flags = self.configuration.get('iscsi_target_flags', '')
        if target_flags:
            target_flags = 'bsoflags ' + target_flags

        volume_conf = self.VOLUME_CONF % {
            'name': name,
            'path': path,
            'driver': driver,
            'chap_auth': chap_str,
            'target_flags': target_flags,
            'write_cache': write_cache
        }

        LOG.debug('Creating iscsi_target for Volume ID: %s', vol_id)
        volumes_dir = self.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        if os.path.exists(volume_path):
            LOG.warning(
                _LW('Persistence file already exists for volume, '
                    'found file at: %s'), volume_path)
        f = open(volume_path, 'w+')
        f.write(volume_conf)
        f.close()
        LOG.debug(('Created volume path %(vp)s,\n'
                   'content: %(vc)s'), {
                       'vp': volume_path,
                       'vc': volume_conf
                   })

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            LOG.debug(
                'Detected old persistence file for volume '
                '%(vol)s at %(old_name)s', {
                    'vol': vol_id,
                    'old_name': old_name
                })
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # With the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.

            self._do_tgt_update(name)
        except putils.ProcessExecutionError as e:
            if "target already exists" in e.stderr:
                # Adding the additional Warning message below for a clear
                # ER marker (Ref bug: #1398078).
                LOG.warning(
                    _LW('Could not create target because '
                        'it already exists for volume: %s'), vol_id)
                LOG.debug('Exception was: %s', e)

            else:
                LOG.error(
                    _LE("Failed to create iscsi target for Volume "
                        "ID: %(vol_id)s: %(e)s"), {
                            'vol_id': vol_id,
                            'e': e
                        })

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        # Grab targets list for debug
        # Consider adding a check for lun 0 and 1 for tgtadm
        # before considering this as valid
        (out, err) = utils.execute('tgtadm',
                                   '--lld',
                                   'iscsi',
                                   '--op',
                                   'show',
                                   '--mode',
                                   'target',
                                   run_as_root=True)
        LOG.debug("Targets after update: %s", out)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(
                _LE("Failed to create iscsi target for Volume "
                    "ID: %(vol_id)s. Please ensure your tgtd config "
                    "file contains 'include %(volumes_dir)s/*'"), {
                        'vol_id': vol_id,
                        'volumes_dir': volumes_dir,
                    })
            raise exception.NotFound()

        # NOTE(jdg): Sometimes we have some issues with the backing lun
        # not being created, believe this is due to a device busy
        # or something related, so we're going to add some code
        # here that verifies the backing lun (lun 1) was created
        # and we'll try and recreate it if it's not there
        if not self._verify_backing_lun(iqn, tid):
            try:
                self._recreate_backing_lun(iqn, tid, name, path)
            except putils.ProcessExecutionError:
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

            # Finally check once more and if no go, fail and punt
            if not self._verify_backing_lun(iqn, tid):
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
Example #38
 def _add_file(self, basedir, path, data):
     filepath = os.path.join(basedir, path)
     dirname = os.path.dirname(filepath)
     fileutils.ensure_tree(dirname)
     with open(filepath, 'wb') as f:
         f.write(data.encode('utf-8'))
Example #39
def temporary_dir():
    fileutils.ensure_tree(CONF.image_conversion_dir)

    return utils.tempdir(dir=CONF.image_conversion_dir)
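
Since utils.tempdir returns a context manager, a hypothetical caller (assuming CONF.image_conversion_dir names a writable scratch area) looks like:

import os

with temporary_dir() as tmp:
    raw_path = os.path.join(tmp, 'image.raw')
    with open(raw_path, 'wb') as f:
        f.write(b'\x00' * 512)  # placeholder for a real conversion step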
Example #40
 def __init__(self, *args, **kwargs):
     fileutils.ensure_tree(CONF.storage.file_system_dir)
Example #41
 def get_full_config_file_path(self, filename, ensure_conf_dir=True):
     conf_dir = self.get_conf_dir()
     if ensure_conf_dir:
         fileutils.ensure_tree(conf_dir, mode=0o755)
     return os.path.join(conf_dir, filename)
Example #42
def ensure_dir(dir_path):
    """Ensure a directory with 755 permissions mode."""
    fileutils.ensure_tree(dir_path, mode=0o755)
Example #43
 def ensure_config_dir(self, vpnservice):
     """Create config directory if it does not exist."""
     fileutils.ensure_tree(self.config_dir, 0o755)
     for subdir in self.CONFIG_DIRS:
         dir_path = os.path.join(self.config_dir, subdir)
         fileutils.ensure_tree(dir_path, 0o755)
Example #44
File: agent.py Project: Tan0/ironic
def _cache_tftp_images(ctx, node, pxe_info):
    """Fetch the necessary kernels and ramdisks for the instance."""
    fileutils.ensure_tree(os.path.join(CONF.pxe.tftp_root, node.uuid))
    LOG.debug("Fetching kernel and ramdisk for node %s", node.uuid)
    deploy_utils.fetch_images(ctx, AgentTFTPImageCache(), pxe_info.values())
Example #45
def build_instance_info_for_deploy(task):
    """Build instance_info necessary for deploying to a node.

    :param task: a TaskManager object containing the node
    :returns: a dictionary containing the properties to be updated
        in instance_info
    :raises: exception.ImageRefValidationFailed if image_source is not a
        Glance href and is not an HTTP(S) URL.
    """
    def validate_image_url(url, secret=False):
        """Validates image URL through the HEAD request.

        :param url: URL to be validated
        :param secret: if URL is secret (e.g. swift temp url),
            it will not be shown in logs.
        """
        try:
            image_service.HttpImageService().validate_href(url, secret)
        except exception.ImageRefValidationFailed as e:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    "Agent deploy supports only HTTP(S) URLs as "
                    "instance_info['image_source'] or swift "
                    "temporary URL. Either the specified URL is not "
                    "a valid HTTP(S) URL or is not reachable "
                    "for node %(node)s. Error: %(msg)s", {
                        'node': node.uuid,
                        'msg': e
                    })

    node = task.node
    instance_info = node.instance_info
    iwdi = node.driver_internal_info.get('is_whole_disk_image')
    image_source = instance_info['image_source']

    if service_utils.is_glance_image(image_source):
        glance = image_service.GlanceImageService(context=task.context)
        image_info = glance.show(image_source)
        LOG.debug('Got image info: %(info)s for node %(node)s.', {
            'info': image_info,
            'node': node.uuid
        })
        if CONF.agent.image_download_source == 'swift':
            swift_temp_url = glance.swift_temp_url(image_info)
            validate_image_url(swift_temp_url, secret=True)
            instance_info['image_url'] = swift_temp_url
            instance_info['image_checksum'] = image_info['checksum']
            instance_info['image_disk_format'] = image_info['disk_format']
            instance_info['image_os_hash_algo'] = image_info['os_hash_algo']
            instance_info['image_os_hash_value'] = image_info['os_hash_value']
        else:
            # Ironic caches and serves images from the httpboot server
            force_raw = direct_deploy_should_convert_raw_image(node)
            _, image_path = cache_instance_image(task.context,
                                                 node,
                                                 force_raw=force_raw)
            if force_raw:
                instance_info['image_disk_format'] = 'raw'
                # Standard behavior is for image_checksum to be MD5,
                # so if the hash algorithm is None, then we will use
                # sha256.
                os_hash_algo = image_info.get('os_hash_algo')
                if os_hash_algo == 'md5':
                    LOG.debug(
                        'Checksum calculation for image %(image)s is '
                        'set to \'%(algo)s\', changing to \'sha256\'', {
                            'algo': os_hash_algo,
                            'image': image_path
                        })
                    os_hash_algo = 'sha256'
                LOG.debug(
                    'Recalculating checksum for image %(image)s due to '
                    'image conversion.', {'image': image_path})
                instance_info['image_checksum'] = None
                hash_value = compute_image_checksum(image_path, os_hash_algo)
                instance_info['image_os_hash_algo'] = os_hash_algo
                instance_info['image_os_hash_value'] = hash_value
            else:
                instance_info['image_checksum'] = image_info['checksum']
                instance_info['image_disk_format'] = image_info['disk_format']
                instance_info['image_os_hash_algo'] = image_info[
                    'os_hash_algo']
                instance_info['image_os_hash_value'] = image_info[
                    'os_hash_value']

            # Create symlink and update image url
            symlink_dir = _get_http_image_symlink_dir_path()
            fileutils.ensure_tree(symlink_dir)
            symlink_path = _get_http_image_symlink_file_path(node.uuid)
            utils.create_link_without_raise(image_path, symlink_path)
            base_url = CONF.deploy.http_url
            if base_url.endswith('/'):
                base_url = base_url[:-1]
            http_image_url = '/'.join(
                [base_url, CONF.deploy.http_image_subdir, node.uuid])
            validate_image_url(http_image_url, secret=True)
            instance_info['image_url'] = http_image_url

        instance_info['image_container_format'] = (
            image_info['container_format'])
        instance_info['image_tags'] = image_info.get('tags', [])
        instance_info['image_properties'] = image_info['properties']

        if not iwdi:
            instance_info['kernel'] = image_info['properties']['kernel_id']
            instance_info['ramdisk'] = image_info['properties']['ramdisk_id']
    else:
        validate_image_url(image_source)
        instance_info['image_url'] = image_source

    if not iwdi:
        instance_info['image_type'] = 'partition'
        i_info = parse_instance_info(node)
        instance_info.update(i_info)
    else:
        instance_info['image_type'] = 'whole-disk-image'
    return instance_info
Example #46
 def setup(self):
     """Ensure the keychains and folders exist."""
     # NOTE(vish): One of the drawbacks of doing this in the api is
     #             the keys will only be on the api node that launched
     #             the cloudpipe.
     fileutils.ensure_tree(CONF.keys_path)
Example #47
 def _ensure_path(self, path):
     with _storagefailure_wrapper():
         fileutils.ensure_tree(path)
Example #48
def create_temporary_file(*args, **kwargs):
    fileutils.ensure_tree(CONF.image_conversion_dir)

    fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir, *args, **kwargs)
    os.close(fd)
    return tmp
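
A hypothetical caller; mkstemp-style keyword arguments pass straight through, so a suffix works, and the caller must remove the file itself:

import os

tmp_path = create_temporary_file(suffix='.qcow2')
try:
    with open(tmp_path, 'wb') as f:
        f.write(b'\x00' * 512)  # placeholder payload
finally:
    os.unlink(tmp_path)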
Example #49
 def _init_ha_conf_path(self):
     ha_full_path = os.path.dirname("/%s/" % self.conf.ha_confs_path)
     fileutils.ensure_tree(ha_full_path, mode=0o755)
Example #50
    def setUp(self):
        super(TargetDriverFixture, self).setUp()
        self.configuration = conf.Configuration(None)
        self.configuration.append_config_values = mock.Mock(return_value=0)
        self.configuration.safe_get = mock.Mock(side_effect=self.fake_safe_get)
        self.configuration.target_ip_address = '10.9.8.7'
        self.configuration.target_port = 3260

        self.fake_volumes_dir = tempfile.mkdtemp()
        fileutils.ensure_tree(self.fake_volumes_dir)

        self.fake_project_id = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
        self.fake_project_id_2 = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
        self.fake_volume_id = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba'

        self.addCleanup(self._cleanup)

        self.testvol =\
            {'project_id': self.fake_project_id,
             'name': 'testvol',
             'size': 1,
             'id': self.fake_volume_id,
             'volume_type_id': None,
             'provider_location': '10.10.7.1:3260 '
                                  'iqn.2010-10.org.openstack:'
                                  'volume-%s 0' % self.fake_volume_id,
             'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                              'c76370d66b 2FE0CQ8J196R',
             'provider_geometry': '512 512',
             'created_at': timeutils.utcnow(),
             'host': 'fake_host@lvm#lvm'}

        self.testvol_no_prov_loc = copy.copy(self.testvol)
        self.testvol_no_prov_loc['provider_location'] = None

        self.iscsi_target_prefix = 'iqn.2010-10.org.openstack:'
        self.target_string = ('127.0.0.1:3260,1 ' + self.iscsi_target_prefix +
                              'volume-%s' % self.testvol['id'])

        self.testvol_2 =\
            {'project_id': self.fake_project_id_2,
             'name': 'testvol2',
             'size': 1,
             'id': self.fake_volume_id,
             'volume_type_id': None,
             'provider_location': ('%(ip)s:%(port)d%(iqn)svolume-%(vol)s 2' %
                                   {'ip': self.configuration.target_ip_address,
                                    'port': self.configuration.target_port,
                                    'iqn': self.iscsi_target_prefix,
                                    'vol': self.fake_volume_id}),
             'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                              'c76370d66b 2FE0CQ8J196R',
             'provider_geometry': '512 512',
             'created_at': timeutils.utcnow(),
             'host': 'fake_host@lvm#lvm'}

        self.expected_iscsi_properties = \
            {'auth_method': 'CHAP',
             'auth_password': '******',
             'auth_username': '******',
             'encrypted': False,
             'logical_block_size': '512',
             'physical_block_size': '512',
             'target_discovered': False,
             'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' %
                           self.fake_volume_id,
             'target_lun': 0,
             'target_portal': '10.10.7.1:3260',
             'volume_id': self.fake_volume_id}

        self.VOLUME_ID = '83c2e877-feed-46be-8435-77884fe55b45'
        self.VOLUME_NAME = 'volume-' + self.VOLUME_ID
        self.test_vol = (self.iscsi_target_prefix + self.VOLUME_NAME)
Example #51
    def mount(self, fstype, export, vol_name, mountpoint, instance, options):
        """Ensure a mountpoint is available for an attachment, mounting it
        if necessary.

        If this is the first attachment on this mountpoint, we will mount it
        with:

          mount -t <fstype> <options> <export> <mountpoint>

        :param fstype: The filesystem type to be passed to mount command.
        :param export: The type-specific identifier of the filesystem to be
                       mounted. e.g. for nfs 'host.example.com:/mountpoint'.
        :param vol_name: The name of the volume on the remote filesystem.
        :param mountpoint: The directory where the filesystem will be
                           mounted on the local compute host.
        :param instance: The instance the volume will be attached to.
        :param options: An arbitrary list of additional arguments to be
                        passed to the mount command immediate before export
                        and mountpoint.
        """

        # NOTE(mdbooth): mount() may currently be called multiple times for a
        # single attachment. Any operation which calls
        # LibvirtDriver._hard_reboot will re-attach volumes which are probably
        # already attached, resulting in multiple mount calls.

        LOG.debug(
            '_HostMountState.mount(fstype=%(fstype)s, '
            'export=%(export)s, vol_name=%(vol_name)s, %(mountpoint)s, '
            'options=%(options)s) generation %(gen)s', {
                'fstype': fstype,
                'export': export,
                'vol_name': vol_name,
                'mountpoint': mountpoint,
                'options': options,
                'gen': self.generation
            })
        with self._get_locked(mountpoint) as mount:
            if not os.path.ismount(mountpoint):
                LOG.debug('Mounting %(mountpoint)s generation %(gen)s', {
                    'mountpoint': mountpoint,
                    'gen': self.generation
                })

                fileutils.ensure_tree(mountpoint)

                mount_cmd = ['mount', '-t', fstype]
                if options is not None:
                    mount_cmd.extend(options)
                mount_cmd.extend([export, mountpoint])

                try:
                    utils.execute(*mount_cmd, run_as_root=True)
                except Exception:
                    # Check to see if mountpoint is mounted despite the error
                    # eg it was already mounted
                    if os.path.ismount(mountpoint):
                        # We're not going to raise the exception because we're
                        # in the desired state anyway. However, this is still
                        # unusual so we'll log it.
                        LOG.exception(
                            _('Error mounting %(fstype)s export '
                              '%(export)s on %(mountpoint)s. '
                              'Continuing because mountpoint is '
                              'mounted despite this.'), {
                                  'fstype': fstype,
                                  'export': export,
                                  'mountpoint': mountpoint
                              })

                    else:
                        # If the mount failed there's no reason for us to keep
                        # a record of it. It will be created again if the
                        # caller retries.

                        # Delete while holding lock
                        del self.mountpoints[mountpoint]

                        raise

            mount.add_attachment(vol_name, instance.uuid)

        LOG.debug(
            '_HostMountState.mount() for %(mountpoint)s '
            'generation %(gen)s completed successfully', {
                'mountpoint': mountpoint,
                'gen': self.generation
            })
Example #52
    def _add_configdrive(self, context, instance,
                         injected_files, admin_password, network_info):
        """Create configdrive for the instance."""
        if CONF.config_drive_format != 'iso9660':
            raise exception.ConfigDriveUnsupportedFormat(
                format=CONF.config_drive_format)

        container = self.client.containers.get(instance.name)
        storage_id = 0
        """
        Determine UID shift used for container uid mapping
        Sample JSON config from LXD
        {
            "volatile.apply_template": "create",
            ...
            "volatile.last_state.idmap": "[
                {
                \"Isuid\":true,
                \"Isgid\":false,
                \"Hostid\":100000,
                \"Nsid\":0,
                \"Maprange\":65536
                },
                {
                \"Isuid\":false,
                \"Isgid\":true,
                \"Hostid\":100000,
                \"Nsid\":0,
                \"Maprange\":65536
                }] ",
            "volatile.tap5fd6808a-7b.name": "eth0"
        }
        """
        container_id_map = json.loads(
            container.config['volatile.last_state.idmap'])
        uid_map = list(filter(lambda id_map: id_map.get("Isuid"),
                              container_id_map))
        if uid_map:
            storage_id = uid_map[0].get("Hostid", 0)
        else:
            # Privileged containers do not have a uid/gid mapping;
            # the LXD API returns nothing for them.
            pass

        extra_md = {}
        if admin_password:
            extra_md['admin_pass'] = admin_password

        inst_md = instance_metadata.InstanceMetadata(
            instance, content=injected_files, extra_md=extra_md,
            network_info=network_info, request_context=context)

        iso_path = os.path.join(
            common.InstanceAttributes(instance).instance_dir,
            'configdrive.iso')

        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            try:
                cdb.make_drive(iso_path)
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    LOG.error('Creating config drive failed with '
                              'error: %s',
                              e, instance=instance)

        configdrive_dir = os.path.join(
            nova.conf.CONF.instances_path, instance.name, 'configdrive')
        if not os.path.exists(configdrive_dir):
            fileutils.ensure_tree(configdrive_dir)

        with utils.tempdir() as tmpdir:
            mounted = False
            try:
                _, err = utils.execute('mount',
                                       '-o',
                                       'loop,uid=%d,gid=%d' % (os.getuid(),
                                                               os.getgid()),
                                       iso_path, tmpdir,
                                       run_as_root=True)
                mounted = True

                # Copy and adjust the files from the ISO so that we
                # don't have the ISO mounted during the life cycle of the
                # instance and the directory can be removed once the instance
                # is terminated
                for ent in os.listdir(tmpdir):
                    shutil.copytree(os.path.join(tmpdir, ent),
                                    os.path.join(configdrive_dir, ent))

                utils.execute('chmod', '-R', '775', configdrive_dir,
                              run_as_root=True)
                utils.execute('chown', '-R',
                              '%s:%s' % (storage_id, storage_id),
                              configdrive_dir, run_as_root=True)
            finally:
                if mounted:
                    utils.execute('umount', tmpdir, run_as_root=True)

        return configdrive_dir