Example #1
    def _cache_image(fetch_func, target, fname, cow=False, *args, **kwargs):
        """Wrapper for a method that creates an image that caches the image.

        This wrapper will save the image into a common store and create a
        copy for use by the hypervisor.

        The underlying method should specify a kwarg of target representing
        where the image will be saved.

        fname is used as the filename of the base image.  The filename needs
        to be unique to a given image.

        If cow is True, it will make a CoW image instead of a copy.
        """
        if not os.path.exists(target):
            base_dir = os.path.join(CONF.instances_path, '_base')
            if not os.path.exists(base_dir):
                fileutils.ensure_tree(base_dir)
            base = os.path.join(base_dir, fname)

            @lockutils.synchronized(fname, 'nova-')
            def call_if_not_exists(base, fetch_func, *args, **kwargs):
                if not os.path.exists(base):
                    fetch_func(target=base, *args, **kwargs)

            call_if_not_exists(base, fetch_func, *args, **kwargs)

            if cow:
                libvirt_utils.create_cow_image(base, target)
            else:
                libvirt_utils.copy_image(base, target)
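Every example below ultimately calls fileutils.ensure_tree, which creates a directory tree with mkdir -p semantics. As a point of reference, here is a minimal sketch of such a helper using only the standard library (the real oslo/nova helper behaves like this, tolerating the directory being created concurrently):

import errno
import os


def ensure_tree(path):
    """Create a directory and any missing parents (mkdir -p)."""
    try:
        os.makedirs(path)
    except OSError as e:
        # Another process may have created the directory between the
        # caller's existence check and this call; that is not an error.
        if e.errno != errno.EEXIST or not os.path.isdir(path):
            raise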
Example #2
def write_stored_info(target, field=None, value=None):
    """Write information about an image."""

    if not field:
        return

    info_file = get_info_filename(target)
    LOG.info(_('Writing stored info to %s'), info_file)
    fileutils.ensure_tree(os.path.dirname(info_file))

    lock_name = 'info-%s' % os.path.split(target)[-1]
    lock_path = os.path.join(CONF.instances_path, 'locks')

    @lockutils.synchronized(lock_name, 'nova-', external=True,
                            lock_path=lock_path)
    def write_file(info_file, field, value):
        d = {}

        if os.path.exists(info_file):
            with open(info_file, 'r') as f:
                d = _read_possible_json(f.read(), info_file)

        d[field] = value
        d['%s-timestamp' % field] = time.time()

        with open(info_file, 'w') as f:
            f.write(json.dumps(d))

    write_file(info_file, field, value)
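Example #2 depends on a _read_possible_json helper that is not shown above. A hedged sketch of what such a helper plausibly does: treat an unparseable info file as empty so the caller can rewrite it, rather than failing.

import json
import logging

LOG = logging.getLogger(__name__)


def _read_possible_json(serialized, info_file):
    # Best-effort parse: log and return an empty dict when the stored
    # info file is corrupt or truncated (sketch of the assumed helper).
    try:
        return json.loads(serialized)
    except ValueError:
        LOG.error('Error reading image info file %s: %r',
                  info_file, serialized)
        return {}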
Example #3
    def _get_cache_image(self, context, instance, snapshot_id, suffix=""):
        def basepath(fname="", suffix=suffix):
            return os.path.join(libvirt_utils.get_instance_path(instance), fname + suffix)

        def raw(fname, image_type="raw"):
            return self.image_backend.image(instance, fname, image_type)

        # ensure directories exist and are writable
        fileutils.ensure_tree(basepath(suffix=""))
        fname = hashlib.sha1(snapshot_id).hexdigest()
        LOG.debug(_("cloudlet, caching file at %s" % fname))
        size = instance["root_gb"] * 1024 * 1024 * 1024
        if size == 0:
            size = None

        raw("disk").cache(
            fetch_func=libvirt_utils.fetch_image,
            context=context,
            filename=fname,
            size=size,
            image_id=snapshot_id,
            user_id=instance["user_id"],
            project_id=instance["project_id"],
        )

        # from cache method at virt/libvirt/imagebackend.py
        abspath = os.path.join(
            libvirt_driver.CONF.instances_path, libvirt_driver.CONF.image_cache_subdirectory_name, fname
        )
        return abspath
Example #4
    def cache(self, fetch_func, filename, size=None, *args, **kwargs):
        """Creates image from template.

        Ensures that the template and image do not already exist.
        Ensures that base directory exists.
        Synchronizes on template fetching.

        :fetch_func: Function that creates the base image
                     Should accept `target` argument.
        :filename: Name of the file in the image directory
        :size: Size of created image in bytes (optional)
        """
        @utils.synchronized(filename, external=True, lock_path=self.lock_path)
        def fetch_func_sync(target, *args, **kwargs):
            fetch_func(target=target, *args, **kwargs)

        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        if not os.path.exists(base_dir):
            fileutils.ensure_tree(base_dir)
        base = os.path.join(base_dir, filename)

        if not self.check_image_exists() or not os.path.exists(base):
            self.create_image(fetch_func_sync, base, size,
                              *args, **kwargs)

        if (size and self.preallocate and self._can_fallocate() and
                os.access(self.path, os.W_OK)):
            utils.execute('fallocate', '-n', '-l', size, self.path)
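Example #4 preallocates with fallocate only after self._can_fallocate() confirms the backing filesystem supports it. A hedged sketch of such a capability probe (the function name and probe file are assumptions; the real driver caches the result and warns on failure):

import os

from oslo_concurrency import processutils


def can_fallocate(path):
    # Probe by preallocating a single byte in a scratch file next to
    # `path`; ProcessExecutionError means the filesystem lacks support.
    test_path = path + '.fallocate_test'
    try:
        processutils.execute('fallocate', '-n', '-l', '1', test_path)
        return True
    except processutils.ProcessExecutionError:
        return False
    finally:
        if os.path.exists(test_path):
            os.unlink(test_path)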
Example #5
    def _pull_missing_image(self, context, image_meta, instance):
        msg = 'Image name "%s" does not exist, fetching it...'
        LOG.debug(msg, image_meta['name'])

        # TODO(imain): It would be nice to do this with file like object
        # passing but that seems a bit complex right now.
        snapshot_directory = CONF.docker.snapshots_directory
        fileutils.ensure_tree(snapshot_directory)
        with utils.tempdir(dir=snapshot_directory) as tmpdir:
            try:
                out_path = os.path.join(tmpdir, uuid.uuid4().hex)

                images.fetch(context, image_meta['id'], out_path,
                             instance['user_id'], instance['project_id'])
                self.docker.load_repository_file(
                    self._encode_utf8(image_meta['name']),
                    out_path
                )
            except Exception as e:
                LOG.warning(_('Cannot load repository file: %s'),
                            e, instance=instance, exc_info=True)
                msg = _('Cannot load repository file: {0}')
                raise exception.NovaException(msg.format(e),
                                              instance_id=image_meta['name'])

        return self.docker.inspect_image(self._encode_utf8(image_meta['name']))
Example #6
    def get_image_cache_dir(self, filename):
        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        if not os.path.exists(base_dir):
            fileutils.ensure_tree(base_dir)

        return os.path.join(base_dir, filename)
Example #7
    def cache(self, fetch_func, filename, size=None, *args, **kwargs):
        """Creates image from template.

        Ensures that the template and image do not already exist.
        Ensures that base directory exists.
        Synchronizes on template fetching.

        :fetch_func: Function that creates the base image
                     Should accept `target` argument.
        :filename: Name of the file in the image directory
        :size: Size of created image in bytes (optional)
        """
        @utils.synchronized(filename, external=True, lock_path=self.lock_path)
        def call_if_not_exists(target, *args, **kwargs):
            if not os.path.exists(target):
                fetch_func(target=target, *args, **kwargs)
            elif CONF.libvirt_images_type == "lvm" and \
                    'ephemeral_size' in kwargs:
                fetch_func(target=target, *args, **kwargs)

        base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name)
        if not os.path.exists(base_dir):
            fileutils.ensure_tree(base_dir)
        base = os.path.join(base_dir, filename)

        if not os.path.exists(self.path) or not os.path.exists(base):
            self.create_image(call_if_not_exists, base, size,
                              *args, **kwargs)

        if size and self.preallocate and self._can_fallocate():
            utils.execute('fallocate', '-n', '-l', size, self.path)
Example #8
def _sign_csr(csr_text, ca_folder):
    with utils.tempdir() as tmpdir:
        inbound = os.path.join(tmpdir, 'inbound.csr')
        outbound = os.path.join(tmpdir, 'outbound.csr')

        try:
            with open(inbound, 'w') as csrfile:
                csrfile.write(csr_text)
        except IOError:
            LOG.exception(_('Failed to write inbound.csr'))
            raise

        LOG.debug(_('Flags path: %s'), ca_folder)
        start = os.getcwd()

        # Change working dir to CA
        fileutils.ensure_tree(ca_folder)
        os.chdir(ca_folder)
        utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
                      './openssl.cnf', '-infiles', inbound)
        out, _err = utils.execute('openssl', 'x509', '-in', outbound,
                                  '-serial', '-noout')
        serial = string.strip(out.rpartition('=')[2])
        os.chdir(start)

        with open(outbound, 'r') as crtfile:
            return (serial, crtfile.read())
Example #9
def _sign_csr(csr_text, ca_folder):
    with utils.tempdir() as tmpdir:
        inbound = os.path.join(tmpdir, "inbound.csr")
        outbound = os.path.join(tmpdir, "outbound.csr")

        try:
            with open(inbound, "w") as csrfile:
                csrfile.write(csr_text)
        except IOError:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to write inbound.csr"))

        LOG.debug("Flags path: %s", ca_folder)
        start = os.getcwd()

        # Change working dir to CA
        fileutils.ensure_tree(ca_folder)
        os.chdir(ca_folder)
        utils.execute("openssl", "ca", "-batch", "-out", outbound, "-config", "./openssl.cnf", "-infiles", inbound)
        out, _err = utils.execute("openssl", "x509", "-in", outbound, "-serial", "-noout")
        serial = string.strip(out.rpartition("=")[2])
        os.chdir(start)

        with open(outbound, "r") as crtfile:
            return (serial, crtfile.read())
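Example #9 replaces the log-and-raise of Example #8 with excutils.save_and_reraise_exception(), which logs inside a context manager and re-raises the original exception with its traceback intact. A minimal sketch of the pattern (the function and path are illustrative):

import logging

from oslo_utils import excutils

LOG = logging.getLogger(__name__)


def write_text(path, text):
    try:
        with open(path, 'w') as f:
            f.write(text)
    except IOError:
        # The context manager saves the in-flight exception, runs the
        # body, then re-raises with the original traceback on exit.
        with excutils.save_and_reraise_exception():
            LOG.exception('Failed to write %s', path)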
Example #10
def create_container(instance):
    """Create an LXC rootfs directory for a given container."""
    LOG.debug('Creating LXC rootfs')

    container_rootfs = container_utils.get_container_rootfs(instance)
    if not os.path.exists(container_rootfs):
        fileutils.ensure_tree(container_rootfs)
Example #11
 def __init__(self, filename):
     super(ImageCache, self).__init__("file", "qcow2", is_block_dev=False)
     base_dir = os.path.join(CONF.instances_path,
                             CONF.image_cache_subdirectory_name)
     if not os.path.exists(base_dir):
         fileutils.ensure_tree(base_dir)
      self.path = os.path.join(base_dir, filename)
Example #12
 def put(self, bucket_name):
     path = os.path.abspath(os.path.join(self.application.directory, bucket_name))
     if not path.startswith(self.application.directory) or os.path.exists(path):
         self.set_status(403)
         return
     fileutils.ensure_tree(path)
     self.finish()
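The put() handler above blocks path traversal by normalizing the joined path and requiring it to stay under the served directory. The same guard as a standalone helper (a sketch; comparing against root plus a trailing separator avoids falsely accepting siblings such as /data-evil when root is /data):

import os


def safe_join(root, name):
    # Resolve '..' segments, then require the result to remain inside
    # root; returns None for anything that would escape it.
    root = os.path.abspath(root)
    path = os.path.abspath(os.path.join(root, name))
    if path != root and not path.startswith(root + os.sep):
        return None
    return path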
Example #13
File: iscsi.py Project: rmk40/nova
    def create_iscsi_target(self, name, tid, lun, path, **kwargs):
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        fileutils.ensure_tree(FLAGS.volumes_dir)

        vol_id = name.split(":")[1]
        volume_conf = """
            <target %s>
                backing-store %s
            </target>
        """ % (
            name,
            path,
        )

        LOG.info(_("Creating volume: %s") % vol_id)
        volumes_dir = FLAGS.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        with open(volume_path, "w+") as f:
            f.write(volume_conf)

        try:
            (out, err) = self._execute("tgt-admin", "--update", name, run_as_root=True)
        except exception.ProcessExecutionError as e:
            LOG.error(_("Failed to create iscsi target for volume " "id:%(vol_id)s.") % locals())

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
Example #14
    def _cache_image(self, context, instance, image_meta):
        """Fetch the instance's image from Glance

        This method pulls the relevant AMI and associated kernel and ramdisk,
        and the deploy kernel and ramdisk from Glance, and writes them
        to the appropriate places on local disk.

        Both sets of kernel and ramdisk are needed for Tilera booting, so these
        are stored under CONF.baremetal.tftp_root.

        At present, the AMI is cached and certain files are injected.
        Debian/ubuntu-specific assumptions are made regarding the injected
        files. In a future revision, this functionality will be replaced by a
        more scalable and os-agnostic approach: the deployment ramdisk will
        fetch from Glance directly, and write its own last-mile configuration.
        """
        fileutils.ensure_tree(get_image_dir_path(instance))
        image_path = get_image_file_path(instance)

        LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
                        {'ami': image_meta['id'], 'name': instance['name']})
        bm_utils.cache_image(context=context,
                             target=image_path,
                             image_id=image_meta['id'],
                             user_id=instance['user_id'],
                             project_id=instance['project_id']
                        )

        return [image_meta['id'], image_path]
Example #15
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem

            with sem:
                LOG.debug(_('Got semaphore "%(lock)s" for method '
                            '"%(method)s"...'), {'lock': name,
                                                 'method': f.__name__})
                if external and not CONF.disable_process_locking:
                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
                                'method "%(method)s"...'),
                              {'lock': name, 'method': f.__name__})
                    cleanup_dir = False

                    # We need a copy of lock_path because it is non-local
                    local_lock_path = lock_path
                    if not local_lock_path:
                        local_lock_path = CONF.lock_path

                    if not local_lock_path:
                        cleanup_dir = True
                        local_lock_path = tempfile.mkdtemp()

                    if not os.path.exists(local_lock_path):
                        cleanup_dir = True
                        fileutils.ensure_tree(local_lock_path)

                    # NOTE(mikal): the lock name cannot contain directory
                    # separators
                    safe_name = name.replace(os.sep, '_')
                    lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
                    lock_file_path = os.path.join(local_lock_path,
                                                  lock_file_name)

                    try:
                        lock = InterProcessLock(lock_file_path)
                        with lock:
                            LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
                                        'for method "%(method)s"...'),
                                      {'lock': name,
                                       'path': lock_file_path,
                                       'method': f.__name__})
                            retval = f(*args, **kwargs)
                    finally:
                        # NOTE(vish): This removes the tempdir if we needed
                        #             to create one. This is used to cleanup
                        #             the locks left behind by unit tests.
                        if cleanup_dir:
                            shutil.rmtree(local_lock_path)
                else:
                    retval = f(*args, **kwargs)

            return retval
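Example #15 is the inner wrapper generated by the synchronized decorator. From the caller's side, the decorator is applied roughly like this (a hedged sketch against the oslo lockutils API; the lock name and path are illustrative):

from oslo_concurrency import lockutils


@lockutils.synchronized('image-cache', external=True,
                        lock_path='/var/lock/nova')
def refresh_cache_entry(name):
    # external=True serializes callers across processes via a lock
    # file under lock_path; without it only in-process green threads
    # are excluded.
    pass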
Example #16
    def cache(self, fetch_func, filename, size=None, *args, **kwargs):
        """Creates image from template.

        Ensures that the template and image do not already exist.
        Ensures that base directory exists.
        Synchronizes on template fetching.

        :fetch_func: Function that creates the base image
                     Should accept `target` argument.
        :filename: Name of the file in the image directory
        :size: Size of created image in bytes (optional)
        """
        @lockutils.synchronized(filename, 'nova-', external=True,
                                lock_path=self.lock_path)
        def call_if_not_exists(target, *args, **kwargs):
            if not os.path.exists(target):
                fetch_func(target=target, *args, **kwargs)

        if not os.path.exists(self.path):
            base_dir = os.path.join(CONF.instances_path, '_base')
            if not os.path.exists(base_dir):
                fileutils.ensure_tree(base_dir)
            base = os.path.join(base_dir, filename)

            self.create_image(call_if_not_exists, base, size,
                              *args, **kwargs)
Example #17
    def acquire(self):
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(_("Unable to acquire lock on"
                                                  " `%(filename)s` due to"
                                                  " %(exception)s") %
                                                {'filename': self.fname,
                                                    'exception': e})
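Example #17 polls a non-blocking lock because green threads are not patched for blocking file-lock calls. On POSIX, the trylock() it invokes would typically be a non-blocking fcntl lock; a minimal sketch under that assumption:

import fcntl


def trylock(lockfile):
    # LOCK_NB raises IOError (errno EACCES or EAGAIN) instead of
    # blocking when another process already holds the lock.
    fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)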
Example #18
    def _import_volume_from_glance(self, context, volume_id, volume_loc):

        volume = self.cinder_api.get(context, volume_id)
        image_meta = volume.get('volume_image_metadata')
        if not image_meta:
            LOG.error('Provider Volume NOT Found!')
            raise exception_ex.VolumeNotFoundAtProvider()
        else:
            # 1.1 download qcow2 file from glance
            image_uuid = self._get_image_id_from_meta(image_meta)

            orig_file_name = 'orig_file.qcow2'
            this_conversion_dir = '%s/%s' % (CONF.provider_opts.conversion_dir, volume_id)
            orig_file_full_name = '%s/%s' % (this_conversion_dir, orig_file_name)

            fileutils.ensure_tree(this_conversion_dir)
            self.glance_api.download(context, image_uuid, dest_path=orig_file_full_name)

            # 1.2 convert to provider image format
            converted_file_format = 'vmdk'
            converted_file_name = '%s.%s' % ('converted_file', converted_file_format)
            converted_file_path = '%s/%s' % (CONF.provider_opts.conversion_dir, volume_id)
            converted_file_full_name = '%s/%s' % (converted_file_path, converted_file_name)
            convert_image(orig_file_full_name,
                          converted_file_full_name,
                          converted_file_format,
                          subformat='streamoptimized')

            # 1.3 upload volume file to provider storage (S3, e.g.)
            container = self.storage_adapter.get_container(CONF.provider_opts.storage_tmp_dir)
            # self.storage_adapter.upload_object(converted_file_full_name,container,volume_id)

            object_name = volume_id
            extra = {'content_type': 'text/plain'}

            with open(converted_file_full_name, 'rb') as f:
                obj = self.storage_adapter.upload_object_via_stream(container=container,
                                                                    object_name=object_name,
                                                                    iterator=f,
                                                                    extra=extra)

            # 1.4 import volume
            obj = self.storage_adapter.get_object(container.name, volume_id)

            task = self.compute_adapter.create_import_volume_task(CONF.provider_opts.storage_tmp_dir,
                                                                  volume_id,
                                                                  'VMDK',
                                                                  obj.size,
                                                                  str(volume.get('size')),
                                                                  volume_loc=volume_loc)
            while not task.is_completed():
                time.sleep(10)
                if task.is_cancelled():
                    LOG.error('import volume fail!')
                    raise exception_ex.UploadVolumeFailure
                task = self.compute_adapter.get_task_info(task)

            return task.volume_id
Example #19
    def _do_snapshot_2(self, context, instance, image_id, update_task_state):
         
        # a) get  provider node id
        provider_node_id = self._get_provider_node_id(instance)
        provider_nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node_id])
        if not provider_nodes:
            LOG.error('instance %s is not found' % instance.uuid)
            raise exception.InstanceNotFound(instance_id=instance.uuid)
        if len(provider_nodes) > 1:
            LOG.error('more than one instance found for %s' % instance.uuid)
            raise exception_ex.MultiInstanceConfusion
        provider_node = provider_nodes[0]

        # b) export-instance to s3
        # self.compute_adapter.ex_stop_node(provider_node)
        try:
            task = self.compute_adapter.create_export_instance_task(provider_node_id,
                                                                    CONF.provider_opts.storage_tmp_dir)
        except Exception:
            task = self.compute_adapter.create_export_instance_task(provider_node_id,
                                                                    CONF.provider_opts.storage_tmp_dir)
        while not task.is_completed():
            time.sleep(10)
            task = self.compute_adapter.get_task_info(task)

        obj_key = task.export_to_s3_info.s3_key
        obj_bucket = task.export_to_s3_info.s3_bucket

        # c) download from s3
        obj = self.storage_adapter.get_object(obj_bucket, obj_key)
        conv_dir = '%s/%s' % (CONF.provider_opts.conversion_dir, image_id)
        fileutils.ensure_tree(conv_dir)
        org_full_name = '%s/%s.vmdk' % (conv_dir, image_id)

        self.storage_adapter.download_object(obj, org_full_name)

        # d) convert to qcow2
        dest_full_name = '%s/%s.qcow2' % (conv_dir, image_id)
        convert_image(org_full_name,
                      dest_full_name,
                      'qcow2')

        # upload to glance
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)

        file_size = os.path.getsize(dest_full_name)
        metadata = self.glance_api.get(context, image_id)
        image_metadata = {"disk_format": "qcow2",
                          "is_public": "false",
                          "name": metadata['name'],
                          "status": "active",
                          "container_format": "bare",
                          "size": file_size,
                          "properties": {"owner_id": instance['project_id']}}

        src_file_handle = fileutils.file_open(dest_full_name, "rb")
        self.glance_api.create(context, image_metadata, src_file_handle)
        src_file_handle.close()
Example #20
    def resolve_driver_format(self):
        """Return the driver format for self.path.

        First checks self.disk_info_path for an entry.
        If it's not there, calls self._get_driver_format(), and then
        stores the result in self.disk_info_path

        See https://bugs.launchpad.net/nova/+bug/1221190
        """
        def _dict_from_line(line):
            if not line:
                return {}
            try:
                return jsonutils.loads(line)
            except (TypeError, ValueError) as e:
                msg = (_("Could not load line %(line)s, got error "
                        "%(error)s") %
                        {'line': line, 'error': unicode(e)})
                raise exception.InvalidDiskInfo(reason=msg)

        @utils.synchronized(self.disk_info_path, external=False,
                            lock_path=self.lock_path)
        def write_to_disk_info_file():
            # Use os.open to create it without group or world write permission.
            fd = os.open(self.disk_info_path, os.O_RDWR | os.O_CREAT, 0o644)
            with os.fdopen(fd, "r+") as disk_info_file:
                line = disk_info_file.read().rstrip()
                dct = _dict_from_line(line)
                if self.path in dct:
                    msg = _("Attempted overwrite of an existing value.")
                    raise exception.InvalidDiskInfo(reason=msg)
                dct.update({self.path: driver_format})
                disk_info_file.seek(0)
                disk_info_file.truncate()
                disk_info_file.write('%s\n' % jsonutils.dumps(dct))
            # Ensure the file is always owned by the nova user so qemu can't
            # write it.
            # TODO: chown for Windows?
            # utils.chown(self.disk_info_path, owner_uid=getpass.getuser())

        try:
            LOG.debug('Disk info path %s', self.disk_info_path)
            if (self.disk_info_path is not None and
                        os.path.exists(self.disk_info_path)):
                with open(self.disk_info_path) as disk_info_file:
                    line = disk_info_file.read().rstrip()
                    dct = _dict_from_line(line)
                    for path, driver_format in dct.iteritems():
                        if path == self.path:
                            return driver_format
            driver_format = self._get_driver_format()
            if self.disk_info_path is not None:
                fileutils.ensure_tree(os.path.dirname(self.disk_info_path))
                write_to_disk_info_file()
        except OSError as e:
            raise exception.DiskInfoReadWriteFail(reason=unicode(e))
        return driver_format
Example #21
    def resolve_driver_format(self):
        """Return the driver format for self.path.

        First checks self.disk_info_path for an entry.
        If it's not there, calls self._get_driver_format(), and then
        stores the result in self.disk_info_path

        See https://bugs.launchpad.net/nova/+bug/1221190
        """
        def _dict_from_line(line):
            if not line:
                return {}
            try:
                return jsonutils.loads(line)
            except (TypeError, ValueError) as e:
                msg = (_("Could not load line %(line)s, got error "
                        "%(error)s") %
                        {'line': line, 'error': e})
                raise exception.InvalidDiskInfo(reason=msg)

        @utils.synchronized(self.disk_info_path, external=False,
                            lock_path=self.lock_path)
        def write_to_disk_info_file():
            # Use os.open to create it without group or world write permission.
            fd = os.open(self.disk_info_path, os.O_RDONLY | os.O_CREAT, 0o644)
            with os.fdopen(fd, "r") as disk_info_file:
                line = disk_info_file.read().rstrip()
                dct = _dict_from_line(line)

            if self.path in dct:
                msg = _("Attempted overwrite of an existing value.")
                raise exception.InvalidDiskInfo(reason=msg)
            dct.update({self.path: driver_format})

            tmp_path = self.disk_info_path + ".tmp"
            fd = os.open(tmp_path, os.O_WRONLY | os.O_CREAT, 0o644)
            with os.fdopen(fd, "w") as tmp_file:
                tmp_file.write('%s\n' % jsonutils.dumps(dct))
            os.rename(tmp_path, self.disk_info_path)

        try:
            if (self.disk_info_path is not None and
                        os.path.exists(self.disk_info_path)):
                with open(self.disk_info_path) as disk_info_file:
                    line = disk_info_file.read().rstrip()
                    dct = _dict_from_line(line)
                    for path, driver_format in dct.iteritems():
                        if path == self.path:
                            return driver_format
            driver_format = self._get_driver_format()
            if self.disk_info_path is not None:
                fileutils.ensure_tree(os.path.dirname(self.disk_info_path))
                write_to_disk_info_file()
        except OSError as e:
            raise exception.DiskInfoReadWriteFail(reason=six.text_type(e))
        return driver_format
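Example #21 fixes a weakness in Example #20: instead of rewriting disk_info_path in place, it writes the updated dict to a temporary file and renames it over the original, so readers never observe a half-written file (rename is atomic on POSIX when both paths are on the same filesystem). The same pattern in isolation, as a minimal sketch:

import json
import os


def atomic_write_json(path, data):
    # Prepare a complete temp file first; the rename then makes the
    # new contents visible in a single atomic step.
    tmp_path = path + '.tmp'
    fd = os.open(tmp_path, os.O_WRONLY | os.O_CREAT, 0o644)
    with os.fdopen(fd, 'w') as tmp_file:
        tmp_file.write('%s\n' % json.dumps(data))
    os.rename(tmp_path, path)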
Example #22
    def __init__(self, root_directory, bucket_depth=0, mapper=None):
        if mapper is None:
            mapper = routes.Mapper()

        mapper.connect("/", controller=lambda *a, **kw: RootHandler(self)(*a, **kw))
        mapper.connect("/{bucket}/{object_name}", controller=lambda *a, **kw: ObjectHandler(self)(*a, **kw))
        mapper.connect("/{bucket_name}/", controller=lambda *a, **kw: BucketHandler(self)(*a, **kw))
        self.directory = os.path.abspath(root_directory)
        fileutils.ensure_tree(self.directory)
        self.bucket_depth = bucket_depth
        super(S3Application, self).__init__(mapper)
Example #23
    def _spawn_using_handoff(self, context, instance, xml, image_meta, handoff_info):
        image_properties = image_meta.get("properties", None)
        memory_snap_id = str(image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM))
        diskhash_snap_id = str(image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_DISK_HASH))
        memhash_snap_id = str(image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM_HASH))
        basedisk_path = self._get_cache_image(context, instance, image_meta["id"])
        basemem_path = self._get_cache_image(context, instance, memory_snap_id)
        diskhash_path = self._get_cache_image(context, instance, diskhash_snap_id)
        memhash_path = self._get_cache_image(context, instance, memhash_snap_id)
        base_vm_paths = [basedisk_path, basemem_path, diskhash_path, memhash_path]
        image_sha256 = image_properties.get(CloudletAPI.PROPERTY_KEY_BASE_UUID)

        snapshot_directory = libvirt_driver.CONF.libvirt.snapshots_directory
        fileutils.ensure_tree(snapshot_directory)
        synthesized_vm = None
        with utils.tempdir(dir=snapshot_directory) as tmpdir:
            uuidhex = uuid.uuid4().hex
            launch_diskpath = os.path.join(tmpdir, uuidhex + "-launch-disk")
            launch_memorypath = os.path.join(tmpdir, uuidhex + "-launch-memory")
            tmp_dir = mkdtemp(prefix="cloudlet-residue-")
            handoff_recv_datafile = os.path.join(tmp_dir, "handoff-data")
            # recv handoff data and synthesize disk img and memory snapshot
            try:
                ret_values = self._handoff_recv(
                    base_vm_paths, image_sha256, handoff_recv_datafile, launch_diskpath, launch_memorypath
                )
                # start VM
                launch_disk_size, launch_memory_size, disk_overlay_map, memory_overlay_map = ret_values
                synthesized_vm = self._handoff_launch_vm(
                    xml,
                    basedisk_path,
                    basemem_path,
                    launch_diskpath,
                    launch_memorypath,
                    int(launch_disk_size),
                    int(launch_memory_size),
                    disk_overlay_map,
                    memory_overlay_map,
                )

                # reattach NIC
                synthesis.rettach_nic(synthesized_vm.machine, synthesized_vm.old_xml_str, xml)
            except handoff.HandoffError as e:
                msg = "failed to perform VM handoff:\n"
                msg += str(e)
                raise exception.ImageNotFound(msg)
            finally:
                if os.path.exists(tmp_dir):
                    shutil.rmtree(tmp_dir)
                if os.path.exists(launch_diskpath):
                    os.remove(launch_diskpath)
                if os.path.exists(launch_memorypath):
                    os.remove(launch_memorypath)
        return synthesized_vm
Example #24
def ensure_ca_filesystem():
    """Ensure the CA filesystem exists."""
    ca_dir = ca_folder()
    if not os.path.exists(ca_path()):
        genrootca_sh_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "CA", "genrootca.sh"))

        start = os.getcwd()
        fileutils.ensure_tree(ca_dir)
        os.chdir(ca_dir)
        utils.execute("sh", genrootca_sh_path)
        os.chdir(start)
Example #25
File: crypto.py Project: yuans/nova
def ensure_ca_filesystem():
    """Ensure the CA filesystem exists."""
    ca_dir = ca_folder()
    if not os.path.exists(ca_path()):
        genrootca_sh_path = os.path.abspath(
                os.path.join(os.path.dirname(__file__), 'CA', 'genrootca.sh'))

        start = os.getcwd()
        fileutils.ensure_tree(ca_dir)
        os.chdir(ca_dir)
        utils.execute("sh", genrootca_sh_path)
        os.chdir(start)
Example #26
def mount_volume(volume, mnt_base, configfile=None):
    """Wraps execute calls for mounting a Quobyte volume"""
    fileutils.ensure_tree(mnt_base)

    command = ['mount.quobyte', volume, mnt_base]
    if configfile:
        command.extend(['-c', configfile])

    LOG.debug('Mounting volume %s at mount point %s ...', volume, mnt_base)
    # Run mount command but do not fail on already mounted exit code
    utils.execute(*command, check_exit_code=[0, 4])
    LOG.info(_LI('Mounted volume: %s'), volume)
Example #27
 def setup_key_pair(self, context):
     key_name = "%s%s" % (context.project_id, CONF.vpn_key_suffix)
     try:
         keypair_api = compute.api.KeypairAPI()
         result, private_key = keypair_api.create_key_pair(context, context.user_id, key_name)
         key_dir = os.path.join(CONF.keys_path, context.user_id)
         fileutils.ensure_tree(key_dir)
         key_path = os.path.join(key_dir, "%s.pem" % key_name)
         with open(key_path, "w") as f:
             f.write(private_key)
     except (exception.KeyPairExists, os.error, IOError):
         pass
     return key_name
Example #28
def mount_volume(volume, mnt_base, configfile=None):
    """Wraps execute calls for mounting a Quobyte volume"""
    fileutils.ensure_tree(mnt_base)

    command = ['mount.quobyte', volume, mnt_base]
    if configfile:
        command.extend(['-c', configfile])

    LOG.debug('Mounting volume %s at mount point %s ...',
              volume,
              mnt_base)
    # Run mount command but do not fail on already mounted exit code
    utils.execute(*command, check_exit_code=[0, 4])
    LOG.info(_LI('Mounted volume: %s'), volume)
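Examples #26 and #28 are the same Quobyte helper; the notable detail is check_exit_code=[0, 4], which treats the mount tool's already-mounted exit status as success so repeated calls are harmless. A hypothetical invocation (volume and paths are illustrative):

mount_volume('quobyte-host.example.com/volume',
             '/mnt/quobyte/volume',
             configfile='/etc/quobyte/client.cfg')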
Example #29
def fetch_image(client, context, image, instance):
    try:
        if image not in client.image_list():
            if not os.path.exists(container_utils.get_base_dir()):
                fileutils.ensure_tree(container_utils.get_base_dir())
            container_image = container_utils.get_container_image(
                                instance)
            container_utils.fetch_image(context, container_image, instance)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Error downloading image: %(instance)s'
                          ' %(image)s'),
                      {'instance': instance.uuid,
                       'image': instance.image_ref})
Example #30
    def __init__(self, root_directory, bucket_depth=0, mapper=None):
        if mapper is None:
            mapper = routes.Mapper()

        mapper.connect('/',
                controller=lambda *a, **kw: RootHandler(self)(*a, **kw))
        mapper.connect('/{bucket}/{object_name}',
                controller=lambda *a, **kw: ObjectHandler(self)(*a, **kw))
        mapper.connect('/{bucket_name}/',
                controller=lambda *a, **kw: BucketHandler(self)(*a, **kw))
        self.directory = os.path.abspath(root_directory)
        fileutils.ensure_tree(self.directory)
        self.bucket_depth = bucket_depth
        super(S3Application, self).__init__(mapper)
Example #31
File: pxe.py Project: gtriolo/nova
    def _cache_tftp_images(self, context, instance, image_info):
        """Fetch the necessary kernels and ramdisks for the instance."""
        fileutils.ensure_tree(os.path.join(CONF.baremetal.tftp_root, instance["uuid"]))

        LOG.debug(_("Fetching kernel and ramdisk for instance %s") % instance["name"])
        for label in image_info.keys():
            (uuid, path) = image_info[label]
            bm_utils.cache_image(
                context=context,
                target=path,
                image_id=uuid,
                user_id=instance["user_id"],
                project_id=instance["project_id"],
            )
Example #32
 def setup_key_pair(self, context):
     key_name = '%s%s' % (context.project_id, CONF.vpn_key_suffix)
     try:
         keypair_api = compute.api.KeypairAPI()
         result, private_key = keypair_api.create_key_pair(
             context, context.user_id, key_name)
         key_dir = os.path.join(CONF.keys_path, context.user_id)
         fileutils.ensure_tree(key_dir)
         key_path = os.path.join(key_dir, '%s.pem' % key_name)
         with open(key_path, 'w') as f:
             f.write(private_key)
     except (exception.KeyPairExists, os.error, IOError):
         pass
     return key_name
Example #33
 def setup_key_pair(self, context):
     key_name = '%s%s' % (context.project_id, CONF.vpn_key_suffix)
     try:
         keypair_api = compute.api.KeypairAPI()
         result = keypair_api.create_key_pair(context,
                                              context.user_id,
                                              key_name)
         private_key = result['private_key']
         key_dir = os.path.join(CONF.keys_path, context.user_id)
         fileutils.ensure_tree(key_dir)
         key_path = os.path.join(key_dir, '%s.pem' % key_name)
         with open(key_path, 'w') as f:
             f.write(private_key)
     except (exception.Duplicate, os.error, IOError):
         pass
     return key_name
Example #34
    def _try_fetch_image(self, context, image, instance, max_size=0):
        try:
            images.fetch(context, instance.image_ref, image,
                         instance.user_id, instance.project_id,
                         max_size=max_size)
        except Exception:
            LOG.exception(_LE("Image %(image_id)s doesn't exist anymore on "
                              "image service, attempting to copy image"),
                          {'image_id': instance.image_ref})

        fileutils.ensure_tree(self.image_dir)
        (user, group) = self.idmap.get_user()
        utils.execute('tar', '-C', self.image_dir, '--anchored',
                      '--numeric-owner', '-xpzf', image, run_as_root=True)
        utils.execute('chown', '-R', '%s:%s' % (user, group), self.image_dir,
                      run_as_root=True)
Example #35
    def _cache_tftp_images(self, context, instance, image_info):
        """Fetch the necessary kernels and ramdisks for the instance."""
        fileutils.ensure_tree(
                os.path.join(CONF.baremetal.tftp_root, instance['uuid']))

        LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
                        instance['name'])
        for label in image_info.keys():
            (uuid, path) = image_info[label]
            bm_utils.cache_image(
                    context=context,
                    target=path,
                    image_id=uuid,
                    user_id=instance['user_id'],
                    project_id=instance['project_id'],
                )
Example #36
 def _inject_key(self, id, key):
     if isinstance(id, dict):
         id = id.get('id')
     sshdir = os.path.join(CONF.instances_path, id, '.ssh')
     key_data = ''.join([
         '\n',
         '# The following ssh key was injected by Nova',
         '\n',
         key.strip(),
         '\n',
     ])
     fileutils.ensure_tree(sshdir)
     keys_file = os.path.join(sshdir, 'authorized_keys')
     with open(keys_file, 'a') as f:
         f.write(key_data)
     os.chmod(sshdir, 0o700)
     os.chmod(keys_file, 0o600)
     return sshdir
Example #37
def write_stored_info(target, field=None, value=None):
    """Write information about an image."""

    if not field:
        return

    info_file = get_info_filename(target)
    fileutils.ensure_tree(os.path.dirname(info_file))

    d = read_stored_info(info_file)
    d[field] = value
    serialized = jsonutils.dumps(d)

    LOG.info(_('Writing image info file: %s'), info_file)
    LOG.info(_('Wrote: %s'), serialized)
    with open(info_file, 'w') as f:
        f.write(serialized)
Example #38
    def _pull_missing_image(self, context, image_meta, instance):
        msg = 'Image name "%s" does not exist, fetching it...'
        LOG.debug(msg, image_meta['name'])

        shared_directory = CONF.docker.shared_directory
        if (shared_directory and
                os.path.exists(os.path.join(shared_directory,
                                            image_meta['id']))):
            try:
                self.docker.load_repository_file(
                    self._encode_utf8(image_meta['name']),
                    os.path.join(shared_directory, image_meta['id']))
                return self.docker.inspect_image(
                    self._encode_utf8(image_meta['name']))
            except Exception as e:
                # If failed to load image from shared_directory, continue
                # to download the image from glance then load.
                LOG.warning(_('Cannot load repository file from shared '
                              'directory: %s'),
                            e, instance=instance, exc_info=True)

        # TODO(imain): It would be nice to do this with file like object
        # passing but that seems a bit complex right now.
        snapshot_directory = CONF.docker.snapshots_directory
        fileutils.ensure_tree(snapshot_directory)
        with utils.tempdir(dir=snapshot_directory) as tmpdir:
            try:
                out_path = os.path.join(tmpdir, uuid.uuid4().hex)

                images.fetch(context, image_meta['id'], out_path,
                             instance['user_id'], instance['project_id'])
                self.docker.load_repository_file(
                    self._encode_utf8(image_meta['name']),
                    out_path
                )
            except Exception as e:
                LOG.warning(_('Cannot load repository file: %s'),
                            e, instance=instance, exc_info=True)
                msg = _('Cannot load repository file: {0}')
                raise exception.NovaException(msg.format(e),
                                              instance_id=image_meta['name'])

        return self.docker.inspect_image(self._encode_utf8(image_meta['name']))
Example #39
 def put(self, bucket, object_name):
     object_name = urllib.unquote(object_name)
     bucket_dir = os.path.abspath(
         os.path.join(self.application.directory, bucket))
     if (not bucket_dir.startswith(self.application.directory)
             or not os.path.isdir(bucket_dir)):
         self.set_404()
         return
     path = self._object_path(bucket, object_name)
     if not path.startswith(bucket_dir) or os.path.isdir(path):
         self.set_status(403)
         return
     directory = os.path.dirname(path)
     fileutils.ensure_tree(directory)
     with open(path, "w") as object_file:
         object_file.write(self.request.body)
     self.set_header('ETag', '"%s"' % utils.get_hash_str(self.request.body))
     self.finish()
Example #40
    def _pull_missing_image(self, context, image_meta, instance):
        msg = 'Image name "%s" does not exist, fetching it...'
        LOG.debug(msg, image_meta['name'])

        # TODO(imain): It would be nice to do this with file like object
        # passing but that seems a bit complex right now.
        snapshot_directory = CONF.docker.snapshots_directory
        fileutils.ensure_tree(snapshot_directory)
        with utils.tempdir(dir=snapshot_directory) as tmpdir:
            try:
                out_path = os.path.join(tmpdir, uuid.uuid4().hex)

                images.fetch(context, image_meta['id'], out_path,
                             instance['user_id'], instance['project_id'])
                self.docker.load_repository_file(image_meta['name'], out_path)
            except Exception as e:
                msg = _('Cannot load repository file: {0}')
                raise exception.NovaException(msg.format(e),
                                              instance_id=image_meta['name'])

        return self.docker.inspect_image(image_meta['name'])
Example #41
    def snapshot(self, context, instance, image_id, update_task_state):

        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
        # 1. get vmdk url
        vapp_name = self._get_vcloud_vapp_name(instance)
        remote_vmdk_url = self._vcloud_client.query_vmdk_url(vapp_name)

        # 2. download vmdk
        temp_dir = '%s/%s' % (CONF.vcloud.vcloud_conversion_dir, instance.uuid)
        fileutils.ensure_tree(temp_dir)

        vmdk_name = remote_vmdk_url.split('/')[-1]
        local_file_name = '%s/%s' % (temp_dir, vmdk_name)

        self._download_vmdk_from_vcloud(context, remote_vmdk_url,
                                        local_file_name)

        # 3. convert vmdk to qcow2
        converted_file_name = temp_dir + '/converted-file.qcow2'
        convert_commond = "qemu-img convert -f %s -O %s %s %s" % \
            ('vmdk',
             'qcow2',
             local_file_name,
             converted_file_name)
        convert_result = subprocess.call([convert_commond], shell=True)

        if convert_result != 0:
            # do something, change metadata
            LOG.error('converting file failed')

        # 4. upload qcow2 to image repository
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)

        self._upload_image_to_glance(context, converted_file_name, image_id,
                                     instance)

        # 5. delete temporary files
        shutil.rmtree(temp_dir, ignore_errors=True)
Example #42
    def __init__(self, root_directory, bucket_depth=0, mapper=None):
        versionutils.report_deprecated_feature(
            LOG,
            _LW('The in tree EC2 API is deprecated as of Kilo release and may '
                'be removed in a future release. The stackforge ec2-api '
                'project http://git.openstack.org/cgit/stackforge/ec2-api/ '
                'is the target replacement for this functionality.'))
        if mapper is None:
            mapper = routes.Mapper()

        mapper.connect('/',
                       controller=lambda *a, **kw: RootHandler(self)(*a, **kw))
        mapper.connect('/{bucket}/{object_name}',
                       controller=lambda *a, **kw: ObjectHandler(self)
                       (*a, **kw))
        mapper.connect('/{bucket_name}/',
                       controller=lambda *a, **kw: BucketHandler(self)
                       (*a, **kw))
        self.directory = os.path.abspath(root_directory)
        fileutils.ensure_tree(self.directory)
        self.bucket_depth = bucket_depth
        super(S3Application, self).__init__(mapper)
Example #43
def _fetch_image(context, instance, image_meta, container_image, idmap,
                 flavor):
    """Fetch the image from a glance image server."""
    LOG.debug("Downloading image from glance")

    base_dir = os.path.join(CONF.instances_path,
                            CONF.image_cache_subdirectory_name)
    image_dir = os.path.join(base_dir, instance['image_ref'])
    if not os.path.exists(base_dir):
        fileutils.ensure_tree(base_dir)
    base = os.path.join(base_dir, container_image)
    if not os.path.exists(base):
        images.fetch_to_raw(context, instance['image_ref'], base,
                            instance['user_id'], instance['project_id'])
        if not tarfile.is_tarfile(base):
            os.unlink(base)
            raise exception.InvalidDiskFormat(
                disk_format=container_utils.get_disk_format(image_meta))

    if not os.path.exists(image_dir):
        (user, group) = idmap.get_user()
        utils.execute('btrfs', 'sub', 'create', image_dir)

        utils.execute('chown',
                      '%s:%s' % (user, group),
                      image_dir,
                      run_as_root=True)

        tar = [
            'tar', '--directory', image_dir, '--anchored', '--numeric-owner',
            '-xpzf', base
        ]
        nsexec = (['lxc-usernsexec'] +
                  idmap.usernsexec_margs(with_read="user") + ['--'])

        args = tuple(nsexec + tar)
        utils.execute(*args, check_exit_code=[0, 2])
        utils.execute(*tuple(nsexec + ['chown', '0:0', image_dir]))
Example #44
def _sign_csr(csr_text, ca_folder):
    with utils.tempdir() as tmpdir:
        inbound = os.path.join(tmpdir, 'inbound.csr')
        outbound = os.path.join(tmpdir, 'outbound.csr')

        with open(inbound, 'w') as csrfile:
            csrfile.write(csr_text)

        LOG.debug(_('Flags path: %s'), ca_folder)
        start = os.getcwd()

        # Change working dir to CA
        fileutils.ensure_tree(ca_folder)
        os.chdir(ca_folder)
        utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
                      './openssl.cnf', '-infiles', inbound)
        out, _err = utils.execute('openssl', 'x509', '-in', outbound,
                                  '-serial', '-noout')
        serial = string.strip(out.rpartition('=')[2])
        os.chdir(start)

        with open(outbound, 'r') as crtfile:
            return (serial, crtfile.read())
Example #45
        def inner(*args, **kwargs):
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem

            with sem:
                LOG.debug(
                    _('Got semaphore "%(lock)s" for method '
                      '"%(method)s"...'), {
                          'lock': name,
                          'method': f.__name__
                      })

                # NOTE(mikal): I know this looks odd
                if not hasattr(local.strong_store, 'locks_held'):
                    local.strong_store.locks_held = []
                local.strong_store.locks_held.append(name)

                try:
                    if external and not CONF.disable_process_locking:
                        LOG.debug(
                            _('Attempting to grab file lock "%(lock)s" '
                              'for method "%(method)s"...'), {
                                  'lock': name,
                                  'method': f.__name__
                              })
                        cleanup_dir = False

                        # We need a copy of lock_path because it is non-local
                        local_lock_path = lock_path
                        if not local_lock_path:
                            local_lock_path = CONF.lock_path

                        if not local_lock_path:
                            cleanup_dir = True
                            local_lock_path = tempfile.mkdtemp()

                        if not os.path.exists(local_lock_path):
                            fileutils.ensure_tree(local_lock_path)

                        # NOTE(mikal): the lock name cannot contain directory
                        # separators
                        safe_name = name.replace(os.sep, '_')
                        lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
                        lock_file_path = os.path.join(local_lock_path,
                                                      lock_file_name)

                        try:
                            lock = InterProcessLock(lock_file_path)
                            with lock:
                                LOG.debug(
                                    _('Got file lock "%(lock)s" at '
                                      '%(path)s for method '
                                      '"%(method)s"...'), {
                                          'lock': name,
                                          'path': lock_file_path,
                                          'method': f.__name__
                                      })
                                retval = f(*args, **kwargs)
                        finally:
                            LOG.debug(
                                _('Released file lock "%(lock)s" at '
                                  '%(path)s for method "%(method)s"...'), {
                                      'lock': name,
                                      'path': lock_file_path,
                                      'method': f.__name__
                                  })
                            # NOTE(vish): This removes the tempdir if we needed
                            #             to create one. This is used to
                            #             cleanup the locks left behind by unit
                            #             tests.
                            if cleanup_dir:
                                shutil.rmtree(local_lock_path)
                    else:
                        retval = f(*args, **kwargs)

                finally:
                    local.strong_store.locks_held.remove(name)

            return retval
Example #46
    def _do_snapshot_2(self, context, instance, image_id, update_task_state):
        # xxx(wangfeng)
        # import pdb
        # pdb.set_trace()
        # a) get  provider node id
        provider_node_id = self._get_provider_node_id(instance)
        provider_nodes = self.compute_adapter.list_nodes(ex_node_ids=[provider_node_id])
        if not provider_nodes:
            LOG.error('instance %s is not found' % instance.uuid)
            raise exception.InstanceNotFound(instance_id=instance.uuid)
        if len(provider_nodes) > 1:
            LOG.error('more than one instance found for %s' % instance.uuid)
            raise exception_ex.MultiInstanceConfusion
        provider_node = provider_nodes[0]

        # b) export-instance to s3
        # self.compute_adapter.ex_stop_node(provider_node)
        try:
            task = self.compute_adapter.create_export_instance_task(provider_node_id,
                                                                    CONF.provider_opts.storage_tmp_dir)
        except Exception:
            task = self.compute_adapter.create_export_instance_task(provider_node_id,
                                                                    CONF.provider_opts.storage_tmp_dir)
        while not task.is_completed():
            time.sleep(10)
            task = self.compute_adapter.get_task_info(task)

        obj_key = task.export_to_s3_info.s3_key
        obj_bucket = task.export_to_s3_info.s3_bucket

        # c) download from s3
        obj = self.storage_adapter.get_object(obj_bucket, obj_key)
        conv_dir = '%s/%s' % (CONF.provider_opts.conversion_dir, image_id)
        fileutils.ensure_tree(conv_dir)
        org_full_name = '%s/%s.vmdk' % (conv_dir, image_id)

        self.storage_adapter.download_object(obj, org_full_name)

        # d) convert to qcow2
        dest_full_name = '%s/%s.qcow2' % (conv_dir, image_id)
        convert_image(org_full_name,
                      dest_full_name,
                      'qcow2')

        # e) upload to glance
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)

        file_size = os.path.getsize(dest_full_name)
        metadata = self.glance_api.get(context, image_id)
        image_metadata = {"disk_format": "qcow2",
                          "is_public": "false",
                          "name": metadata['name'],
                          "status": "active",
                          "container_format": "bare",
                          "size": file_size,
                          "properties": {"owner_id": instance['project_id']}}

        src_file_handle = fileutils.file_open(dest_full_name, "rb")
        self.glance_api.create(context, image_metadata, src_file_handle)
        src_file_handle.close()
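
The export loop above polls task.is_completed() every ten seconds with no upper bound. A bounded variant is sketched below (hedged: the helper name is hypothetical; get_task_info() is the same adapter call the method already uses):

    import time

    def wait_for_task(adapter, task, interval=10, timeout=3600):
        """Poll a provider task until it completes or timeout expires."""
        deadline = time.time() + timeout
        while not task.is_completed():
            if time.time() > deadline:
                raise RuntimeError('task did not complete in %ss' % timeout)
            time.sleep(interval)
            task = adapter.get_task_info(task)
        return task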
Example #47
0
    def _spawn_from_image(self, context, instance, image_meta, injected_files,
                          admin_password, network_info, block_device_info):
        # 0. get provider_image
        retry_time = 3
        provider_image_id = None
        while (not provider_image_id) and retry_time > 0:
            provider_image_id = self._get_provider_image_id(image_meta)
            retry_time -= 1

        if provider_image_id is not None:
            provider_image = self.compute_adapter.get_image(provider_image_id)
        else:
            provider_image = None

        # 1. if the provider image does not exist, import the image first
        if not provider_image:
            image_uuid = self._get_image_id_from_meta(image_meta)
            container = self.storage_adapter.get_container(CONF.provider_opts.storage_tmp_dir)

            try:
                self.storage_adapter.get_object(container.name, image_uuid)
            except ObjectDoesNotExistError:
                # 1.1 download qcow2 file from glance
                this_conversion_dir = '%s/%s' % (
                    CONF.provider_opts.conversion_dir, image_uuid)
                orig_file_full_name = '%s/%s.qcow2' % (this_conversion_dir,
                                                       'orig_file')
                fileutils.ensure_tree(this_conversion_dir)
                self.glance_api.download(context, image_uuid,
                                         dest_path=orig_file_full_name)

                # 1.2 convert to provider image format
                converted_file_format = 'vmdk'
                converted_file_name = '%s.%s' % ('converted_file',
                                                 converted_file_format)
                converted_file_full_name = '%s/%s' % (this_conversion_dir,
                                                      converted_file_name)

                convert_image(orig_file_full_name,
                              converted_file_full_name,
                              converted_file_format,
                              subformat='streamoptimized')

                # 1.3 upload to the provider object store
                object_name = image_uuid
                extra = {'content_type': 'text/plain'}

                with open(converted_file_full_name, 'rb') as f:
                    obj = self.storage_adapter.upload_object_via_stream(
                        container=container,
                        object_name=object_name,
                        iterator=f,
                        extra=extra)


            task = self.compute_adapter.create_import_image_task(
                CONF.provider_opts.storage_tmp_dir,
                image_uuid,
                image_name=image_uuid)
            while not task.is_completed():
                time.sleep(5)
                task = self.compute_adapter.get_task_info(task)

            provider_image = self.compute_adapter.get_image(task.image_id)
            set_tag_func = getattr(self.compute_adapter, 'ex_create_tags',
                                   None)
            if set_tag_func:
                set_tag_func(provider_image, {'hybrid_cloud_image_id': image_uuid})


        # 2. map flavor to node size, from configuration
        provider_size = self._get_provider_node_size(instance.get_flavor())

        # 3. get a subnet, create_node in this subnet

        provider_subnet_data = self.compute_adapter.ex_list_subnets(
            subnet_ids=[CONF.provider_opts.subnet_data])[0]
        provider_subnet_api = self.compute_adapter.ex_list_subnets(
            subnet_ids=[CONF.provider_opts.subnet_api])[0]

        provider_node_name = self._generate_provider_node_name(instance)
        user_data = self._generate_user_data()
        provider_node = self.compute_adapter.create_node(name=provider_node_name,
                                                         image=provider_image,
                                                         size=provider_size,
                                                         ex_subnet=provider_subnet_data,
                                                         ex_userdata=user_data)

        # 4. mapping instance id to provider node, using metadata
        instance.metadata['provider_node_id'] = provider_node.id
        instance.save()
        set_tag_func = getattr(self.compute_adapter, 'ex_create_tags', None)
        if set_tag_func:
            set_tag_func(provider_node, {'hybrid_cloud_instance_id': instance.uuid})

        # 5. wait for the node to settle, then create a network interface
        #    and attach it to the node
        while (provider_node.state != NodeState.RUNNING and
               provider_node.state != NodeState.STOPPED):
            provider_node = self.compute_adapter.list_nodes(
                ex_node_ids=[provider_node.id])[0]
            time.sleep(10)

        provider_interface = self.compute_adapter.ex_create_network_interface(
            provider_subnet_api,
            name='Test Interface',
            description='My Test')
        # attaching occasionally fails on the first attempt; retry once
        try:
            self.compute_adapter.ex_attach_network_interface_to_node(
                provider_interface, provider_node, 1)
        except Exception:
            self.compute_adapter.ex_attach_network_interface_to_node(
                provider_interface, provider_node, 1)

        return provider_node
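
The try/except-then-call-again pattern used twice in this example retries exactly once and discards the first error. A small generic retry helper is sketched below (hedged: the helper is hypothetical, not part of the driver):

    import time

    def call_with_retry(func, args=(), kwargs=None, attempts=2, delay=5):
        """Call func(*args, **kwargs), retrying on any exception."""
        kwargs = kwargs or {}
        for attempt in range(attempts):
            try:
                return func(*args, **kwargs)
            except Exception:
                if attempt == attempts - 1:
                    raise
                time.sleep(delay)

With it, the NIC attach near the end of the method could read call_with_retry(self.compute_adapter.ex_attach_network_interface_to_node, (provider_interface, provider_node, 1)).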
Example #48
0
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach volume storage to VM instance."""

        volume_id = connection_info['data']['volume_id']
        instance_id = instance.uuid
        LOG.info("attach volume")
        provider_node_id = self._get_provider_node_id(instance)
        provider_volume_id = self._get_provider_volume_id(context, volume_id)

        # 1.get node
        if not provider_node_id:
            LOG.error('instance %s could not be found' % instance_id)
            raise exception.InstanceNotFound(instance_id=instance_id)
        else:
            provider_nodes = self.compute_adapter.list_nodes(
                ex_node_ids=[provider_node_id])

        if not provider_nodes:
            LOG.error('instance %s could not be found' % instance_id)
            raise exception.InstanceNotFound(instance_id=instance_id)
        if len(provider_nodes) > 1:
            LOG.error('more than one provider node found for instance %s' %
                      instance_id)
            raise exception_ex.MultiInstanceConfusion
        provider_node = provider_nodes[0]

        # 2.get volume
        if not provider_volume_id:
            # no provider volume yet; if the volume was created from an
            # image, import that image into the provider first
            volume = self.cinder_api.get(context, volume_id)
            image_meta = volume.get('volume_image_metadata')
            if not image_meta:
                LOG.error('provider volume for %s not found' % volume_id)
                raise exception_ex.VolumeNotFoundAtProvider
            else:
                # 1.1 download qcow2 file from glance
                image_uuid = self._get_image_id_from_meta(image_meta)

                orig_file_name = 'orig_file.qcow2'
                this_conversion_dir = '%s/%s' % (
                    CONF.provider_opts.conversion_dir, volume_id)
                orig_file_full_name = '%s/%s' % (this_conversion_dir,
                                                 orig_file_name)

                fileutils.ensure_tree(this_conversion_dir)
                self.glance_api.download(context, image_uuid,
                                         dest_path=orig_file_full_name)

                # 1.2 convert to provider image format
                converted_file_format = 'vmdk'
                converted_file_name = '%s.%s' % ('converted_file',
                                                 converted_file_format)
                converted_file_path = '%s/%s' % (
                    CONF.provider_opts.conversion_dir, volume_id)
                converted_file_full_name = '%s/%s' % (converted_file_path,
                                                      converted_file_name)
                convert_image(orig_file_full_name,
                              converted_file_full_name,
                              converted_file_format,
                              subformat='streamoptimized')


                # 1.3 upload to the provider object store
                container = self.storage_adapter.get_container(
                    CONF.provider_opts.storage_tmp_dir)

                object_name = volume_id
                extra = {'content_type': 'text/plain'}

                with open(converted_file_full_name, 'rb') as f:
                    obj = self.storage_adapter.upload_object_via_stream(
                        container=container,
                        object_name=object_name,
                        iterator=f,
                        extra=extra)

                obj = self.storage_adapter.get_object(container.name,
                                                      volume_id)

                task = self.compute_adapter.create_import_volume_task(
                    CONF.provider_opts.storage_tmp_dir,
                    volume_id,
                    'VMDK',
                    obj.size,
                    str(volume.get('size')),
                    volume_loc=provider_node.extra.get('availability'))
                while not task.is_completed():
                    time.sleep(10)
                    if task.is_cancelled():
                        LOG.error('importing volume %s failed' % volume_id)
                        raise exception_ex.UploadVolumeFailure
                    task = self.compute_adapter.get_task_info(task)

                provider_volume_id = task.volume_id

        provider_volumes = self.compute_adapter.list_volumes(
            ex_volume_ids=[provider_volume_id])

        if provider_volumes is None:
            LOG.error('timed out querying provider volume %s',
                      provider_volume_id)
            raise exception_ex.ProviderRequestTimeOut

        if len(provider_volumes) > 1:
            LOG.error('more than one provider volume found for %s' %
                      volume_id)
            raise exception_ex.MultiVolumeConfusion
        provider_volume = provider_volumes[0]

        if provider_volume.state != StorageVolumeState.AVAILABLE:
            LOG.error('volume %s is not available' % volume_id)
            raise exception.InvalidVolume

        # 3. attach
        self.compute_adapter.attach_volume(provider_node, provider_volume,
                                           mountpoint)

        # 4. map volume to provider volume
        self._map_volume_to_provider(context, volume_id, provider_volume)
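
The list-by-id/require-exactly-one/require-AVAILABLE sequence above recurs for both nodes and volumes. A factored-out sketch for the volume case (hedged: the helper is hypothetical; the exception classes and StorageVolumeState are the ones this example already uses):

    def get_single_available_volume(adapter, provider_volume_id):
        volumes = adapter.list_volumes(ex_volume_ids=[provider_volume_id])
        if volumes is None:
            raise exception_ex.ProviderRequestTimeOut
        if len(volumes) > 1:
            raise exception_ex.MultiVolumeConfusion
        volume = volumes[0]
        if volume.state != StorageVolumeState.AVAILABLE:
            raise exception.InvalidVolume
        return volume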
Example #49
0
    def resolve_driver_format(self):
        """Return the driver format for self.path.

        First checks self.disk_info_path for an entry.
        If it's not there, calls self._get_driver_format(), and then
        stores the result in self.disk_info_path

        See https://bugs.launchpad.net/nova/+bug/1221190
        """
        def _dict_from_line(line):
            if not line:
                return {}
            try:
                return jsonutils.loads(line)
            except (TypeError, ValueError) as e:
                msg = (_("Could not load line %(line)s, got error "
                         "%(error)s") % {
                             'line': line,
                             'error': unicode(e)
                         })
                raise exception.InvalidDiskInfo(reason=msg)

        @utils.synchronized(self.disk_info_path,
                            external=False,
                            lock_path=self.lock_path)
        def write_to_disk_info_file():
            # Use os.open to create it without group or world write permission.
            fd = os.open(self.disk_info_path, os.O_RDWR | os.O_CREAT, 0o644)
            with os.fdopen(fd, "r+") as disk_info_file:
                line = disk_info_file.read().rstrip()
                dct = _dict_from_line(line)
                if self.path in dct:
                    msg = _("Attempted overwrite of an existing value.")
                    raise exception.InvalidDiskInfo(reason=msg)
                dct.update({self.path: driver_format})
                disk_info_file.seek(0)
                disk_info_file.truncate()
                disk_info_file.write('%s\n' % jsonutils.dumps(dct))
            # Ensure the file is always owned by the nova user so qemu
            # can't write it.
            # TODO: chown is unavailable on Windows; the original call was
            # utils.chown(self.disk_info_path, owner_uid=getpass.getuser())

        try:
            LOG.debug('Disk info path %s' % self.disk_info_path)
            if (self.disk_info_path is not None
                    and os.path.exists(self.disk_info_path)):
                with open(self.disk_info_path) as disk_info_file:
                    line = disk_info_file.read().rstrip()
                    dct = _dict_from_line(line)
                    for path, driver_format in dct.iteritems():
                        if path == self.path:
                            return driver_format
            driver_format = self._get_driver_format()
            if self.disk_info_path is not None:
                fileutils.ensure_tree(os.path.dirname(self.disk_info_path))
                write_to_disk_info_file()
        except OSError as e:
            raise exception.DiskInfoReadWriteFail(reason=unicode(e))
        return driver_format
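
For reference, the disk_info file maintained above holds a single JSON object mapping each disk path to its driver format. A runnable sketch with sample content:

    import json

    line = '{"/var/lib/nova/instances/demo/disk": "qcow2"}'  # sample content
    formats = json.loads(line)
    assert formats.get('/var/lib/nova/instances/demo/disk') == 'qcow2'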
Example #50
0
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `semaphore.Semaphore` instance unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set, then
    CONF.lock_path is used as a default.
    """
    # NOTE(soren): If we ever go natively threaded, this will be racy.
    #              See http://stackoverflow.com/questions/5390569/dyn
    #              amically-allocating-and-destroying-mutexes
    sem = _semaphores.get(name, semaphore.Semaphore())
    if name not in _semaphores:
        # this check is not racy - we're already holding ref locally
        # so GC won't remove the item and there was no IO switch
        # (only valid in greenthreads)
        _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                def add_prefix(name, prefix):
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path, lock_file_name)

                try:
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'), {
                            'lock': name,
                            'path': lock_file_path
                        })
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'), {
                        'lock': name,
                        'path': lock_file_path
                    })
            else:
                yield sem

        finally:
            local.strong_store.locks_held.remove(name)
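
A usage sketch for lock() (hedged: the body uses yield, so this assumes the function is wrapped with contextlib.contextmanager as in oslo-incubator lockutils; the critical-section call is hypothetical):

    with lock('instance-cleanup', lock_file_prefix='nova-', external=True):
        # Exclusive across all workers on this host; the lock file lives
        # under lock_path or CONF.lock_path.
        do_cleanup()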
Example #51
0
    def cloudlet_base(self, context, instance, vm_name, disk_meta_id,
                      memory_meta_id, diskhash_meta_id, memoryhash_meta_id,
                      update_task_state):
        """create base vm and save it to glance
        """
        try:
            if hasattr(self, "_lookup_by_name"):
                # icehouse
                virt_dom = self._lookup_by_name(instance['name'])
            else:
                # kilo
                virt_dom = self._host.get_domain(instance)
        except exception.InstanceNotFound:
            raise exception.InstanceNotRunning(instance_id=instance['uuid'])

        # pause VM
        self.pause(instance)

        (image_service,
         image_id) = glance.get_remote_image_service(context,
                                                     instance['image_ref'])

        disk_metadata = self._get_snapshot_metadata(virt_dom, context,
                                                    instance, disk_meta_id)
        mem_metadata = self._get_snapshot_metadata(virt_dom, context, instance,
                                                   memory_meta_id)
        diskhash_metadata = self._get_snapshot_metadata(
            virt_dom, context, instance, diskhash_meta_id)
        memhash_metadata = self._get_snapshot_metadata(virt_dom, context,
                                                       instance,
                                                       memoryhash_meta_id)

        disk_path = libvirt_utils.find_disk(virt_dom)
        source_format = libvirt_utils.get_disk_type(disk_path)
        snapshot_name = uuid.uuid4().hex
        (state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
        state = libvirt_driver.LIBVIRT_POWER_STATE[state]

        # creating base vm requires cold snapshotting
        snapshot_backend = self.image_backend.snapshot(
            disk_path, image_type=source_format)

        LOG.info(_("Beginning cold snapshot process"), instance=instance)
        # not available at icehouse
        # snapshot_backend.snapshot_create()

        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD,
                          expected_state=None)
        snapshot_directory = libvirt_driver.CONF.libvirt.snapshots_directory
        fileutils.ensure_tree(snapshot_directory)
        with utils.tempdir(dir=snapshot_directory) as tmpdir:
            try:
                out_path = os.path.join(tmpdir, snapshot_name)
                # At this point, base vm should be "raw" format
                snapshot_backend.snapshot_extract(out_path, "raw")
            finally:
                # snapshotting logic is changed since icehouse.
                #  : cannot find snapshot_create and snapshot_delete.
                # snapshot_extract is replacing these two operations.
                # snapshot_backend.snapshot_delete()
                LOG.info(_("Snapshot extracted, beginning image upload"),
                         instance=instance)

            # generate memory snapshot and hash lists
            basemem_path = os.path.join(tmpdir, snapshot_name + "-mem")
            diskhash_path = os.path.join(tmpdir, snapshot_name + "-disk_hash")
            memhash_path = os.path.join(tmpdir, snapshot_name + "-mem_hash")

            update_task_state(task_state=task_states.IMAGE_UPLOADING,
                              expected_state=task_states.IMAGE_PENDING_UPLOAD)
            synthesis._create_baseVM(self._conn,
                                     virt_dom,
                                     out_path,
                                     basemem_path,
                                     diskhash_path,
                                     memhash_path,
                                     nova_util=libvirt_utils)

            self._update_to_glance(context, image_service, out_path,
                                   disk_meta_id, disk_metadata)
            LOG.info(_("Base disk upload complete"), instance=instance)
            self._update_to_glance(context, image_service, basemem_path,
                                   memory_meta_id, mem_metadata)
            LOG.info(_("Base memory image upload complete"), instance=instance)
            self._update_to_glance(context, image_service, diskhash_path,
                                   diskhash_meta_id, diskhash_metadata)
            LOG.info(_("Base disk hash upload complete"), instance=instance)
            self._update_to_glance(context, image_service, memhash_path,
                                   memoryhash_meta_id, memhash_metadata)
            LOG.info(_("Base memory hash upload complete"), instance=instance)
Example #52
0
    def _spawn_using_synthesis(self, context, instance, xml, image_meta,
                               overlay_url):
        # download vm overlay
        overlay_package = VMOverlayPackage(overlay_url)
        meta_raw = overlay_package.read_meta()
        meta_info = msgpack.unpackb(meta_raw)
        basevm_sha256 = meta_info.get(Cloudlet_Const.META_BASE_VM_SHA256, None)
        image_properties = image_meta.get("properties", None)
        if image_properties is None:
            msg = "image does not have properties for cloudlet metadata"
            raise exception.ImageNotFound(msg)
        image_sha256 = image_properties.get(CloudletAPI.PROPERTY_KEY_BASE_UUID)

        # check basevm
        if basevm_sha256 != image_sha256:
            msg = "requested base vm is not compatible with openstack base disk %s != %s" \
                % (basevm_sha256, image_sha256)
            raise exception.ImageNotFound(msg)
        memory_snap_id = str(
            image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM))
        diskhash_snap_id = str(
            image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_DISK_HASH))
        memhash_snap_id = str(
            image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM_HASH))
        basedisk_path = self._get_cache_image(context, instance,
                                              image_meta['id'])
        basemem_path = self._get_cache_image(context, instance, memory_snap_id)
        diskhash_path = self._get_cache_image(context, instance,
                                              diskhash_snap_id)
        memhash_path = self._get_cache_image(context, instance,
                                             memhash_snap_id)

        # download blob
        fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
        decomp_overlay = os.path.join(
            libvirt_utils.get_instance_path(instance), 'decomp_overlay')

        meta_info = compression.decomp_overlayzip(overlay_url, decomp_overlay)

        # recover VM
        launch_disk, launch_mem, fuse, delta_proc, fuse_proc = \
            synthesis.recover_launchVM(basedisk_path, meta_info,
                                       decomp_overlay,
                                       base_mem=basemem_path,
                                       base_diskmeta=diskhash_path,
                                       base_memmeta=memhash_path)
        # resume VM
        LOG.info(_("Starting VM synthesis"), instance=instance)
        synthesized_vm = synthesis.SynthesizedVM(launch_disk,
                                                 launch_mem,
                                                 fuse,
                                                 disk_only=False,
                                                 qemu_args=False,
                                                 nova_xml=xml,
                                                 nova_conn=self._conn,
                                                 nova_util=libvirt_utils)
        # testing non-thread resume
        delta_proc.start()
        fuse_proc.start()
        delta_proc.join()
        fuse_proc.join()
        LOG.info(_("Finish VM synthesis"), instance=instance)
        synthesized_vm.resume()
        # rettach NIC
        synthesis.rettach_nic(synthesized_vm.machine,
                              synthesized_vm.old_xml_str, xml)

        return synthesized_vm
Example #53
0
    def _spawn_using_handoff(self, context, instance, xml, image_meta,
                             handoff_info):
        image_properties = image_meta.get("properties", None)
        memory_snap_id = str(
            image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM))
        diskhash_snap_id = str(
            image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_DISK_HASH))
        memhash_snap_id = str(
            image_properties.get(CloudletAPI.IMAGE_TYPE_BASE_MEM_HASH))
        basedisk_path = self._get_cache_image(context, instance,
                                              image_meta['id'])
        basemem_path = self._get_cache_image(context, instance, memory_snap_id)
        diskhash_path = self._get_cache_image(context, instance,
                                              diskhash_snap_id)
        memhash_path = self._get_cache_image(context, instance,
                                             memhash_snap_id)
        base_vm_paths = [
            basedisk_path, basemem_path, diskhash_path, memhash_path
        ]
        image_sha256 = image_properties.get(CloudletAPI.PROPERTY_KEY_BASE_UUID)

        snapshot_directory = libvirt_driver.CONF.libvirt.snapshots_directory
        fileutils.ensure_tree(snapshot_directory)
        synthesized_vm = None
        with utils.tempdir(dir=snapshot_directory) as tmpdir:
            uuidhex = uuid.uuid4().hex
            launch_diskpath = os.path.join(tmpdir, uuidhex + "-launch-disk")
            launch_memorypath = os.path.join(tmpdir,
                                             uuidhex + "-launch-memory")
            tmp_dir = mkdtemp(prefix="cloudlet-residue-")
            handoff_recv_datafile = os.path.join(tmp_dir, "handoff-data")
            # recv handoff data and synthesize disk img and memory snapshot
            try:
                ret_values = self._handoff_recv(base_vm_paths, image_sha256,
                                                handoff_recv_datafile,
                                                launch_diskpath,
                                                launch_memorypath)
                # start VM
                launch_disk_size, launch_memory_size, \
                    disk_overlay_map, memory_overlay_map = ret_values
                synthesized_vm = self._handoff_launch_vm(
                    xml,
                    basedisk_path,
                    basemem_path,
                    launch_diskpath,
                    launch_memorypath,
                    int(launch_disk_size),
                    int(launch_memory_size),
                    disk_overlay_map,
                    memory_overlay_map,
                )

                # reattach NIC
                synthesis.rettach_nic(synthesized_vm.machine,
                                      synthesized_vm.old_xml_str, xml)
            except handoff.HandoffError as e:
                msg = "failed to perform VM handoff:\n"
                msg += str(e)
                raise exception.ImageNotFound(msg)
            finally:
                if os.path.exists(tmp_dir):
                    shutil.rmtree(tmp_dir)
                if os.path.exists(launch_diskpath):
                    os.remove(launch_diskpath)
                if os.path.exists(launch_memorypath):
                    os.remove(launch_memorypath)
        return synthesized_vm
Example #54
0
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              network_info=None,
              block_device_info=None):
        LOG.info('vcloud create vm started at %s' %
                 (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))

        image_cache_dir = CONF.vcloud.vcloud_conversion_dir
        volume_cache_dir = CONF.vcloud.vcloud_volumes_dir

        # update port bind host
        self._binding_host(context, network_info, instance.uuid)

        this_conversion_dir = '%s/%s' % (CONF.vcloud.vcloud_conversion_dir,
                                         instance.uuid)
        fileutils.ensure_tree(this_conversion_dir)
        os.chdir(this_conversion_dir)
        # 0. create metadata iso and upload to vcloud
        rabbit_host = CONF.rabbit_host
        if 'localhost' in rabbit_host or '127.0.0.1' in rabbit_host:
            rabbit_host = CONF.rabbit_hosts[0]
        if ':' in rabbit_host:
            rabbit_host = rabbit_host[0:rabbit_host.find(':')]
        iso_file = common_tools.create_user_data_iso(
            "userdata.iso", {
                "rabbit_userid": CONF.rabbit_userid,
                "rabbit_password": CONF.rabbit_password,
                "rabbit_host": rabbit_host,
                "host": instance.uuid,
                "tunnel_cidr": CONF.vcloud.tunnel_cidr,
                "route_gw": CONF.vcloud.route_gw
            }, this_conversion_dir)
        vapp_name = self._get_vcloud_vapp_name(instance)
        metadata_iso = self._vcloud_client.upload_metadata_iso(
            iso_file, vapp_name)

        # 0. get vorg, user name, password and vdc from the configuration
        # file (only one org)

        # 1.1 get image id, vm info, flavor info
        # image_uuid = instance.image_ref
        if 'id' in image_meta:
            # create from image
            image_uuid = image_meta['id']
        else:
            # create from volume
            image_uuid = image_meta['properties']['image_id']

        # NOTE(nkapotoxin): create vapp with vapptemplate
        network_names = [
            CONF.vcloud.provider_tunnel_network_name,
            CONF.vcloud.provider_base_network_name
        ]
        network_configs = self._vcloud_client.get_network_configs(
            network_names)

        # create vapp
        if CONF.vcloud.use_link_clone:
            vapp = self._vcloud_client.create_vapp(vapp_name, image_uuid,
                                                   network_configs)
        else:
            vapp = self._vcloud_client.create_vapp(
                vapp_name,
                image_uuid,
                network_configs,
                root_gb=instance.get_flavor().root_gb)

        # generate the network_connection
        network_connections = self._vcloud_client.get_network_connections(
            vapp, network_names)

        # update network
        self._vcloud_client.update_vms_connections(vapp, network_connections)

        # update vm specification
        self._vcloud_client.modify_vm_cpu(vapp, instance.get_flavor().vcpus)
        self._vcloud_client.modify_vm_memory(vapp,
                                             instance.get_flavor().memory_mb)

        # mount it
        self._vcloud_client.insert_media(vapp_name, metadata_iso)

        # power on it
        self._vcloud_client.power_on_vapp(vapp_name)

        # 7. clean up
        shutil.rmtree(this_conversion_dir, ignore_errors=True)
        LOG.info('vcloud create vm finished at %s' %
                 (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))

        # update port bind host
        self._binding_host(context, network_info, instance.uuid)
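
The rabbit_host normalization at the top of spawn() (prefer a concrete address over localhost, then strip any ':port' suffix) factored into a testable sketch (hedged: the helper is hypothetical):

    def pick_rabbit_host(rabbit_host, rabbit_hosts):
        if 'localhost' in rabbit_host or '127.0.0.1' in rabbit_host:
            rabbit_host = rabbit_hosts[0]
        if ':' in rabbit_host:
            rabbit_host = rabbit_host[:rabbit_host.find(':')]
        return rabbit_host

    assert pick_rabbit_host('10.0.0.5:5672', []) == '10.0.0.5'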
Example #55
0
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set, then
    CONF.lock_path is used as a default.
    """
    with _semaphores_lock:
        try:
            sem = _semaphores[name]
        except KeyError:
            sem = threading.Semaphore()
            _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                def add_prefix(name, prefix):
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path, lock_file_name)

                try:
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'), {
                            'lock': name,
                            'path': lock_file_path
                        })
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'), {
                        'lock': name,
                        'path': lock_file_path
                    })
            else:
                yield sem

        finally:
            local.strong_store.locks_held.remove(name)
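
A complementary usage sketch for this variant (hedged, as with Example #50: it assumes the contextlib.contextmanager wrapper): without external=True the context manager yields the per-name semaphore, so exclusion holds only within the current process:

    with lock('cache-refresh'):
        refresh_local_cache()  # hypothetical critical-section call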