def test__cache_instance_images_master_path(self):
    """Exercise pxe._cache_instance_image with a master cache configured.

    The image should be fetched once into the master cache directory and
    the instance path derived from the configured images_path.
    """
    temp_dir = tempfile.mkdtemp()
    CONF.set_default('images_path', temp_dir, group='pxe')
    CONF.set_default('instance_master_path',
                     os.path.join(temp_dir, 'instance_master_path'),
                     group='pxe')
    fileutils.ensure_tree(CONF.pxe.instance_master_path)
    # Pre-create the temp file handed back by the stubbed mkstemp below.
    fd, tmp_master_image = tempfile.mkstemp(
        dir=CONF.pxe.instance_master_path)
    self.mox.StubOutWithMock(images, 'fetch_to_raw')
    self.mox.StubOutWithMock(tempfile, 'mkstemp')
    self.mox.StubOutWithMock(service_utils, 'parse_image_ref')
    # Recorded expectations: mkstemp -> fetch_to_raw -> parse_image_ref,
    # in the order _cache_instance_image is expected to call them.
    tempfile.mkstemp(dir=CONF.pxe.instance_master_path).\
        AndReturn((fd, tmp_master_image))
    images.fetch_to_raw(None, 'glance://image_uuid',
                        tmp_master_image, None).\
        AndReturn(None)
    service_utils.parse_image_ref('glance://image_uuid').\
        AndReturn(('image_uuid', None, None, None))
    self.mox.ReplayAll()
    (uuid, image_path) = pxe._cache_instance_image(None, self.node)
    self.mox.VerifyAll()
    self.assertEqual(uuid, 'glance://image_uuid')
    self.assertEqual(image_path, temp_dir + '/fake_instance_name/disk')
def _download(self, image_href, data=None, method='data'):
    """Calls out to Glance for data and writes data.

    :param image_href: The opaque image identifier (href or UUID).
    :param data: (Optional) File object to write data to.
    :param method: (Optional) Glance client method used to fetch chunks.
    :returns: iterable of image chunks when ``data`` is None, otherwise
        None (the image content is written to ``data``).
    """
    image_id = service_utils.parse_image_ref(image_href)[0]
    if (self.version == 2 and
            'file' in CONF.glance.allowed_direct_url_schemes):
        location = self._get_location(image_id)
        url = urlparse.urlparse(location)
        if url.scheme == "file":
            # NOTE: image content is binary; open in binary mode so the
            # descriptor handed to sendfile() is not wrapped in a text
            # decoder (text mode is wrong for raw image bytes on py3).
            with open(url.path, "rb") as f:
                filesize = os.path.getsize(f.name)
                # Zero-copy kernel-level transfer into the destination
                # file object's descriptor.
                sendfile.sendfile(data.fileno(), f.fileno(), 0, filesize)
            return
    image_chunks = self.call(method, image_id)
    if data is None:
        return image_chunks
    else:
        for chunk in image_chunks:
            data.write(chunk)
def _download(self, image_id, data=None, method='data'):
    """Calls out to Glance for data and writes data.

    :param image_id: The opaque image identifier (href or UUID).
    :param data: (Optional) File object to write data to.
    :param method: (Optional) Glance client method used to fetch chunks.
    :returns: iterable of image chunks when ``data`` is None, otherwise
        None (the image content is written to ``data``).
    """
    (image_id, self.glance_host,
     self.glance_port, use_ssl) = service_utils.parse_image_ref(image_id)
    if self.version == 2 \
            and 'file' in CONF.glance.allowed_direct_url_schemes:
        location = self._get_location(image_id)
        url = urlparse.urlparse(location)
        if url.scheme == "file":
            # NOTE: must be binary mode — copyfileobj through a
            # text-mode handle would attempt to decode raw image bytes
            # and raise or corrupt the output on Python 3.
            with open(url.path, "rb") as f:
                #TODO(ghe): Use system call for downloading files.
                # Bug #1199522
                # FIXME(jbresnah) a system call to cp could have
                # significant performance advantages, however we
                # do not have the path to files at this point in
                # the abstraction.
                shutil.copyfileobj(f, data)
            return
    image_chunks = self.call(method, image_id)
    if data is None:
        return image_chunks
    else:
        for chunk in image_chunks:
            data.write(chunk)
def _update(self, image_id, image_meta, data=None, method='update',
            purge_props=False):
    """Modify the given image with the new data.

    :param image_id: The opaque image identifier.
    :param image_meta: Metadata dict to apply; translated to Glance
        format before the call. Falsy values mean "no metadata".
    :param data: (Optional) File object to update data from.
    :param method: (Optional) Glance client method used for the update.
    :param purge_props: (Optional=False) Purge existing properties.
    :returns: dict -- New created image metadata
    """
    (image_id, self.glance_host,
     self.glance_port, use_ssl) = service_utils.parse_image_ref(image_id)
    if image_meta:
        image_meta = service_utils.translate_to_glance(image_meta)
    else:
        image_meta = {}
    if self.version == 1:
        # v1 takes purge_props and the payload inline with the update.
        image_meta['purge_props'] = purge_props
        if data:
            image_meta['data'] = data
    #NOTE(bcwaldon): id is not an editable field, but it is likely to be
    # passed in by calling code. Let's be nice and ignore it.
    image_meta.pop('id', None)
    image_meta = self.call(method, image_id, **image_meta)
    if self.version == 2 and data:
        # v2 uploads the payload in a separate call, then the metadata
        # is re-read so the returned dict reflects the upload.
        self.call('upload', image_id, data)
        image_meta = self._show(image_id)
    return image_meta
def wrapper(self, *args, **kwargs):
    """Lazily build the Glance client, then delegate to the wrapped call.

    :param image_href: href that describes the location of an image
    """
    if not self.client:
        image_href = kwargs.get('image_href')
        (image_id, self.glance_host,
         self.glance_port, use_ssl) = service_utils.parse_image_ref(
            image_href)
        scheme = 'https' if use_ssl else 'http'
        params = {'insecure': CONF.glance.glance_api_insecure}
        if CONF.glance.auth_strategy == 'keystone':
            params['token'] = self.context.auth_token
        endpoint = '%s://%s:%s' % (scheme, self.glance_host,
                                   self.glance_port)
        self.client = client.Client(self.version, endpoint, **params)
    return func(self, *args, **kwargs)
def _destroy_images(d_info, node_uuid):
    """Remove a node's instance image, its directory and master link.

    :param d_info: deploy info dict holding the 'image_source' ref
    :param node_uuid: UUID of the node whose image files are removed
    """
    # Resolve the master-cache entry name before deleting anything so a
    # parse failure leaves the files untouched.
    image_uuid = service_utils.parse_image_ref(d_info['image_source'])[0]
    utils.unlink_without_raise(_get_image_file_path(node_uuid))
    utils.rmtree_without_raise(_get_image_dir_path(node_uuid))
    _unlink_master_image(
        os.path.join(CONF.pxe.instance_master_path, image_uuid))
def fetch_image(self, uuid, dest_path, ctx=None, force_raw=True):
    """Fetch image with given uuid to the destination path.

    Does nothing if destination path exists. Only creates a link if
    master image for this UUID is already in cache. Otherwise downloads
    an image and also stores it in cache.

    :param uuid: image UUID or href to fetch
    :param dest_path: destination file path
    :param ctx: context
    :param force_raw: boolean value, whether to convert the image to raw
                      format
    """
    img_download_lock_name = 'download-image'
    if self.master_dir is None:
        # NOTE(ghe): We don't share images between instances/hosts
        if not CONF.parallel_image_downloads:
            with lockutils.lock(img_download_lock_name, 'ironic-'):
                _fetch(ctx, uuid, dest_path, self._image_service,
                       force_raw)
        else:
            _fetch(ctx, uuid, dest_path, self._image_service, force_raw)
        return
    # TODO(ghe): have hard links and counts the same behaviour in all fs
    master_file_name = service_utils.parse_image_ref(uuid)[0]
    master_path = os.path.join(self.master_dir, master_file_name)
    if CONF.parallel_image_downloads:
        # Per-image lock name so different images download concurrently.
        img_download_lock_name = 'download-image:%s' % master_file_name
    # TODO(dtantsur): lock expiration time
    with lockutils.lock(img_download_lock_name, 'ironic-'):
        if os.path.exists(dest_path):
            LOG.debug("Destination %(dest)s already exists for "
                      "image %(uuid)s" %
                      {'uuid': uuid, 'dest': dest_path})
            return
        try:
            # NOTE(dtantsur): ensure we're not in the middle of clean up
            with lockutils.lock('master_image', 'ironic-'):
                os.link(master_path, dest_path)
        except OSError:
            # Hard link failed -> image not in master cache; fall
            # through to download below.
            LOG.info(_LI("Master cache miss for image %(uuid)s, "
                         "starting download"), {'uuid': uuid})
        else:
            LOG.debug("Master cache hit for image %(uuid)s",
                      {'uuid': uuid})
            return
        self._download_image(uuid, master_path, dest_path, ctx=ctx,
                             force_raw=force_raw)
    # NOTE(dtantsur): we increased cache size - time to clean up
    self.clean_up()
def _delete(self, image_id, method="delete"):
    """Delete the given image.

    :param image_id: The opaque image identifier.
    :raises: ImageNotFound if the image does not exist.
    :raises: NotAuthorized if the user is not an owner.
    :raises: ImageNotAuthorized if the user is not authorized.
    """
    # Only the parsed id is needed here; host/port/ssl are ignored.
    parsed = service_utils.parse_image_ref(image_id)
    self.call(method, parsed[0])
def fetch_image(self, uuid, dest_path, ctx=None):
    """Fetch image with given uuid to the destination path.

    Does nothing if destination path exists. Only creates a link if
    master image for this UUID is already in cache. Otherwise downloads
    an image and also stores it in cache.

    :param uuid: image UUID or href to fetch
    :param dest_path: destination file path
    :param ctx: context
    """
    if self._master_dir is None:
        #NOTE(ghe): We don't share images between instances/hosts
        images.fetch_to_raw(ctx, uuid, dest_path, self._image_service)
        return
    #TODO(ghe): have hard links and counts the same behaviour in all fs
    master_file_name = service_utils.parse_image_ref(uuid)[0]
    master_path = os.path.join(self._master_dir, master_file_name)
    # NOTE(dtantsur): hold only specific lock, so that we don't
    # serialize _all_ downloads; take general lock when required
    # TODO(dtantsur): lock expiration time
    with lockutils.lock('download-image:%s' % master_file_name,
                        'ironic-'):
        if os.path.exists(dest_path):
            LOG.debug(_("Destination %(dest)s already exists for "
                        "image %(uuid)s") %
                      {'uuid': uuid, 'dest': dest_path})
            return
        try:
            # NOTE(dtantsur): ensure we're not in the middle of clean up
            with lockutils.lock('master_image', 'ironic-'):
                os.link(master_path, dest_path)
        except OSError:
            # Link failed -> no cached master image; download it below.
            LOG.info(_("Master cache miss for image %(uuid)s, "
                       "starting download") % {'uuid': uuid})
        else:
            LOG.debug(_("Master cache hit for image %(uuid)s") %
                      {'uuid': uuid})
            return
        self._download_image(uuid, master_path, dest_path, ctx=ctx)
    # NOTE(dtantsur): we increased cache size - time to clean up
    self.clean_up()
def _show(self, image_href, method="get"):
    """Returns a dict with image data for the given opaque image id.

    :param image_href: The opaque image identifier (href or UUID).
    :returns: A dict containing image metadata.
    :raises: ImageNotFound
    """
    LOG.debug("Getting image metadata from glance. Image: %s"
              % image_href)
    # NOTE(review): parse_image_ref also yields the Glance host/port,
    # stored on self — presumably consumed by later client calls; verify.
    (image_id, self.glance_host,
     self.glance_port, use_ssl) = service_utils.parse_image_ref(
        image_href)
    image = self.call(method, image_id)
    if not service_utils.is_image_available(self.context, image):
        raise exception.ImageNotFound(image_id=image_id)
    base_image_meta = service_utils.translate_from_glance(image)
    return base_image_meta
def _get_image(ctx, path, uuid, master_path=None, image_service=None):
    """Fetch an image to *path*, optionally via a shared master cache.

    :param ctx: request context passed to the image service
    :param path: destination file path for the image
    :param uuid: image UUID or href to fetch
    :param master_path: (Optional) directory of the shared master image
        cache; when None the image is downloaded directly to *path*
    :param image_service: (Optional) image service used for the download
    """
    #TODO(ghe): Revise this logic and document process Bug #1199665
    # When master_path defined, we save the images in this dir using the
    # image uuid as the file name. Deployments that use this images,
    # creates a hard link to keep track of this. When the link count of a
    # master image is equal to 1, can be deleted.
    #TODO(ghe): have hard links and count links the same behaviour in all fs
    #TODO(ghe): timeout and retry for downloads
    def _wait_for_download():
        # Stop polling once the lock file is gone, i.e. the concurrent
        # download has completed.
        if not os.path.exists(lock_file):
            raise loopingcall.LoopingCallDone()
    # If the download of the image needed is in progress (lock file
    # present) we wait until the locks disappears and create the link.
    if master_path is None:
        #NOTE(ghe): We don't share images between instances/hosts
        images.fetch_to_raw(ctx, uuid, path, image_service)
    else:
        master_uuid = os.path.join(master_path,
                                   service_utils.parse_image_ref(uuid)[0])
        # NOTE(review): master_uuid is already a joined path, so this
        # second join relies on os.path.join semantics for the resulting
        # lock location — verify lock files land where intended.
        lock_file = os.path.join(master_path, master_uuid + '.lock')
        _link_master_image(master_uuid, path)
        if not os.path.exists(path):
            fileutils.ensure_tree(master_path)
            if not _download_in_progress(lock_file):
                with fileutils.remove_path_on_error(lock_file):
                    #TODO(ghe): logging when image cannot be created
                    fd, tmp_path = tempfile.mkstemp(dir=master_path)
                    os.close(fd)
                    images.fetch_to_raw(ctx, uuid, tmp_path,
                                        image_service)
                    _create_master_image(tmp_path, master_uuid, path)
                _remove_download_in_progress_lock(lock_file)
            else:
                #TODO(ghe): expiration time
                timer = loopingcall.FixedIntervalLoopingCall(
                    _wait_for_download)
                timer.start(interval=1).wait()
                _link_master_image(master_uuid, path)
def wrapper(self, *args, **kwargs):
    """Build the Glance client on first use, then delegate to ``func``.

    :param image_href: href that describes the location of an image
    """
    if not self.client:
        image_href = kwargs.get('image_href')
        _id, self.endpoint, use_ssl = service_utils.parse_image_ref(
            image_href)
        insecure = CONF.glance.glance_api_insecure
        params = {'insecure': insecure}
        # A CA bundle only makes sense for a verified SSL endpoint.
        if use_ssl and CONF.glance.glance_cafile and not insecure:
            params['cacert'] = CONF.glance.glance_cafile
        if CONF.glance.auth_strategy == 'keystone':
            params['token'] = self.context.auth_token
        self.client = client.Client(self.version, self.endpoint, **params)
    return func(self, *args, **kwargs)
def wrapper(self, *args, **kwargs):
    """Construct a Glance client lazily and invoke the wrapped method.

    :param image_href: href that describes the location of an image
    """
    if self.client:
        return func(self, *args, **kwargs)
    parsed = service_utils.parse_image_ref(kwargs.get("image_href"))
    image_id, self.glance_host, self.glance_port, use_ssl = parsed
    scheme = "https" if use_ssl else "http"
    params = {"insecure": CONF.glance.glance_api_insecure}
    if CONF.glance.auth_strategy == "keystone":
        params["token"] = self.context.auth_token
    endpoint = "%s://%s:%s" % (scheme, self.glance_host, self.glance_port)
    self.client = client.Client(self.version, endpoint, **params)
    return func(self, *args, **kwargs)
def test_parse_image_ref_no_ssl(self):
    """A plain-http href parses to (id, host, port, use_ssl=False)."""
    href = 'http://127.0.0.1:9292/image_path/image_uuid'
    expected = ('image_uuid', '127.0.0.1', 9292, False)
    self.assertEqual(expected, service_utils.parse_image_ref(href))
def test_parse_image_ref_ssl(self):
    """An https href with a non-ASCII image id sets use_ssl=True."""
    href = (u"https://127.0.0.1:9292/image_path/"
            u"image_\u00F9\u00FA\u00EE\u0111")
    expected = (u"image_\u00F9\u00FA\u00EE\u0111", "127.0.0.1", 9292,
                True)
    self.assertEqual(expected, service_utils.parse_image_ref(href))
def fetch_image(self, href, dest_path, ctx=None, force_raw=True):
    """Fetch image by given href to the destination path.

    Does nothing if destination path exists and is up to date with cache
    and href contents. Only creates a hard link (dest_path) to cached
    image if requested image is already in cache and up to date with href
    contents. Otherwise downloads an image, stores it in cache and
    creates a hard link (dest_path) to it.

    :param href: image UUID or href to fetch
    :param dest_path: destination file path
    :param ctx: context
    :param force_raw: boolean value, whether to convert the image to raw
                      format
    """
    img_download_lock_name = 'download-image'
    if self.master_dir is None:
        # NOTE(ghe): We don't share images between instances/hosts
        if not CONF.parallel_image_downloads:
            with lockutils.lock(img_download_lock_name, 'ironic-'):
                _fetch(ctx, href, dest_path, force_raw)
        else:
            _fetch(ctx, href, dest_path, force_raw)
        return
    # TODO(ghe): have hard links and counts the same behaviour in all fs
    # NOTE(vdrok): File name is converted to UUID if it's not UUID
    # already, so that two images with same file names do not collide
    if service_utils.is_glance_image(href):
        master_file_name = service_utils.parse_image_ref(href)[0]
    else:
        # NOTE(vdrok): Doing conversion of href in case it's unicode
        # string, UUID cannot be generated for unicode strings on
        # python 2.
        href_encoded = href.encode('utf-8') if six.PY2 else href
        master_file_name = str(uuid.uuid5(uuid.NAMESPACE_URL,
                                          href_encoded))
    master_path = os.path.join(self.master_dir, master_file_name)
    if CONF.parallel_image_downloads:
        # Per-image lock name so different images download concurrently.
        img_download_lock_name = 'download-image:%s' % master_file_name
    # TODO(dtantsur): lock expiration time
    with lockutils.lock(img_download_lock_name, 'ironic-'):
        # NOTE(vdrok): After rebuild requested image can change, so we
        # should ensure that dest_path and master_path (if exists) are
        # pointing to the same file and their content is up to date
        cache_up_to_date = _delete_master_path_if_stale(master_path,
                                                        href, ctx)
        dest_up_to_date = _delete_dest_path_if_stale(master_path,
                                                     dest_path)
        if cache_up_to_date and dest_up_to_date:
            LOG.debug("Destination %(dest)s already exists "
                      "for image %(href)s",
                      {'href': href, 'dest': dest_path})
            return
        if cache_up_to_date:
            # NOTE(dtantsur): ensure we're not in the middle of clean up
            with lockutils.lock('master_image', 'ironic-'):
                os.link(master_path, dest_path)
            LOG.debug("Master cache hit for image %(href)s",
                      {'href': href})
            return
        LOG.info(_LI("Master cache miss for image %(href)s, "
                     "starting download"), {'href': href})
        self._download_image(href, master_path, dest_path, ctx=ctx,
                             force_raw=force_raw)
    # NOTE(dtantsur): we increased cache size - time to clean up
    self.clean_up()
def test_parse_image_ref_no_ssl(self):
    """A plain-http href yields (image id, endpoint, use_ssl=False)."""
    href = (u'http://127.0.0.1:9292/image_path/'
            u'image_\u00F9\u00FA\u00EE\u0111')
    expected = (u'image_\u00F9\u00FA\u00EE\u0111',
                'http://127.0.0.1:9292', False)
    self.assertEqual(expected, service_utils.parse_image_ref(href))
def test_parse_image_ref_ssl(self):
    """An https href parses into id/host/port with use_ssl=True."""
    parsed = service_utils.parse_image_ref(
        'https://127.0.0.1:9292/image_path/image_uuid')
    self.assertEqual(parsed, ('image_uuid', '127.0.0.1', 9292, True))
def fetch_image(self, href, dest_path, ctx=None, force_raw=True):
    """Fetch image by given href to the destination path.

    Does nothing if destination path exists and is up to date with cache
    and href contents. Only creates a hard link (dest_path) to cached
    image if requested image is already in cache and up to date with href
    contents. Otherwise downloads an image, stores it in cache and
    creates a hard link (dest_path) to it.

    :param href: image UUID or href to fetch
    :param dest_path: destination file path
    :param ctx: context
    :param force_raw: boolean value, whether to convert the image to raw
                      format
    """
    img_download_lock_name = 'download-image'
    if self.master_dir is None:
        # NOTE(ghe): We don't share images between instances/hosts
        if not CONF.parallel_image_downloads:
            with lockutils.lock(img_download_lock_name, 'ironic-'):
                _fetch(ctx, href, dest_path, force_raw)
        else:
            _fetch(ctx, href, dest_path, force_raw)
        return
    # TODO(ghe): have hard links and counts the same behaviour in all fs
    # NOTE(vdrok): File name is converted to UUID if it's not UUID
    # already, so that two images with same file names do not collide
    if service_utils.is_glance_image(href):
        master_file_name = service_utils.parse_image_ref(href)[0]
    else:
        # NOTE(vdrok): Doing conversion of href in case it's unicode
        # string, UUID cannot be generated for unicode strings on
        # python 2.
        href_encoded = href.encode('utf-8') if six.PY2 else href
        master_file_name = str(uuid.uuid5(uuid.NAMESPACE_URL,
                                          href_encoded))
    master_path = os.path.join(self.master_dir, master_file_name)
    if CONF.parallel_image_downloads:
        # Per-image lock name so different images download concurrently.
        img_download_lock_name = 'download-image:%s' % master_file_name
    # TODO(dtantsur): lock expiration time
    with lockutils.lock(img_download_lock_name, 'ironic-'):
        # NOTE(vdrok): After rebuild requested image can change, so we
        # should ensure that dest_path and master_path (if exists) are
        # pointing to the same file and their content is up to date
        cache_up_to_date = _delete_master_path_if_stale(master_path,
                                                        href, ctx)
        dest_up_to_date = _delete_dest_path_if_stale(master_path,
                                                     dest_path)
        if cache_up_to_date and dest_up_to_date:
            LOG.debug("Destination %(dest)s already exists "
                      "for image %(href)s",
                      {'href': href, 'dest': dest_path})
            return
        if cache_up_to_date:
            # NOTE(dtantsur): ensure we're not in the middle of clean up
            with lockutils.lock('master_image', 'ironic-'):
                os.link(master_path, dest_path)
            LOG.debug("Master cache hit for image %(href)s",
                      {'href': href})
            return
        LOG.info(_LI("Master cache miss for image %(href)s, "
                     "starting download"), {'href': href})
        self._download_image(
            href, master_path, dest_path, ctx=ctx, force_raw=force_raw)
    # NOTE(dtantsur): we increased cache size - time to clean up
    self.clean_up()
def test_parse_image_ref_no_ssl(self):
    """Non-ASCII image id in an http href parses with use_ssl=False."""
    href = (u'http://127.0.0.1:9292/image_path/'
            u'image_\u00F9\u00FA\u00EE\u0111')
    expected = (u'image_\u00F9\u00FA\u00EE\u0111', '127.0.0.1', 9292,
                False)
    self.assertEqual(expected, service_utils.parse_image_ref(href))
def test_parse_image_ref_no_ssl(self):
    """An http href parses to (image id, host, port, use_ssl=False)."""
    result = service_utils.parse_image_ref(
        'http://127.0.0.1:9292/image_path/image_uuid')
    self.assertEqual(('image_uuid', '127.0.0.1', 9292, False), result)
def test_parse_image_ref_ssl(self):
    """An https href with a non-ASCII image id sets use_ssl=True."""
    href = ('https://127.0.0.1:9292/image_path/'
            u'image_\u00F9\u00FA\u00EE\u0111')
    expected = (u'image_\u00F9\u00FA\u00EE\u0111', '127.0.0.1', 9292,
                True)
    self.assertEqual(expected, service_utils.parse_image_ref(href))
def test_parse_image_ref_ssl(self):
    """An https href parses to (image id, host, port, use_ssl=True)."""
    result = service_utils.parse_image_ref(
        'https://127.0.0.1:9292/image_path/image_uuid')
    self.assertEqual(result, ('image_uuid', '127.0.0.1', 9292, True))
def fetch_image(self, href, dest_path, ctx=None, force_raw=True):
    """Fetch image by given href to the destination path.

    Does nothing if destination path exists and corresponds to a file
    that exists. Only creates a link if master image for this UUID is
    already in cache. Otherwise downloads an image and also stores it in
    cache.

    :param href: image UUID or href to fetch
    :param dest_path: destination file path
    :param ctx: context
    :param force_raw: boolean value, whether to convert the image to raw
                      format
    """
    img_download_lock_name = "download-image"
    if self.master_dir is None:
        # NOTE(ghe): We don't share images between instances/hosts
        if not CONF.parallel_image_downloads:
            with lockutils.lock(img_download_lock_name, "ironic-"):
                _fetch(ctx, href, dest_path, force_raw)
        else:
            _fetch(ctx, href, dest_path, force_raw)
        return
    # TODO(ghe): have hard links and counts the same behaviour in all fs
    # NOTE(vdrok): File name is converted to UUID if it's not UUID
    # already, so that two images with same file names do not collide
    if service_utils.is_glance_image(href):
        master_file_name = service_utils.parse_image_ref(href)[0]
    else:
        # NOTE(vdrok): Doing conversion of href in case it's unicode
        # string, UUID cannot be generated for unicode strings on
        # python 2.
        href_encoded = href.encode("utf-8") if six.PY2 else href
        master_file_name = str(uuid.uuid5(uuid.NAMESPACE_URL,
                                          href_encoded))
    master_path = os.path.join(self.master_dir, master_file_name)
    if CONF.parallel_image_downloads:
        # Per-image lock name so different images download concurrently.
        img_download_lock_name = "download-image:%s" % master_file_name
    # TODO(dtantsur): lock expiration time
    with lockutils.lock(img_download_lock_name, "ironic-"):
        if os.path.exists(dest_path):
            # NOTE(vdrok): After rebuild requested image can change, so
            # we should ensure that dest_path and master_path (if
            # exists) are pointing to the same file
            if os.path.exists(master_path) and (os.stat(dest_path).st_ino
                                                ==
                                                os.stat(master_path).st_ino):
                LOG.debug(
                    "Destination %(dest)s already exists for "
                    "image %(uuid)s" % {"uuid": href, "dest": dest_path}
                )
                return
            os.unlink(dest_path)
        try:
            # NOTE(dtantsur): ensure we're not in the middle of clean up
            with lockutils.lock("master_image", "ironic-"):
                os.link(master_path, dest_path)
        except OSError:
            # Hard link failed -> not in master cache; download below.
            LOG.info(_LI("Master cache miss for image %(uuid)s, "
                         "starting download"), {"uuid": href})
        else:
            LOG.debug("Master cache hit for image %(uuid)s",
                      {"uuid": href})
            return
        self._download_image(href, master_path, dest_path, ctx=ctx,
                             force_raw=force_raw)
    # NOTE(dtantsur): we increased cache size - time to clean up
    self.clean_up()
def fetch_image(self, href, dest_path, ctx=None, force_raw=True):
    """Fetch image by given href to the destination path.

    Does nothing if destination path exists and corresponds to a file
    that exists. Only creates a link if master image for this UUID is
    already in cache. Otherwise downloads an image and also stores it in
    cache.

    :param href: image UUID or href to fetch
    :param dest_path: destination file path
    :param ctx: context
    :param force_raw: boolean value, whether to convert the image to raw
                      format
    """
    img_download_lock_name = 'download-image'
    if self.master_dir is None:
        # NOTE(ghe): We don't share images between instances/hosts
        if not CONF.parallel_image_downloads:
            with lockutils.lock(img_download_lock_name, 'ironic-'):
                _fetch(ctx, href, dest_path, self._image_service,
                       force_raw)
        else:
            _fetch(ctx, href, dest_path, self._image_service, force_raw)
        return
    # TODO(ghe): have hard links and counts the same behaviour in all fs
    # NOTE(vdrok): File name is converted to UUID if it's not UUID
    # already, so that two images with same file names do not collide
    if service_utils.is_glance_image(href):
        master_file_name = service_utils.parse_image_ref(href)[0]
    else:
        # NOTE(vdrok): Doing conversion of href in case it's unicode
        # string, UUID cannot be generated for unicode strings on
        # python 2.
        master_file_name = str(uuid.uuid5(uuid.NAMESPACE_URL,
                                          href.encode('utf-8')))
    master_path = os.path.join(self.master_dir, master_file_name)
    if CONF.parallel_image_downloads:
        # Per-image lock name so different images download concurrently.
        img_download_lock_name = 'download-image:%s' % master_file_name
    # TODO(dtantsur): lock expiration time
    with lockutils.lock(img_download_lock_name, 'ironic-'):
        if os.path.exists(dest_path):
            # NOTE(vdrok): After rebuild requested image can change, so
            # we should ensure that dest_path and master_path (if
            # exists) are pointing to the same file
            if (os.path.exists(master_path) and
                    (os.stat(dest_path).st_ino ==
                     os.stat(master_path).st_ino)):
                LOG.debug("Destination %(dest)s already exists for "
                          "image %(uuid)s" %
                          {'uuid': href, 'dest': dest_path})
                return
            os.unlink(dest_path)
        try:
            # NOTE(dtantsur): ensure we're not in the middle of clean up
            with lockutils.lock('master_image', 'ironic-'):
                os.link(master_path, dest_path)
        except OSError:
            # Hard link failed -> not in master cache; download below.
            LOG.info(_LI("Master cache miss for image %(uuid)s, "
                         "starting download"), {'uuid': href})
        else:
            LOG.debug("Master cache hit for image %(uuid)s",
                      {'uuid': href})
            return
        self._download_image(
            href, master_path, dest_path, ctx=ctx, force_raw=force_raw)
    # NOTE(dtantsur): we increased cache size - time to clean up
    self.clean_up()
def test_parse_image_ref_ssl(self):
    """An https href yields (image id, endpoint, use_ssl=True)."""
    href = ('https://127.0.0.1:9292/image_path/'
            u'image_\u00F9\u00FA\u00EE\u0111')
    expected = (u'image_\u00F9\u00FA\u00EE\u0111',
                'https://127.0.0.1:9292', True)
    self.assertEqual(expected, service_utils.parse_image_ref(href))
def fetch_image(self, uuid, dest_path, ctx=None):
    """Fetch image with given uuid to the destination path.

    Does nothing if destination path exists. Only creates a link if
    master image for this UUID is already in cache. Otherwise downloads
    an image and also stores it in cache.

    :param uuid: image UUID or href to fetch
    :param dest_path: destination file path
    :param ctx: context
    """
    img_download_lock_name = 'download-image'
    if self.master_dir is None:
        #NOTE(ghe): We don't share images between instances/hosts
        if not CONF.parallel_image_downloads:
            with lockutils.lock(img_download_lock_name, 'ironic-'):
                _fetch_to_raw(ctx, uuid, dest_path, self._image_service)
        else:
            _fetch_to_raw(ctx, uuid, dest_path, self._image_service)
        return
    #TODO(ghe): have hard links and counts the same behaviour in all fs
    master_file_name = service_utils.parse_image_ref(uuid)[0]
    master_path = os.path.join(self.master_dir, master_file_name)
    if CONF.parallel_image_downloads:
        # Per-image lock name so different images download concurrently.
        img_download_lock_name = 'download-image:%s' % master_file_name
    # TODO(dtantsur): lock expiration time
    with lockutils.lock(img_download_lock_name, 'ironic-'):
        if os.path.exists(dest_path):
            # NOTE(vdrok): After rebuild requested image can change, so
            # we should ensure that dest_path and master_path (if
            # exists) are pointing to the same file
            if (os.path.exists(master_path) and
                    (os.stat(dest_path).st_ino ==
                     os.stat(master_path).st_ino)):
                LOG.debug("Destination %(dest)s already exists for "
                          "image %(uuid)s" %
                          {'uuid': uuid, 'dest': dest_path})
                return
            os.unlink(dest_path)
        try:
            # NOTE(dtantsur): ensure we're not in the middle of clean up
            with lockutils.lock('master_image', 'ironic-'):
                os.link(master_path, dest_path)
        except OSError:
            # Hard link failed -> not in master cache; download below.
            LOG.info(_LI("Master cache miss for image %(uuid)s, "
                         "starting download"), {'uuid': uuid})
        else:
            LOG.debug("Master cache hit for image %(uuid)s",
                      {'uuid': uuid})
            return
        self._download_image(uuid, master_path, dest_path, ctx=ctx)
    # NOTE(dtantsur): we increased cache size - time to clean up
    self.clean_up()
def fetch_image(self, uuid, dest_path, ctx=None, force_raw=True):
    """Fetch image with given uuid to the destination path.

    Does nothing if destination path exists. Only creates a link if
    master image for this UUID is already in cache. Otherwise downloads
    an image and also stores it in cache.

    :param uuid: image UUID or href to fetch
    :param dest_path: destination file path
    :param ctx: context
    :param force_raw: boolean value, whether to convert the image to raw
                      format
    """
    img_download_lock_name = 'download-image'
    if self.master_dir is None:
        #NOTE(ghe): We don't share images between instances/hosts
        if not CONF.parallel_image_downloads:
            with lockutils.lock(img_download_lock_name, 'ironic-'):
                _fetch(ctx, uuid, dest_path, self._image_service,
                       force_raw)
        else:
            _fetch(ctx, uuid, dest_path, self._image_service, force_raw)
        return
    #TODO(ghe): have hard links and counts the same behaviour in all fs
    master_file_name = service_utils.parse_image_ref(uuid)[0]
    master_path = os.path.join(self.master_dir, master_file_name)
    if CONF.parallel_image_downloads:
        # Per-image lock name so different images download concurrently.
        img_download_lock_name = 'download-image:%s' % master_file_name
    # TODO(dtantsur): lock expiration time
    with lockutils.lock(img_download_lock_name, 'ironic-'):
        if os.path.exists(dest_path):
            LOG.debug("Destination %(dest)s already exists for "
                      "image %(uuid)s" %
                      {'uuid': uuid, 'dest': dest_path})
            return
        try:
            # NOTE(dtantsur): ensure we're not in the middle of clean up
            with lockutils.lock('master_image', 'ironic-'):
                os.link(master_path, dest_path)
        except OSError:
            # Hard link failed -> not in master cache; download below.
            LOG.info(_LI("Master cache miss for image %(uuid)s, "
                         "starting download"), {'uuid': uuid})
        else:
            LOG.debug("Master cache hit for image %(uuid)s",
                      {'uuid': uuid})
            return
        self._download_image(uuid, master_path, dest_path, ctx=ctx,
                             force_raw=force_raw)
    # NOTE(dtantsur): we increased cache size - time to clean up
    self.clean_up()