    def test_contextlock_unlocks(self):
        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir, group='oslo_concurrency')

        sem = None

        try:
            with lockutils.lock("test") as sem:
                if six.PY2:
                    self.assertTrue(isinstance(sem, threading._Semaphore))
                else:
                    self.assertTrue(isinstance(sem, threading.Semaphore))

                with lockutils.lock("test2", external=True) as lock:
                    self.assertTrue(lock.exists())

                # NOTE(flaper87): Lock should be free
                with lockutils.lock("test2", external=True) as lock:
                    self.assertTrue(lock.exists())

            # NOTE(flaper87): Lock should be free
            # but semaphore should already exist.
            with lockutils.lock("test") as sem2:
                self.assertEqual(sem, sem2)
        finally:
            if os.path.exists(lock_dir):
                shutil.rmtree(lock_dir, ignore_errors=True)
    def test_contextlock(self):
        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir, group='oslo_concurrency')

        try:
            # Note(flaper87): Lock is not external, which means
            # a semaphore will be yielded
            with lockutils.lock("test") as sem:
                if six.PY2:
                    self.assertTrue(isinstance(sem, threading._Semaphore))
                else:
                    self.assertTrue(isinstance(sem, threading.Semaphore))

                # NOTE(flaper87): Lock is external so an InterProcessLock
                # will be yielded.
                with lockutils.lock("test2", external=True) as lock:
                    self.assertTrue(lock.exists())

                with lockutils.lock("test1",
                                    external=True) as lock1:
                    self.assertTrue(isinstance(lock1,
                                               lockutils.InterProcessLock))
        finally:
            if os.path.exists(lock_dir):
                shutil.rmtree(lock_dir, ignore_errors=True)
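The pair of tests above exercises oslo.concurrency's two lock flavors: without external=True, lockutils.lock() yields an in-process threading semaphore, while external=True yields an InterProcessLock backed by a file under lock_path. A minimal sketch of both call shapes (the scratch lock_path is illustrative, assuming oslo.concurrency is available):

import tempfile

from oslo_concurrency import lockutils

# Point external locks at a scratch directory for this sketch.
lockutils.set_defaults(lock_path=tempfile.mkdtemp())

# Internal lock: serializes threads within this process only.
with lockutils.lock("my-resource"):
    pass

# External lock: also excludes other processes on the same host,
# via a lock file created under lock_path.
with lockutils.lock("my-resource", external=True):
    pass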
Example #4
    def fetch_image(self, uuid, dest_path, ctx=None, force_raw=True):
        """Fetch image with given uuid to the destination path.

        Does nothing if destination path exists.
        Only creates a link if master image for this UUID is already in cache.
        Otherwise downloads an image and also stores it in cache.

        :param uuid: image UUID or href to fetch
        :param dest_path: destination file path
        :param ctx: context
        :param force_raw: boolean value, whether to convert the image to raw
                          format
        """
        img_download_lock_name = 'download-image'
        if self.master_dir is None:
            # NOTE(ghe): We don't share images between instances/hosts
            if not CONF.parallel_image_downloads:
                with lockutils.lock(img_download_lock_name, 'ironic-'):
                    _fetch(ctx, uuid, dest_path, self._image_service,
                           force_raw)
            else:
                _fetch(ctx, uuid, dest_path, self._image_service, force_raw)
            return

        # TODO(ghe): have hard links and counts the same behaviour in all fs

        master_file_name = service_utils.parse_image_ref(uuid)[0]
        master_path = os.path.join(self.master_dir, master_file_name)

        if CONF.parallel_image_downloads:
            img_download_lock_name = 'download-image:%s' % master_file_name

        # TODO(dtantsur): lock expiration time
        with lockutils.lock(img_download_lock_name, 'ironic-'):
            if os.path.exists(dest_path):
                LOG.debug("Destination %(dest)s already exists for "
                            "image %(uuid)s" %
                          {'uuid': uuid,
                           'dest': dest_path})
                return

            try:
                # NOTE(dtantsur): ensure we're not in the middle of clean up
                with lockutils.lock('master_image', 'ironic-'):
                    os.link(master_path, dest_path)
            except OSError:
                LOG.info(_LI("Master cache miss for image %(uuid)s, "
                             "starting download"),
                         {'uuid': uuid})
            else:
                LOG.debug("Master cache hit for image %(uuid)s",
                          {'uuid': uuid})
                return

            self._download_image(
                uuid, master_path, dest_path, ctx=ctx, force_raw=force_raw)

        # NOTE(dtantsur): we increased cache size - time to clean up
        self.clean_up()
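Note the lock-name choice above: with parallel downloads disabled, a single 'download-image' lock serializes every fetch, while enabling them switches to a per-file 'download-image:<name>' lock so only fetches of the same master image contend. A hedged sketch of that granularity trade-off (fetch_serialized is a hypothetical helper, not part of ironic):

from oslo_concurrency import lockutils

def fetch_serialized(resource_id, parallel):
    if parallel:
        # Contend only with fetches of the same resource.
        name = 'download-image:%s' % resource_id
    else:
        # One global lock: all fetches are serialized.
        name = 'download-image'
    with lockutils.lock(name, 'ironic-'):
        pass  # ... download the image here ...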
Example #5
    def _cleanup(self, pool):
        now = time.time()
        cleanup_file = os.path.join(CONF.scrubber_datadir, ".cleanup")
        if not os.path.exists(cleanup_file):
            self._update_cleanup_file(cleanup_file, now)
            return

        last_cleanup_time = self._read_cleanup_file(cleanup_file)
        cleanup_time = last_cleanup_time + CONF.cleanup_scrubber_time
        if cleanup_time > now:
            return

        LOG.info(_LI("Getting images deleted before %s") %
                 CONF.cleanup_scrubber_time)
        self._update_cleanup_file(cleanup_file, now)

        delete_jobs = self._get_delete_jobs(self.db_queue, False)
        if not delete_jobs:
            return

        for image_id, jobs in six.iteritems(delete_jobs):
            with lockutils.lock("scrubber-%s" % image_id,
                                lock_file_prefix='glance-', external=True):
                if not self.file_queue.has_image(image_id):
                    # NOTE(zhiyan): the scrubber must not clean up this image
                    # if a queue file was created for this 'pending_delete'
                    # image concurrently, before this code acquired the lock.
                    # This check only matters when glance-api and
                    # glance-scrubber are deployed on the same host.
                    self._scrub_image(pool, image_id, jobs)
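Besides the per-image lock, _cleanup rate-limits itself through a stamp file: it records the last cleanup time and returns early until cleanup_scrubber_time has elapsed. A minimal sketch of that stamp-file pattern (helper names are illustrative; the real code stores the timestamp in the file rather than relying on mtime):

import os
import time

def should_run(stamp_path, interval):
    # Run at most once per `interval` seconds, tracked via a stamp file.
    if not os.path.exists(stamp_path):
        return True
    return os.path.getmtime(stamp_path) + interval <= time.time()

def touch(stamp_path):
    with open(stamp_path, 'w') as f:
        f.write(str(int(time.time())))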
Example #7
        def lock_files(handles_dir):

            with lockutils.lock('external', 'test-', external=True):
                # Open some files we can use for locking
                handles = []
                for n in range(50):
                    path = os.path.join(handles_dir, ('file-%s' % n))
                    handles.append(open(path, 'w'))

                # Loop over all the handles and try locking the file
                # without blocking, keep a count of how many files we
                # were able to lock and then unlock. If the lock fails
                # we get an IOError and bail out with bad exit code
                count = 0
                for handle in handles:
                    try:
                        fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
                        count += 1
                        fcntl.flock(handle, fcntl.LOCK_UN)
                    except IOError:
                        os._exit(2)
                    finally:
                        handle.close()

                # Check that we were able to lock and unlock every file
                self.assertEqual(50, count)
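For reference, the non-blocking probe used above can be factored into a small helper; LOCK_NB makes flock() raise immediately instead of waiting when another process holds the lock (IOError on the Python 2 era these examples come from):

import fcntl

def try_flock(handle):
    # Return True if an exclusive lock could be taken without blocking.
    try:
        fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:  # the file is locked elsewhere
        return False
    fcntl.flock(handle, fcntl.LOCK_UN)
    return True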
Example #9
    def test_non_destructive(self):
        lock_file = os.path.join(self.lock_dir, 'not-destroyed')
        with open(lock_file, 'w') as f:
            f.write('test')
        with lockutils.lock('not-destroyed', external=True,
                            lock_path=self.lock_dir):
            with open(lock_file) as f:
                self.assertEqual(f.read(), 'test')
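Here lock_path is passed per call instead of through configuration, which suits tests that manage their own scratch directories. A minimal sketch of the same call shape:

import tempfile

from oslo_concurrency import lockutils

scratch = tempfile.mkdtemp()
# The lock file lives under `scratch` for this call only; global
# configuration is left untouched.
with lockutils.lock('example', external=True, lock_path=scratch):
    pass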
Example #10
    def add_location(self, image_id, location, user_context=None):
        """Adding image location to scrub queue.

        :param image_id: The opaque image identifier
        :param location: The opaque image location
        :param user_context: The user's request context

        :retval A boolean value to indicate success or not
        """
        if user_context is not None:
            registry_client = registry.get_registry_client(user_context)
        else:
            registry_client = self.registry

        with lockutils.lock("scrubber-%s" % image_id,
                            lock_file_prefix='glance-',
                            external=True):

            # NOTE(zhiyan): make sure the scrubber does not clean up
            # 'pending_delete' images concurrently, before this code
            # acquires the lock.
            try:
                image = registry_client.get_image(image_id)
                if image['status'] == 'deleted':
                    return True
            except exception.NotFound as e:
                LOG.warn(_LW("Failed to find image to delete: %s"),
                         utils.exception_to_str(e))
                return False

            loc_id = location.get('id', '-')
            if self.metadata_encryption_key:
                uri = crypt.urlsafe_encrypt(self.metadata_encryption_key,
                                            location['url'], 64)
            else:
                uri = location['url']
            delete_time = time.time() + self.scrub_time
            file_path = os.path.join(self.scrubber_datadir, str(image_id))

            if os.path.exists(file_path):
                # Append the uri of location to the queue file
                with open(file_path, 'a') as f:
                    f.write('\n')
                    f.write('\n'.join(
                        [str(loc_id), uri,
                         str(int(delete_time))]))
            else:
                # NOTE(zhiyan): Protect the file before we write any data.
                open(file_path, 'w').close()
                os.chmod(file_path, 0o600)
                with open(file_path, 'w') as f:
                    f.write('\n'.join(
                        [str(loc_id), uri,
                         str(int(delete_time))]))
            os.utime(file_path, (delete_time, delete_time))

            return True
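The create-then-chmod sequence above leaves a brief window in which the queue file exists with default permissions. A sketch of an alternative that creates the file with mode 0600 atomically via os.open (write_private is illustrative, not glance code):

import os

def write_private(path, data):
    # O_CREAT with an explicit mode: no window with loose permissions.
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w') as f:
        f.write(data)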
Example #13
    def _age_cached_images(self, context, datastore, dc_info, ds_path):
        """Ages cached images."""
        age_seconds = CONF.remove_unused_original_minimum_age_seconds
        unused_images = self.originals - self.used_images
        ds_browser = self._get_ds_browser(datastore.ref)
        for image in unused_images:
            path = self.timestamp_folder_get(ds_path, image)
            # Lock to ensure that the spawn will not try to access an image
            # that is currently being deleted on the datastore.
            with lockutils.lock(str(path),
                                lock_file_prefix='nova-vmware-ts',
                                external=True):
                ts = self._get_timestamp(ds_browser, path)
                if not ts:
                    ts_path = path.join(self._get_timestamp_filename())
                    try:
                        ds_util.mkdir(self._session, ts_path, dc_info.ref)
                    except vexc.FileAlreadyExistsException:
                        LOG.debug("Timestamp already exists.")
                    LOG.info(
                        _LI("Image %s is no longer used by this node. "
                            "Pending deletion!"), image)
                else:
                    dt = self._get_datetime_from_filename(str(ts))
                    if timeutils.is_older_than(dt, age_seconds):
                        LOG.info(
                            _LI("Image %s is no longer used. "
                                "Deleting!"), path)
                        # Image has aged - delete the image ID folder
                        self._folder_delete(path, dc_info.ref)

        # If the image is used and the timestamp file exists then we delete
        # the timestamp.
        for image in self.used_images:
            path = self.timestamp_folder_get(ds_path, image)
            with lockutils.lock(str(path),
                                lock_file_prefix='nova-vmware-ts',
                                external=True):
                self.timestamp_cleanup(dc_info.ref, ds_browser, path)
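Both loops take the same lock name, str(path), and enlist_image below takes it again, so cache aging and instance spawn can never operate on one image's timestamp folder at the same time. The pattern reduced to a sketch (guard is a hypothetical helper):

from oslo_concurrency import lockutils

def guard(path):
    # Every code path touching `path` takes this same named lock;
    # that is what makes deletion and use mutually exclusive.
    return lockutils.lock(str(path), lock_file_prefix='nova-vmware-ts',
                          external=True)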
Example #14
    def enlist_image(self, image_id, datastore, dc_ref):
        ds_browser = self._get_ds_browser(datastore.ref)
        cache_root_folder = datastore.build_path(self._base_folder)

        # Check if the timestamp file exists - if so then delete it. This
        # will ensure that the aging will not delete a cache image if it
        # is going to be used now.
        path = self.timestamp_folder_get(cache_root_folder, image_id)

        # Lock to ensure that the spawn will not try to access an image
        # that is currently being deleted on the datastore.
        with lockutils.lock(str(path), lock_file_prefix='nova-vmware-ts',
                            external=True):
            self.timestamp_cleanup(dc_ref, ds_browser, path)
Example #18
    def wrapper(self, context, *args, **kwargs):
        res = f(self, context, *args, **kwargs)
        try:
            # get the instance from arguments (or raise ValueError)
            instance = kwargs.get('instance')
            if not instance:
                instance = args[argspec.args.index('instance') - 2]
        except ValueError:
            msg = _('instance is a required argument to use @refresh_cache')
            raise Exception(msg)

        with lockutils.lock('refresh_cache-%s' % instance['uuid']):
            update_instance_cache_with_nw_info(self, context, instance,
                                               nw_info=res)
        # return the original function's return value
        return res
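The context manager is needed here because the lock name is built per instance UUID at call time. When a lock name is static and should cover a whole function, lockutils also provides a synchronized() decorator; a minimal sketch:

from oslo_concurrency import lockutils

@lockutils.synchronized('refresh_cache', 'example-')
def rebuild_cache():
    # Only one thread in this process runs this body at a time.
    pass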
Example #20
    def _walk_all_locations(self, remove=False):
        """Returns a list of image id and location tuple from scrub queue.

        :param remove: Whether remove location from queue or not after walk

        :retval a list of image id, location id and uri tuple from scrub queue
        """
        if not os.path.exists(self.scrubber_datadir):
            LOG.warn(
                _LW("%s directory does not exist.") % self.scrubber_datadir)
            return []

        ret = []
        for root, dirs, files in os.walk(self.scrubber_datadir):
            for image_id in files:
                if not utils.is_uuid_like(image_id):
                    continue
                with lockutils.lock("scrubber-%s" % image_id,
                                    lock_file_prefix='glance-',
                                    external=True):
                    file_path = os.path.join(self.scrubber_datadir, image_id)
                    records = self._read_queue_file(file_path)
                    loc_ids, uris, delete_times = records

                    remove_record_idxs = []
                    skipped = False
                    for (record_idx, delete_time) in enumerate(delete_times):
                        if delete_time > time.time():
                            skipped = True
                            continue
                        else:
                            ret.append((image_id, loc_ids[record_idx],
                                        uris[record_idx]))
                            remove_record_idxs.append(record_idx)

                    if remove:
                        if skipped:
                            # NOTE(zhiyan): remove location records from
                            # the queue file.
                            self._update_queue_file(file_path,
                                                    remove_record_idxs)
                        else:
                            utils.safe_remove(file_path)
        return ret
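The queue-file layout implied by add_location above is newline-separated (location id, uri, delete time) triples, appended per location. A hedged parser sketch for that layout (not glance's actual _read_queue_file):

def parse_queue_file(path):
    # Lines come in groups of three: loc_id, uri, delete_time.
    with open(path) as f:
        lines = [line for line in f.read().split('\n') if line]
    loc_ids = lines[0::3]
    uris = lines[1::3]
    delete_times = [int(t) for t in lines[2::3]]
    return loc_ids, uris, delete_times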
Example #22
def get_client(context, admin=False):
    # NOTE(dprince): In the case where no auth_token is present
    # we allow use of neutron admin tenant credentials if
    # it is an admin context.
    # This is to support some services (metadata API) where
    # an admin context is used without an auth token.
    if admin or (context.is_admin and not context.auth_token):
        with lockutils.lock('neutron_admin_auth_token_lock'):
            orig_token = AdminTokenStore.get().admin_auth_token
        client = _get_client(orig_token, admin=True)
        return ClientWrapper(client)

    # We got a user token; use it as-is
    if context.auth_token:
        token = context.auth_token
        return _get_client(token=token)

    # We did not get a user token and we should not be using
    # an admin token, so raise an Unauthorized error
    raise exceptions.Unauthorized()
Example #24
    def _sync_domain(self, domain, new_domain_flag=False):
        """Sync a single domain's zone file and reload bind config"""

        # NOTE: Different versions of BIND9 behave differently with a trailing
        #       dot, so we're just going to take it off.
        domain_name = domain.origin.to_text().rstrip('.')

        # NOTE: Only one thread should be working with the Zonefile at a given
        #       time. The sleep(1) below introduces a not insignificant risk
        #       of more than 1 thread working with a zonefile at a given time.
        with lockutils.lock('bind9-%s' % domain_name):
            LOG.debug('Synchronising Domain: %s' % domain_name)

            zone_path = cfg.CONF[CFG_GROUP].zone_file_path

            output_path = os.path.join(zone_path,
                                       '%s.zone' % domain_name)

            domain.to_file(output_path, relativize=False)

            rndc_call = self._rndc_base()

            if new_domain_flag:
                rndc_op = [
                    'addzone',
                    '%s { type master; file "%s"; };' % (domain_name,
                                                         output_path),
                ]
                rndc_call.extend(rndc_op)
            else:
                rndc_op = 'reload'
                rndc_call.extend([rndc_op])
                rndc_call.extend([domain_name])

            LOG.debug('Calling RNDC with: %s' % " ".join(rndc_call))
            self._execute_rndc(rndc_call)
Example #26
def _update_token(new_token):
    with lockutils.lock('neutron_admin_auth_token_lock'):
        token_store = AdminTokenStore.get()
        token_store.admin_auth_token = new_token
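Together with get_client in Example #22, this shows both sides of one named lock: every reader and the single writer of the shared admin token serialize on 'neutron_admin_auth_token_lock'. Reduced to a sketch with an illustrative token store:

from oslo_concurrency import lockutils

_store = {'token': None}  # stand-in for AdminTokenStore

def read_token():
    with lockutils.lock('token_lock'):
        return _store['token']

def write_token(new_token):
    with lockutils.lock('token_lock'):
        _store['token'] = new_token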
Example #27
    def _sync_domain(self, domain, new_domain_flag=False):
        """Sync a single domain's zone file and reload bind config"""

        # NOTE: Only one thread should be working with the Zonefile at a given
        #       time. The sleep(1) below introduces a not insignificant risk
        #       of more than 1 thread working with a zonefile at a given time.
        with lockutils.lock('bind9-%s' % domain['id']):
            LOG.debug('Synchronising Domain: %s' % domain['id'])

            recordsets = self.central_service.find_recordsets(
                self.admin_context, {'domain_id': domain['id']})

            records = []

            for recordset in recordsets:
                criterion = {
                    'domain_id': domain['id'],
                    'recordset_id': recordset['id']
                }

                raw_records = self.central_service.find_records(
                    self.admin_context, criterion)

                for record in raw_records:
                    records.append({
                        'name': recordset['name'],
                        'type': recordset['type'],
                        'ttl': recordset['ttl'],
                        'data': record['data'],
                    })

            output_folder = os.path.join(os.path.abspath(cfg.CONF.state_path),
                                         'bind9')

            output_name = "_".join([domain['name'], domain['id']])
            output_path = os.path.join(output_folder, '%s.zone' % output_name)

            utils.render_template_to_file('bind9-zone.jinja2',
                                          output_path,
                                          domain=domain,
                                          records=records)

            rndc_call = self._rndc_base()

            if new_domain_flag:
                rndc_op = [
                    'addzone',
                    '%s { type master; file "%s"; };' % (domain['name'],
                                                         output_path),
                ]
                rndc_call.extend(rndc_op)
            else:
                rndc_op = 'reload'
                rndc_call.extend([rndc_op])
                rndc_call.extend([domain['name']])

            if not new_domain_flag:
                # NOTE: Bind9 will only ever attempt to re-read a zonefile if
                #       the file's timestamp has changed since the previous
                #       reload. A one second sleep ensures we cross over a
                #       second boundary before allowing the next change.
                time.sleep(1)

            LOG.debug('Calling RNDC with: %s' % " ".join(rndc_call))
            utils.execute(*rndc_call)

            nzf_name = glob.glob('%s/*.nzf' % cfg.CONF[self.name].nzf_path)

            output_file = os.path.join(output_folder, 'zones.config')

            shutil.copyfile(nzf_name[0], output_file)
Example #29
    def f(_id):
        with lockutils.lock('testlock2', 'test-', external=False):
            for x in range(10):
                seen_threads.append(_id)
Example #30
    def test_no_slash_in_b64(self):
        # base64(sha1(foobar)) has a slash in it
        with lockutils.lock("foobar"):
            pass
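The test name hints at why external lock names need sanitizing: lock files are commonly named from a hash of the lock name, and a raw base64 encoding of that hash can contain '/'. A quick demonstration of the comment's claim:

import base64
import hashlib

digest = hashlib.sha1(b'foobar').digest()
encoded = base64.b64encode(digest)
# '/' is illegal in a file name, so a lock layer deriving file names
# this way must use a safe encoding or strip such characters.
assert b'/' in encoded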
Example #33
    def _sync_domain(self, domain_name):
        LOG.debug('Synchronising domain: %s' % domain_name)
        return lockutils.lock('denominator-%s' % domain_name)
Example #34
    def __init__(self, name, lock_file_prefix=None):
        self.mgr = lockutils.lock(name, lock_file_prefix, True)
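This fragment stores the context manager returned by lockutils.lock() (the third positional argument makes the lock external) without entering it. A hedged completion, assuming a test fixture in the style of the fixtures library (the class name is hypothetical):

import fixtures

from oslo_concurrency import lockutils

class ExternalLockFixture(fixtures.Fixture):
    def __init__(self, name, lock_file_prefix=None):
        self.mgr = lockutils.lock(name, lock_file_prefix, True)

    def setUp(self):
        super(ExternalLockFixture, self).setUp()
        # Hold the lock for the fixture's lifetime; release on cleanup.
        self.mgr.__enter__()
        self.addCleanup(self.mgr.__exit__, None, None, None)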