Code Example #1
 def update_router(self, context, router_id, router):
     edge_id = edge_utils.get_router_edge_id(context, router_id)
     if not edge_id:
         return super(nsx_v.NsxVPluginV2, self.plugin).update_router(
             context, router_id, router)
     else:
         with lockutils.lock(str(edge_id),
                             lock_file_prefix=NSXV_ROUTER_RECONFIG,
                             external=True):
             gw_info = self.plugin._extract_external_gw(
                 context, router, is_extract=True)
             super(nsx_v.NsxVPluginV2, self.plugin).update_router(
                 context, router_id, router)
         # Handle routes that the tenant has updated.
         if gw_info != attr.ATTR_NOT_SPECIFIED:
             self._update_router_gw_info(context, router_id, gw_info)
         else:
             with lockutils.lock(str(edge_id),
                                 lock_file_prefix=NSXV_ROUTER_RECONFIG,
                                 external=True):
                 router_db = self.plugin._get_router(context, router_id)
                 nexthop = self.plugin._get_external_attachment_info(
                     context, router_db)[2]
                 self.update_routes(context, router_id, nexthop)
         return self.plugin.get_router(context, router_id)
Code Example #2
    def test_contextlock(self):
        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir, group='oslo_concurrency')

        try:
            # Note(flaper87): Lock is not external, which means
            # a semaphore will be yielded
            with lockutils.lock("test") as sem:
                if six.PY2:
                    self.assertTrue(isinstance(sem, threading._Semaphore))
                else:
                    self.assertTrue(isinstance(sem, threading.Semaphore))

                # NOTE(flaper87): Lock is external so an InterProcessLock
                # will be yielded.
                with lockutils.lock("test2", external=True) as lock:
                    self.assertTrue(lock.exists())

                with lockutils.lock("test1",
                                    external=True) as lock1:
                    self.assertTrue(isinstance(lock1,
                                               lockutils.InterProcessLock))
        finally:
            if os.path.exists(lock_dir):
                shutil.rmtree(lock_dir, ignore_errors=True)
Code Example #3
    def test_contextlock_unlocks(self):
        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir, group='oslo_concurrency')

        sem = None

        try:
            with lockutils.lock("test") as sem:
                if six.PY2:
                    self.assertTrue(isinstance(sem, threading._Semaphore))
                else:
                    self.assertTrue(isinstance(sem, threading.Semaphore))

                with lockutils.lock("test2", external=True) as lock:
                    self.assertTrue(lock.exists())

                # NOTE(flaper87): Lock should be free
                with lockutils.lock("test2", external=True) as lock:
                    self.assertTrue(lock.exists())

            # NOTE(flaper87): Lock should be free
            # but semaphore should already exist.
            with lockutils.lock("test") as sem2:
                self.assertEqual(sem, sem2)
        finally:
            if os.path.exists(lock_dir):
                shutil.rmtree(lock_dir, ignore_errors=True)
Code Example #4
File: image_cache.py Project: faizan-barmawer/ironic
    def fetch_image(self, href, dest_path, ctx=None, force_raw=True):
        """Fetch image by given href to the destination path.

        Does nothing if destination path exists.
        Only creates a link if master image for this UUID is already in cache.
        Otherwise downloads an image and also stores it in cache.

        :param href: image UUID or href to fetch
        :param dest_path: destination file path
        :param ctx: context
        :param force_raw: boolean value, whether to convert the image to raw
                          format
        """
        img_download_lock_name = 'download-image'
        if self.master_dir is None:
            # NOTE(ghe): We don't share images between instances/hosts
            if not CONF.parallel_image_downloads:
                with lockutils.lock(img_download_lock_name, 'ironic-'):
                    _fetch(ctx, href, dest_path, self._image_service,
                           force_raw)
            else:
                _fetch(ctx, href, dest_path, self._image_service, force_raw)
            return

        # TODO(ghe): make hard links and link counts behave the same in all fs

        master_file_name = service_utils.parse_image_ref(href)[0]
        master_path = os.path.join(self.master_dir, master_file_name)

        if CONF.parallel_image_downloads:
            img_download_lock_name = 'download-image:%s' % master_file_name

        # TODO(dtantsur): lock expiration time
        with lockutils.lock(img_download_lock_name, 'ironic-'):
            if os.path.exists(dest_path):
                LOG.debug("Destination %(dest)s already exists for "
                          "image %(uuid)s" %
                          {'uuid': href,
                           'dest': dest_path})
                return

            try:
                # NOTE(dtantsur): ensure we're not in the middle of clean up
                with lockutils.lock('master_image', 'ironic-'):
                    os.link(master_path, dest_path)
            except OSError:
                LOG.info(_LI("Master cache miss for image %(uuid)s, "
                             "starting download"),
                         {'uuid': href})
            else:
                LOG.debug("Master cache hit for image %(uuid)s",
                          {'uuid': href})
                return

            self._download_image(
                href, master_path, dest_path, ctx=ctx, force_raw=force_raw)

        # NOTE(dtantsur): we increased cache size - time to clean up
        self.clean_up()
Code Example #5
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        if not prop_diff:
            return
        new_members = set(tmpl_diff['Properties'][self.GRID_MEMBERS])
        old_members = set(self.properties.get(self.GRID_MEMBERS))
        to_remove = old_members - new_members
        if self.GRID_MEMBERS in prop_diff:
            for member in to_remove:
                self._delete_anycast_ip_from_member(member)

            if len(prop_diff) > 1:
                # Anycast settings were changed, need to update all members
                to_update = new_members
            else:
                # Anycast settings unchanged, so update only the new members
                to_update = new_members - old_members
        else:
            # Anycast settings were changed, so need to update all members
            to_update = new_members

        # Enable_dns field complicates update because it refers to
        # member:dns additional_ip_list which depends on the
        # additional_ip_list field from member.
        # To update ip for anycast loopback update has to be executed in
        # next order:
        # - delete old ip address from member:dns
        # - update anycast ip
        # - add updated ip address to member:dns

        # if ip changed or dns disabled - delete dns ip from existing members
        if (self.IP in prop_diff or (self.ENABLE_DNS in prop_diff and
                                     not prop_diff[self.ENABLE_DNS])):
            for member in old_members - to_remove:
                self._delete_ip_from_dns(member, self.properties[self.IP])
        # now create/update anycast loopback and dns ip
        for member in to_update:

            with lockutils.lock(member, external=True,
                                lock_file_prefix='infoblox-anycast'):
                self.infoblox.create_anycast_loopback(
                    member,
                    tmpl_diff['Properties'][self.IP],
                    tmpl_diff['Properties'][self.ENABLE_BGP],
                    tmpl_diff['Properties'][self.ENABLE_OSPF],
                    old_ip=self.properties[self.IP])

            if tmpl_diff['Properties'][self.ENABLE_DNS]:
                with lockutils.lock(member,
                                    external=True,
                                    lock_file_prefix='infoblox-dns-ips'):
                    self.infoblox.add_member_dns_additional_ip(
                        member, tmpl_diff['Properties'][self.IP])
Code Example #6
File: scrubber.py Project: sayalilunkad/glance
    def _cleanup(self, pool):
        now = time.time()
        cleanup_file = os.path.join(CONF.scrubber_datadir, ".cleanup")
        if not os.path.exists(cleanup_file):
            self._update_cleanup_file(cleanup_file, now)
            return

        last_cleanup_time = self._read_cleanup_file(cleanup_file)
        cleanup_time = last_cleanup_time + CONF.cleanup_scrubber_time
        if cleanup_time > now:
            return

        LOG.info(_LI("Getting images deleted before %s") %
                 CONF.cleanup_scrubber_time)
        self._update_cleanup_file(cleanup_file, now)

        delete_jobs = self._get_delete_jobs(self.db_queue, False)
        if not delete_jobs:
            return

        for image_id, jobs in six.iteritems(delete_jobs):
            with lockutils.lock("scrubber-%s" % image_id,
                                lock_file_prefix='glance-', external=True):
                if not self.file_queue.has_image(image_id):
                    # NOTE(zhiyan): The scrubber should not clean up this
                    # image, because a queue file may have been created for
                    # this 'pending_delete' image concurrently, before this
                    # code acquired the lock and reached here. The check is
                    # only worthwhile if glance-api and glance-scrubber are
                    # deployed on the same host.
                    self._scrub_image(pool, image_id, jobs)
Code Example #7
 def disable_module(self, ctxt, name):
     LOG.info('Received disable command for module %s.', name)
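     # In-process lock; module-state changes are serialized on this name.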
     lock = lockutils.lock('module-state')
     with lock:
         self._module_state[name] = False
         if name in self._pending_reload:
             self._pending_reload.remove(name)
Code Example #8
 def _delete_ip_from_dns(self, member_name, ip):
     if self.properties[self.ENABLE_DNS]:
         with lockutils.lock(member_name,
                             external=True,
                             lock_file_prefix='infoblox-dns-ips'):
             self.infoblox.remove_member_dns_additional_ip(member_name,
                                                           ip)
Code Example #9
 def _delete_anycast_ip_from_member(self, member_name):
     ip = self.properties[self.IP]
     self._delete_ip_from_dns(member_name, ip)
     with lockutils.lock(member_name,
                         external=True,
                         lock_file_prefix='infoblox-anycast'):
         self.infoblox.delete_anycast_loopback(ip, member_name)
Code Example #10
 def handle_delete(self):
     with lockutils.lock(self.properties[self.GRID_MEMBER],
                         external=True,
                         lock_file_prefix='infoblox-bgp-update'):
         self.infoblox.delete_bgp_neighbor(
             self.properties[self.GRID_MEMBER],
             self.properties[self.NEIGHBOR_IP])
Code Example #11
    def test_lock_internal_fair(self):
        """Check that we're actually fair."""

        def f(_id):
            with lockutils.lock('testlock', 'test-',
                                external=False, fair=True):
                lock_holder.append(_id)

        lock_holder = []
        threads = []
        # While holding the fair lock, spawn a bunch of threads that all try
        # to acquire the lock.  They will all block.  Then release the lock
        # and see what happens.
        with lockutils.lock('testlock', 'test-', external=False, fair=True):
            for i in range(10):
                thread = threading.Thread(target=f, args=(i,))
                threads.append(thread)
                thread.start()
                # Allow some time for the new thread to get queued onto the
                # list of pending writers before continuing.  This is gross
                # but there's no way around it without using knowledge of
                # fasteners internals.
                time.sleep(0.5)
        # Wait for all threads.
        for thread in threads:
            thread.join()

        self.assertEqual(10, len(lock_holder))
        # Check that the threads each got the lock in fair order.
        for i in range(10):
            self.assertEqual(i, lock_holder[i])
Code Example #12
File: fcvscsi.py Project: arbrandes/nova
    def _attach_volume_to_vio(self, vios_w):
        """Attempts to attach a volume to a given VIO.

        :param vios_w: The Virtual I/O Server wrapper to attach to.
        :return: True if the volume was attached.  False if the volume was
                 not (could be the Virtual I/O Server does not have
                 connectivity to the hdisk).
        """
        status, device_name, udid = self._discover_volume_on_vios(vios_w)

        if hdisk.good_discovery(status, device_name):
            # Found a hdisk on this Virtual I/O Server.  Add the action to
            # map it to the VM when the stg_ftsk is executed.
            with lockutils.lock(self.volume_id):
                self._add_append_mapping(vios_w.uuid, device_name,
                                         tag=self.volume_id)

            # Save the UDID for the disk in the connection info.  It is
            # used for the detach.
            self._set_udid(udid)
            LOG.debug('Added deferred task to attach device %(device_name)s '
                      'to vios %(vios_name)s.',
                      {'device_name': device_name, 'vios_name': vios_w.name},
                      instance=self.instance)

            # Valid attachment
            return True

        return False
Code Example #13
File: vm.py Project: arbrandes/nova
def reboot(adapter, instance, hard):
    """Reboots a VM.

    :param adapter: A pypowervm.adapter.Adapter.
    :param instance: The nova instance to reboot.
    :param hard: Boolean True if hard reboot, False otherwise.
    :raises: InstanceRebootFailure
    """
    # Synchronize power-on and power-off ops on a given instance
    with lockutils.lock('power_%s' % instance.uuid):
        try:
            entry = get_instance_wrapper(adapter, instance)
            if entry.state != pvm_bp.LPARState.NOT_ACTIVATED:
                if hard:
                    power.PowerOp.stop(
                        entry, opts=popts.PowerOffOpts().vsp_hard().restart())
                else:
                    power.power_off_progressive(entry, restart=True)
            else:
                # pypowervm does NOT throw an exception if "already down".
                # Any other exception from pypowervm is a legitimate failure;
                # let it raise up.
                # If we get here, pypowervm thinks the instance is down.
                power.power_on(entry, None)
        except pvm_exc.Error as e:
            LOG.exception("PowerVM error during reboot.", instance=instance)
            raise exc.InstanceRebootFailure(reason=six.text_type(e))
Code Example #14
    def all_from_string(self, rc_str):
        """Given a string representation of a resource class -- e.g. "DISK_GB"
        or "CUSTOM_IRON_SILVER" -- return all the resource class info.

        :param rc_str: The string representation of the resource class for
                       which to look up a resource_class.
        :returns: dict representing the resource class fields, if the
                  resource class was found in the list of standard
                  resource classes or the resource_classes database table.
        :raises: `exception.ResourceClassNotFound` if rc_str cannot be found in
                 either the standard classes or the DB.
        """
        # First check the standard resource classes
        if rc_str in fields.ResourceClass.STANDARD:
            return {'id': fields.ResourceClass.STANDARD.index(rc_str),
                    'name': rc_str,
                    'updated_at': None,
                    'created_at': None}

        with lockutils.lock(_LOCKNAME):
            if rc_str in self.all_cache:
                return self.all_cache[rc_str]
            # Otherwise, check the database table
            _refresh_from_db(self.ctx, self)
            if rc_str in self.all_cache:
                return self.all_cache[rc_str]
            raise exception.ResourceClassNotFound(resource_class=rc_str)
Code Example #15
    def id_from_string(self, rc_str):
        """Given a string representation of a resource class -- e.g. "DISK_GB"
        or "IRON_SILVER" -- return the integer code for the resource class. For
        standard resource classes, this integer code will match the list of
        resource classes on the fields.ResourceClass field type. Other custom
        resource classes will cause a DB lookup into the resource_classes
        table, however the results of these DB lookups are cached since the
        lookups are so frequent.

        :param rc_str: The string representation of the resource class to look
                       up a numeric identifier for.
        :returns: integer identifier for the resource class, or None, if no such
                 resource class was found in the list of standard resource
                 classes or the resource_classes database table.
        :raises: `exception.ResourceClassNotFound` if rc_str cannot be found in
                either the standard classes or the DB.
        """
        # First check the standard resource classes
        if rc_str in fields.ResourceClass.STANDARD:
            return fields.ResourceClass.STANDARD.index(rc_str)

        with lockutils.lock(_LOCKNAME):
            if rc_str in self.id_cache:
                return self.id_cache[rc_str]
            # Otherwise, check the database table
            _refresh_from_db(self.ctx, self)
            if rc_str in self.id_cache:
                return self.id_cache[rc_str]
            raise exception.ResourceClassNotFound(resource_class=rc_str)
Code Example #16
 def remove_router_interface(self, context, router_id, interface_info):
     edge_id = edge_utils.get_router_edge_id(context, router_id)
     with lockutils.lock(str(edge_id),
                         lock_file_prefix=NSXV_ROUTER_RECONFIG,
                         external=True):
         info = super(
             nsx_v.NsxVPluginV2, self.plugin).remove_router_interface(
                 context, router_id, interface_info)
         subnet = self.plugin.get_subnet(context, info['subnet_id'])
         network_id = subnet['network_id']
         router_ids = self.edge_manager.get_routers_on_same_edge(
             context, router_id)
         self._update_nat_rules_on_routers(context, router_id, router_ids)
         self._update_subnets_and_dnat_firewall_on_routers(
             context, router_id, router_ids, allow_external=True)
         ports = self.plugin._get_router_interface_ports_by_network(
             context, router_id, network_id)
         if not ports:
             edge_utils.delete_interface(self.nsx_v, context,
                                         router_id, network_id)
             # unbind all services if no interfaces attached to the router
             if not self._get_internal_network_ids_by_router(
                 context, router_id):
                 self._remove_router_services_on_edge(context, router_id)
                 self._unbind_router_on_edge(context, router_id)
         else:
             address_groups = self.plugin._get_address_groups(
                 context, router_id, network_id)
             edge_utils.update_internal_interface(
                 self.nsx_v, context, router_id, network_id, address_groups)
     return info
Code Example #17
    def _apply(self):
        lock_name = 'iptables'
        if self.namespace:
            lock_name += '-' + self.namespace

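        # The third positional argument to lockutils.lock() is external=True,
        # so this is an inter-process file lock, not just a thread lock.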
        with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
            return self._apply_synchronized()
Code Example #18
        def lock_files(handles_dir):

            with lockutils.lock('external', 'test-', external=True):
                # Open some files we can use for locking
                handles = []
                for n in range(50):
                    path = os.path.join(handles_dir, ('file-%s' % n))
                    handles.append(open(path, 'w'))

                # Loop over all the handles and try locking the file
                # without blocking, keep a count of how many files we
                # were able to lock and then unlock. If the lock fails
                # we get an IOError and bail out with bad exit code
                count = 0
                for handle in handles:
                    try:
                        fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
                        count += 1
                        fcntl.flock(handle, fcntl.LOCK_UN)
                    except IOError:
                        os._exit(2)
                    finally:
                        handle.close()

                # Check if we were able to open all files
                self.assertEqual(50, count)
Code Example #19
File: vscsi.py Project: kairoaraujo/nova-powervm
        def connect_volume_to_vio(vios_w):
            """Attempts to connect a volume to a given VIO.

            :param vios_w: The Virtual I/O Server wrapper to connect to.
            :return: True if the volume was connected.  False if the volume was
                     not (could be the Virtual I/O Server does not have
                     connectivity to the hdisk).
            """
            status, device_name, udid = self._discover_volume_on_vios(
                vios_w, self.volume_id)

            if hdisk.good_discovery(status, device_name):
                # Found a hdisk on this Virtual I/O Server.  Add the action to
                # map it to the VM when the stg_ftsk is executed.
                with lockutils.lock(hash(self)):
                    self._add_append_mapping(vios_w.uuid, device_name)

                # Save the UDID for the disk in the connection info.  It is
                # used for the detach.
                self._set_udid(udid)
                LOG.debug("Device attached: %s", device_name)

                # Valid attachment
                return True

            return False
Code Example #20
    def string_from_id(self, rc_id):
        """The reverse of the id_from_string() method. Given a supplied numeric
        identifier for a resource class, we look up the corresponding string
        representation, either in the list of standard resource classes or via
        a DB lookup. The results of these DB lookups are cached since the
        lookups are so frequent.

        :param rc_id: The numeric representation of the resource class to look
                      up a string identifier for.
        :returns: string identifier for the resource class, or None, if no such
                 resource class was found in the list of standard resource
                 classes or the resource_classes database table.
        :raises: `exception.ResourceClassNotFound` if rc_id cannot be found in
                either the standard classes or the DB.
        """
        # First check the fields.ResourceClass.STANDARD values
        try:
            return fields.ResourceClass.STANDARD[rc_id]
        except IndexError:
            pass

        with lockutils.lock(_LOCKNAME):
            if rc_id in self.str_cache:
                return self.str_cache[rc_id]

            # Otherwise, check the database table
            _refresh_from_db(self.ctx, self)
            if rc_id in self.str_cache:
                return self.str_cache[rc_id]
            raise exception.ResourceClassNotFound(resource_class=rc_id)
Code Example #21
File: service.py Project: rtapadar/designate
    def periodic_recovery(self):
        """
        :return: None
        """
        # TODO(kiall): Replace this inter-process-lock with a distributed
        #              lock, likely using the tooz library - see bug 1445127.
        with lockutils.lock('periodic_recovery', external=True, delay=30):
            context = DesignateContext.get_admin_context(all_tenants=True)

            LOG.debug("Starting Periodic Recovery")

            try:
                # Handle Deletion Failures
                domains = self._get_failed_domains(context, DELETE_ACTION)

                for domain in domains:
                    self.delete_domain(context, domain)

                # Handle Creation Failures
                domains = self._get_failed_domains(context, CREATE_ACTION)

                for domain in domains:
                    self.create_domain(context, domain)

                # Handle Update Failures
                domains = self._get_failed_domains(context, UPDATE_ACTION)

                for domain in domains:
                    self.update_domain(context, domain)

            except Exception:
                LOG.exception(_LE('An unhandled exception in periodic '
                                  'recovery occurred'))
Code Example #22
File: base_api.py Project: mahak/nova
 def get_instance_nw_info(self, context, instance, **kwargs):
     """Returns all network info related to an instance."""
     with lockutils.lock('refresh_cache-%s' % instance.uuid):
         result = self._get_instance_nw_info(context, instance, **kwargs)
         update_instance_cache_with_nw_info(self, context, instance,
                                            nw_info=result)
     return result
Code Example #23
File: aim_mapping.py Project: ashutosh-mishra/my-gbp
    def _use_implicit_subnet(self, context, force_add=False,
                             clean_session=False):
        """Implicit subnet for AIM.

        The first PTG in a L2P will allocate a new subnet from the L3P.
        Any subsequent PTG in the same L2P will use the same subnet.
        Additional subnets will be allocated as and when the currently used
        subnet runs out of IP addresses.
        """
        l2p_id = context.current['l2_policy_id']
        with lockutils.lock(l2p_id, external=True):
            subs = self._get_l2p_subnets(context, l2p_id)
            subs = set([x['id'] for x in subs])
            added = []
            if not subs or force_add:
                l2p = context._plugin.get_l2_policy(
                    context._plugin_context, l2p_id)
                name = APIC_OWNED + l2p['name']
                added = super(
                    AIMMappingDriver, self)._use_implicit_subnet(
                        context, subnet_specifics={'name': name},
                        is_proxy=False, clean_session=clean_session)
            context.add_subnets(subs - set(context.current['subnets']))
            for subnet in added:
                self._sync_ptg_subnets(context, l2p)
Code Example #24
File: impl_bind9.py Project: bias/designate
    def _sync_domain(self, domain, new_domain_flag=False):
        """Sync a single domain's zone file and reload bind config"""

        # NOTE: Different versions of BIND9 behave differently with a trailing
        #       dot, so we're just going to take it off.
        domain_name = domain.origin.to_text().rstrip(".")

        # NOTE: Only one thread should be working with the Zonefile at a given
        #       time. The sleep(1) below introduces a not insignificant risk
        #       of more than 1 thread working with a zonefile at a given time.
        with lockutils.lock("bind9-%s" % domain_name):
            LOG.debug("Synchronising Domain: %s" % domain_name)

            zone_path = cfg.CONF[CFG_GROUP].zone_file_path

            output_path = os.path.join(zone_path, "%s.zone" % domain_name)

            domain.to_file(output_path, relativize=False)

            rndc_call = self._rndc_base()

            if new_domain_flag:
                rndc_op = ["addzone",
                           '%s { type master; file "%s"; };' %
                           (domain_name, output_path)]
                rndc_call.extend(rndc_op)
            else:
                rndc_op = "reload"
                rndc_call.extend([rndc_op])
                rndc_call.extend([domain_name])

            LOG.debug("Calling RNDC with: %s" % " ".join(rndc_call))
            self._execute_rndc(rndc_call)
Code Example #25
File: ovsdb_handler.py Project: cubeek/neutron
 def inner(*args, **kwargs):
     try:
         bridge_name = kwargs[required_parameter]
     except KeyError:
         bridge_name = args[br_arg_index]
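     # Per-bridge in-process lock, named after the bridge being handled.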
     with lockutils.lock(bridge_name):
         return f(*args, **kwargs)
Code Example #26
    def _apply(self):
        lock_name = 'iptables'
        if self.namespace:
            lock_name += '-' + self.namespace

        with lockutils.lock(lock_name, common.SYNCHRONIZED_PREFIX, True,
                            lock_path='/var/run'):
            return self._apply_synchronized()
Code Example #27
 def _bind_router_on_available_edge(self, context, router_id):
     conflict_network_ids, conflict_router_ids, intf_num = (
         self._get_conflict_network_and_router_ids_by_intf(context,
                                                           router_id))
     conflict_network_ids_by_ext_net = (
         self._get_conflict_network_ids_by_ext_net(context, router_id))
     conflict_network_ids.extend(conflict_network_ids_by_ext_net)
     conflict_router_ids_by_ext_net = (
         self._get_conflict_router_ids_by_ext_net(context,
                                                  conflict_network_ids))
     conflict_router_ids.extend(conflict_router_ids_by_ext_net)
     optional_router_ids, conflict_router_ids_by_gw = (
         self._get_optional_and_conflict_router_ids_by_gw(
             context, router_id))
     conflict_router_ids.extend(conflict_router_ids_by_gw)
     conflict_router_ids = list(set(conflict_router_ids))
     new = self.edge_manager.bind_router_on_available_edge(
         context, router_id, optional_router_ids,
         conflict_router_ids, conflict_network_ids, intf_num)
     # configure metadata service on the router.
     metadata_proxy_handler = self.plugin.metadata_proxy_handler
     if metadata_proxy_handler and new:
         metadata_proxy_handler.configure_router_edge(router_id)
     edge_id = edge_utils.get_router_edge_id(context, router_id)
     with lockutils.lock(str(edge_id),
                         lock_file_prefix=NSXV_ROUTER_RECONFIG,
                         external=True):
         # add all internal interfaces of the router on edge
         intf_net_ids = self._get_internal_network_ids_by_router(
             context, router_id)
         for network_id in intf_net_ids:
             address_groups = self.plugin._get_address_groups(
                 context, router_id, network_id)
             edge_utils.update_internal_interface(
                 self.nsx_v, context, router_id, network_id, address_groups)
Code Example #28
 def test_non_destructive(self):
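     # An external lock reusing an existing file must not clobber its contents.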
     lock_file = os.path.join(self.lock_dir, 'not-destroyed')
     with open(lock_file, 'w') as f:
         f.write('test')
     with lockutils.lock('not-destroyed', external=True,
                         lock_path=self.lock_dir):
         with open(lock_file) as f:
             self.assertEqual(f.read(), 'test')
Code Example #29
File: manager.py Project: yosshy/nova
 def _get_admin_context_with_token(self):
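     # Only one thread at a time fetches a token from the auth plugin.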
     with lockutils.lock('saver_admin_auth_token_lock'):
         token = self.auth_plugin.get_token(self.session)
     ctxt = context.RequestContext(user_name=CONF.saver.username,
                                   project_name=CONF.saver.project_name,
                                   auth_token=token)
     ctxt = ctxt.elevated()
     return ctxt
Code Example #30
 def handle_create(self):
     ip = self.properties[self.IP]
     for member_name in self.properties[self.GRID_MEMBERS]:
         with lockutils.lock(member_name,
                             external=True,
                             lock_file_prefix='infoblox-anycast'):
             self.infoblox.create_anycast_loopback(
                 member_name,
                 ip,
                 self.properties[self.ENABLE_BGP],
                 self.properties[self.ENABLE_OSPF])
         if self.properties[self.ENABLE_DNS]:
             with lockutils.lock(member_name,
                                 external=True,
                                 lock_file_prefix='infoblox-dns-ips'):
                 self.infoblox.add_member_dns_additional_ip(member_name,
                                                            ip)
Code Example #31
    def test_contextlock_unlocks(self):
        self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')

        with lockutils.lock("test") as sem:
            if six.PY2:
                self.assertIsInstance(sem, threading._Semaphore)
            else:
                self.assertIsInstance(sem, threading.Semaphore)

            with lockutils.lock("test2", external=True) as lock:
                self.assertTrue(lock.exists())

            # NOTE(flaper87): Lock should be free
            with lockutils.lock("test2", external=True) as lock:
                self.assertTrue(lock.exists())

        # NOTE(flaper87): Lock should be free
        # but semaphore should already exist.
        with lockutils.lock("test") as sem2:
            self.assertEqual(sem, sem2)
Code Example #32
File: memory.py Project: paulphoenix01/neutron2
    def _incr_append(self, key, other):
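        # Lock per cache key so the read-modify-write below is race-free.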
        with lockutils.lock(key):
            timeout, value = self._get_unlocked(key)

            if value is None:
                return None

            ttl = timeutils.utcnow_ts() - timeout
            new_value = value + other
            self._set_unlocked(key, new_value, ttl)
            return new_value
Code Example #33
    def setup_image(self, context, instance, image_meta):
        """Download an image from glance and upload it to LXD

        :param context: context object
        :param instance: The nova instance
        :param image_meta: Image dict returned by nova.image.glance
        """
        LOG.debug('setup_image called for instance', instance=instance)

        self.container_image = \
            self.container_dir.get_container_rootfs_image(image_meta)
        self.container_manifest = \
            self.container_dir.get_container_manifest_image(image_meta)

        with lockutils.lock(self.lock_path,
                            lock_file_prefix=('lxd-image-%s' %
                                              instance.image_ref),
                            external=True):

            if self.client.image_defined(instance):
                return

            base_dir = self.container_dir.get_base_dir()
            if not os.path.exists(base_dir):
                fileutils.ensure_tree(base_dir)

            try:
                # Inspect image for the correct format
                self._verify_image(context, instance)

                # Fetch the image from glance
                self._fetch_image(context, instance)

                # Generate the LXD manifest for the image
                self._get_lxd_manifest(instance, image_meta)

                # Upload the image to the local LXD image store
                self._image_upload(instance)

                # Setup the LXD alias for the image
                self._setup_alias(instance)

                # Remove image and manifest when done.
                self._cleanup_image(instance)

            except Exception as ex:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Failed to upload %(image)s to LXD: '
                                  '%(reason)s'), {
                                      'image': instance.image_ref,
                                      'reason': ex
                                  },
                              instance=instance)
                    self._cleanup_image(instance)
Code Example #34
File: dvr_fip_ns.py Project: mmidolesov2/neutron
 def _fip_port_lock(self, interface_name):
     # Use a namespace and port-specific lock semaphore to allow for
     # concurrency
     lock_name = 'port-lock-' + self.name + '-' + interface_name
     with lockutils.lock(lock_name, runtime.SYNCHRONIZED_PREFIX):
         try:
             yield
         except Exception:
             with excutils.save_and_reraise_exception():
                 LOG.error('DVR: FIP namespace config failure '
                           'for interface %s', interface_name)
Code Example #35
File: iptables_manager.py Project: naanal/reference
    def _apply(self):
        lock_name = 'iptables'
        if self.namespace:
            lock_name += '-' + self.namespace

        try:
            with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
                LOG.debug('Got semaphore / lock "%s"', lock_name)
                return self._apply_synchronized()
        finally:
            LOG.debug('Semaphore / lock released "%s"', lock_name)
Code Example #36
File: server.py Project: git-forked-repos/coriolis
    def _update_replica_volumes_info(self, ctxt, migration_id, instance,
                                     updated_task_info):
        migration = db_api.get_migration(ctxt, migration_id)
        replica_id = migration.replica_id

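        # Per-replica lock serializes updates to the replica's volume info.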
        with lockutils.lock(replica_id):
            LOG.debug(
                "Updating volume_info in replica due to snapshot "
                "restore during migration. replica id: %s", replica_id)
            db_api.set_transfer_action_info(ctxt, replica_id, instance,
                                            updated_task_info)
Code Example #37
File: net.py Project: bradh/shakenfist
    def deploy_nat(self):
        if not self.provide_nat:
            return

        subst = self.subst_dict()
        floatnet = from_db('floating')
        if not self.floating_gateway:
            self.floating_gateway = (
                floatnet.ipmanager.get_random_free_address())
            self.persist_floating_gateway()
            floatnet.persist_ipmanager()

        subst['floating_router'] = floatnet.ipmanager.get_address_at_index(1)
        subst['floating_gateway'] = self.floating_gateway
        subst['floating_netmask'] = floatnet.netmask

        with lockutils.lock('sf_net_%s' % self.uuid,
                            external=True,
                            lock_path='/tmp/'):
            if not subst['floating_gateway'] in list(
                    util.get_interface_addresses(
                        subst['namespace'], subst['physical_veth_inner'])):
                with util.RecordedOperation('enable virtual routing',
                                            self) as _:
                    processutils.execute(
                        '%(in_namespace)s ip addr add %(floating_gateway)s/%(floating_netmask)s '
                        'dev %(physical_veth_inner)s' % subst,
                        shell=True)
                    processutils.execute(
                        '%(in_namespace)s ip link set %(physical_veth_inner)s up'
                        % subst,
                        shell=True)
                    processutils.execute(
                        '%(in_namespace)s route add default gw %(floating_router)s'
                        % subst,
                        shell=True)

            if not util.nat_rules_for_ipblock(self.ipmanager.network_address):
                with util.RecordedOperation('enable nat', self) as _:
                    processutils.execute(
                        'echo 1 > /proc/sys/net/ipv4/ip_forward', shell=True)
                    processutils.execute(
                        '%(in_namespace)s iptables -A FORWARD -o %(physical_veth_inner)s '
                        '-i %(vx_veth_inner)s -j ACCEPT' % subst,
                        shell=True)
                    processutils.execute(
                        '%(in_namespace)s iptables -A FORWARD -i %(physical_veth_inner)s '
                        '-o %(vx_veth_inner)s -j ACCEPT' % subst,
                        shell=True)
                    processutils.execute(
                        '%(in_namespace)s iptables -t nat -A POSTROUTING -s %(ipblock)s/%(netmask)s '
                        '-o %(physical_veth_inner)s -j MASQUERADE' % subst,
                        shell=True)
Code Example #38
File: base_api.py Project: wentao1101/nova
 def get_instance_nw_info(self, context, instance, **kwargs):
     """Returns all network info related to an instance."""
     with lockutils.lock('refresh_cache-%s' % instance.uuid):
         result = self._get_instance_nw_info(context, instance, **kwargs)
         # NOTE(comstud): Don't update API cell with new info_cache every
         # time we pull network info for an instance.  The periodic healing
         # of info_cache causes too many cells messages.  Healing the API
         # will happen separately.
         update_instance_cache_with_nw_info(self, context, instance,
                                            nw_info=result,
                                            update_cells=False)
     return result
Code Example #39
    def snapshot(self, context, instance, image_id, update_task_state):
        """Create an LXD snapshot of the instance.

           Steps involved in creating an LXD Snapshot:

           1. Ensure the container exists
           2. Stop the LXD container: LXD requires a container
              to be stopped in order to take a snapshot.
           3. Publish the container: Run the API equivalent to
              'lxd publish container --alias <image_name>' to create
              a snapshot and upload it to the local LXD image store.
           4. Create an alias for the image: Create an alias so that
              nova-lxd can re-use the image that was created.
           5. Upload the image to glance so that it can be used on other
              compute hosts.

          :param context: nova security context
          :param instance: nova instance object
          :param image_id: glance image id
        """
        LOG.debug('snapshot called for instance', instance=instance)

        try:
            if not self.session.container_defined(instance.name, instance):
                raise exception.InstanceNotFound(instance_id=instance.name)

            with lockutils.lock(self.lock_path,
                                lock_file_prefix=('lxd-snapshot-%s' %
                                                  instance.name),
                                external=True):

                update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)

                # We have to stop the container before we can publish the
                # image to the local store
                self.session.container_stop(instance.name, instance)
                fingerprint = self._save_lxd_image(instance, image_id)
                self.session.container_start(instance.name, instance)

                update_task_state(
                    task_state=task_states.IMAGE_UPLOADING,
                    expected_state=task_states.IMAGE_PENDING_UPLOAD)  # noqa
                self._save_glance_image(context, instance, image_id,
                                        fingerprint)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to create snapshot for %(instance)s: '
                              '%(ex)s'), {
                                  'instance': instance.name,
                                  'ex': ex
                              },
                          instance=instance)
Code Example #40
    def publish_samples(self, samples):
        """Publish the samples to csv formatted output

        :param samples: Samples from pipeline after transformation
        """
        with lockutils.lock(self.conf.host,
                            'csv-publish-samples-',
                            external=True,
                            lock_path='/tmp/'):
            if self.is_enabled:
                if self.publisher_logger:
                    for sample in samples:
                        self.publisher_logger.info(self.format_sample(sample))
Code Example #41
File: imagecache.py Project: 2Exception/patron
    def _age_cached_images(self, context, datastore, dc_info, ds_path):
        """Ages cached images."""
        age_seconds = CONF.remove_unused_original_minimum_age_seconds
        unused_images = self.originals - self.used_images
        ds_browser = self._get_ds_browser(datastore.ref)
        for image in unused_images:
            path = self.timestamp_folder_get(ds_path, image)
            # Lock to ensure that the spawn will not try to access an image
            # that is currently being deleted on the datastore.
            with lockutils.lock(str(path),
                                lock_file_prefix='patron-vmware-ts',
                                external=True):
                ts = self._get_timestamp(ds_browser, path)
                if not ts:
                    ts_path = path.join(self._get_timestamp_filename())
                    try:
                        ds_util.mkdir(self._session, ts_path, dc_info.ref)
                    except vexc.FileAlreadyExistsException:
                        LOG.debug("Timestamp already exists.")
                    LOG.info(
                        _LI("Image %s is no longer used by this node. "
                            "Pending deletion!"), image)
                else:
                    dt = self._get_datetime_from_filename(str(ts))
                    if timeutils.is_older_than(dt, age_seconds):
                        LOG.info(
                            _LI("Image %s is no longer used. "
                                "Deleting!"), path)
                        # Image has aged - delete the image ID folder
                        self._folder_delete(path, dc_info.ref)

        # If the image is used and the timestamp file exists then we delete
        # the timestamp.
        for image in self.used_images:
            path = self.timestamp_folder_get(ds_path, image)
            with lockutils.lock(str(path),
                                lock_file_prefix='patron-vmware-ts',
                                external=True):
                self.timestamp_cleanup(dc_info.ref, ds_browser, path)
Code Example #42
    def get_nodes_subnets(self, raise_on_empty=False):
        with lockutils.lock('kuryr-machine-add'):
            # We add any hardcoded ones from config anyway.
            result = self.subnets
            if CONF.pod_vif_nested.worker_nodes_subnets:
                result = result.union(
                    set(CONF.pod_vif_nested.worker_nodes_subnets))
            if not result and raise_on_empty:
                raise exceptions.ResourceNotReady(
                    'OpenShift Machines does not exist or are not yet '
                    'handled. Cannot determine worker nodes subnets.')

            return list(result)
Code Example #43
    def delete(self, params):
        kp_name = self._get_obj_name(params)
        try:
            reg_ci = self.registry[kp_name]['containerid']
            LOG.debug('Read containerid = %s for KuryrPort %s', reg_ci,
                      kp_name)
            if reg_ci and reg_ci != params.CNI_CONTAINERID:
                # NOTE(dulek): This is a DEL request for some older (probably
                #              failed) ADD call. We should ignore it or we'll
                #              unplug a running pod.
                LOG.warning(
                    'Received DEL request for unknown ADD call for '
                    'Kuryrport %s (CNI_CONTAINERID=%s). Ignoring.', kp_name,
                    params.CNI_CONTAINERID)
                return
        except KeyError:
            pass

        # Passing arbitrary 5 seconds as timeout, as it does not make any sense
        # to wait on CNI DEL. If kuryrport got deleted from API - VIF info is
        # gone. If kuryrport got the vif info removed - it is now gone too.
        # The number's not 0, because we need to anticipate for restarts and
        # delay before registry is populated by watcher.
        try:
            self._do_work(params, b_base.disconnect, 5)
        except exceptions.ResourceNotReady:
            # So the VIF info seems to be lost at this point, we don't even
            # know what binding driver was used to plug it. Let's at least
            # try to remove the interface we created from the netns to prevent
            # possible VLAN ID conflicts.
            b_base.cleanup(params.CNI_IFNAME, params.CNI_NETNS)
            raise

        # NOTE(ndesh): We need to lock here to avoid race condition
        #              with the deletion code in the watcher to ensure that
        #              we delete the registry entry exactly once
        try:
            with lockutils.lock(kp_name, external=True):
                if self.registry[kp_name]['del_received']:
                    del self.registry[kp_name]
                else:
                    kp_dict = self.registry[kp_name]
                    kp_dict['vif_unplugged'] = True
                    self.registry[kp_name] = kp_dict
        except KeyError:
            # This means the kuryrport was removed before vif was unplugged.
            # This shouldn't happen, but we can't do anything about it now
            LOG.debug(
                'KuryrPort %s not found in registry while handling DEL '
                'request. Ignoring.', kp_name)
            pass
Code Example #44
File: base_api.py Project: ychen2u/stx-nova
    def get_instance_nw_info(self, context, instance, **kwargs):
        """Returns all network info related to an instance."""

        # WRS: this is a terrible hack to work around the fact that tox
        # installs oslo_concurrency via pip from vanilla servers.
        try:
            cachelock = lockutils.lock('refresh_cache-%s' % instance.uuid,
                                       fair=True)
        except TypeError:
            cachelock = lockutils.lock('refresh_cache-%s' % instance.uuid)
        with cachelock:
            result = self._get_instance_nw_info(context, instance, **kwargs)
            # NOTE(comstud): Don't update API cell with new info_cache every
            # time we pull network info for an instance.  The periodic healing
            # of info_cache causes too many cells messages.  Healing the API
            # will happen separately.
            update_cells = kwargs.get('update_cells', False)
            update_instance_cache_with_nw_info(self,
                                               context,
                                               instance,
                                               nw_info=result,
                                               update_cells=update_cells)
        return result
Code Example #45
 def routers_updated(self, context, routers):
     """Deal with routers modification and creation RPC message."""
     LOG.info(_(' zenic_agent Got routers updated notification :%s'),
              routers)
     if routers:
         # This is needed for backward compatibility
         if isinstance(routers[0], dict):
             routers = [router['id'] for router in routers]
         try:
             with lockutils.lock("zenic_agent_update_router"):
                 LOG.info(_('routers_updated Got update mutex'))
                 self.updated_routers.update(routers)
         except Exception as e:
             LOG.debug("lockutils, except:%s", str(e))
Code Example #46
    def get_neutron_instance_info_for_instance(self, instance):

        try:
            ports = self.neutron.list_ports(
                self.context, device_id=instance.uuid)["ports"]
            networks = [self.client.show_network(network_uuid).get('network')
                        for network_uuid in
                        set([port["network_id"] for port in ports])]
            port_ids = [port["id"] for port in ports]
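            # Same per-instance 'refresh_cache-' lock name as nova's base_api.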
            with lockutils.lock('refresh_cache-%s' % instance.uuid):
                network_info = NetworkInfo(
                    self.neutron._get_instance_nw_info(
                        self.context, instance, port_ids=port_ids,
                        networks=networks))
        except exception.InstanceNotFound:
            log.debug("- instance %s could not be found on neutron side - ignoring this instance for now", instance.uuid)
            # return None for network_info, so that we can skip this instance in the compare function
            return None

        return network_info
Code Example #47
    def set_task_error(self, ctxt, task_id, exception_details):
        LOG.error("Task error: %(task_id)s - %(ex)s", {
            "task_id": task_id,
            "ex": exception_details
        })

        db_api.set_task_status(ctxt, task_id, constants.TASK_STATUS_ERROR,
                               exception_details)

        task = db_api.get_task(ctxt, task_id)
        execution = db_api.get_tasks_execution(ctxt, task.execution_id)

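        # Lock on the owning action before cancelling its tasks execution.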
        with lockutils.lock(execution.action_id):
            self._cancel_tasks_execution(ctxt, execution)
Code Example #48
    def enlist_image(self, image_id, datastore, dc_ref):
        ds_browser = self._get_ds_browser(datastore.ref)
        cache_root_folder = datastore.build_path(self._base_folder)

        # Check if the timestamp file exists - if so then delete it. This
        # will ensure that the aging will not delete a cache image if it
        # is going to be used now.
        path = self.timestamp_folder_get(cache_root_folder, image_id)

        # Lock to ensure that the spawn will not try to access an image
        # that is currently being deleted on the datastore.
        with lockutils.lock(str(path), lock_file_prefix='nova-vmware-ts',
                            external=True):
            self.timestamp_cleanup(dc_ref, ds_browser, path)
Code Example #49
 def _update_metrics(self, command, error, duration):
     """Add a new metric value to the shared metrics dict"""
     params = {}
     try:
         params = self._prepare_request()
     except Exception:
         LOG.exception('Exception when reading CNI params.')
         return
     namespace = params.args.K8S_POD_NAMESPACE
     name = params.args.K8S_POD_NAME
     name = f'export-{namespace}/{name}'
     labels = {'command': command, 'error': error}
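     # Guard the shared metrics dict with an in-process lock per pod entry.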
     with lockutils.lock(name):
         self.metrics[name] = {'labels': labels, 'duration': duration}
Code Example #50
    def test_contextlock(self):
        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir, group='oslo_concurrency')

        try:
            # Note(flaper87): Lock is not external, which means
            # a semaphore will be yielded
            with lockutils.lock("test") as sem:
                if six.PY2:
                    self.assertIsInstance(sem, threading._Semaphore)
                else:
                    self.assertIsInstance(sem, threading.Semaphore)

                # NOTE(flaper87): Lock is external so an InterProcessLock
                # will be yielded.
                with lockutils.lock("test2", external=True) as lock:
                    self.assertTrue(lock.exists())

                with lockutils.lock("test1", external=True) as lock1:
                    self.assertIsInstance(lock1, lockutils.InterProcessLock)
        finally:
            if os.path.exists(lock_dir):
                shutil.rmtree(lock_dir, ignore_errors=True)
Code Example #51
ファイル: iscsi.py プロジェクト: esberglu/nova-powervm
    def _connect_volume_to_vio(self, vios_w, slot_mgr):
        """Attempts to connect a volume to a given VIO.

        :param vios_w: The Virtual I/O Server wrapper to connect to.
        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM

        :return: True if the volume was connected.  False if the volume was
                 not (could be the Virtual I/O Server does not have
                 connectivity to the hdisk).
        """
        transport_type = self.connection_info["driver_volume_type"]
        host_ip = self.connection_info["data"]["target_portal"]
        iqn = self.connection_info["data"]["target_iqn"]
        password = self.connection_info["data"]["auth_password"]
        user = self.connection_info["data"]["auth_username"]
        target_name = "ISCSI-" + iqn.split(":")[1]
        device_name, udid = hdisk.discover_iscsi(self.adapter,
                                                 host_ip,
                                                 user,
                                                 password,
                                                 iqn,
                                                 vios_w.uuid,
                                                 transport_type=transport_type)
        slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, device_name)
        if device_name is not None and udid is not None:
            device_name = '/dev/' + device_name
            # Found a hdisk on this Virtual I/O Server.  Add the action to
            # map it to the VM when the stg_ftsk is executed.
            with lockutils.lock(hash(self)):
                self._add_append_mapping(vios_w.uuid,
                                         device_name,
                                         lpar_slot_num=slot,
                                         lua=lua,
                                         target_name=target_name,
                                         udid=udid)

            # Save the devname for the disk in the connection info.  It is
            # used for the detach.
            self._set_devname(device_name)
            self._set_udid(udid)

            LOG.debug('Device attached: %s',
                      device_name,
                      instance=self.instance)

            # Valid attachment
            return True

        return False
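
The example above locks on hash(self): the lock name is derived from the driver instance rather than a fixed string, so concurrent mapping updates on the same adapter serialize while different instances do not contend. A hedged, minimal sketch of that per-instance locking pattern; the class and method names are illustrative:

from oslo_concurrency import lockutils


class VolumeAdapter(object):
    def __init__(self):
        self._mappings = []

    def add_mapping(self, mapping):
        # Internal lock keyed on this instance's hash: calls on the same
        # object serialize; calls on different objects use different locks.
        with lockutils.lock(hash(self)):
            self._mappings.append(mapping)
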
Code example #52
def _sync_glance_image_to_lxd(client, context, image_ref):
    """Sync an image from glance to LXD image store.

    The image from glance can't go directly into the LXD image store,
    as LXD needs some extra metadata connected to it.

    The image is stored in the LXD image store with an alias to
    the image_ref. This way, it will only copy over once.
    """
    lock_path = os.path.join(CONF.instances_path, 'locks')
    with lockutils.lock(
            lock_path, external=True,
            lock_file_prefix='lxd-image-{}'.format(image_ref)):

        try:
            image_file = tempfile.mkstemp()[1]
            manifest_file = tempfile.mkstemp()[1]

            image = IMAGE_API.get(context, image_ref)
            if image.get('disk_format') not in ACCEPTABLE_IMAGE_FORMATS:
                raise exception.ImageUnacceptable(
                    image_id=image_ref, reason=_('Bad image format'))
            IMAGE_API.download(context, image_ref, dest_path=image_file)

            metadata = {
                'architecture': image.get(
                    'hw_architecture', obj_fields.Architecture.from_host()),
                'creation_date': int(os.stat(image_file).st_ctime)}
            metadata_yaml = json.dumps(
                metadata, sort_keys=True, indent=4,
                separators=(',', ': '),
                ensure_ascii=False).encode('utf-8') + b"\n"

            tarball = tarfile.open(manifest_file, "w:gz")
            tarinfo = tarfile.TarInfo(name='metadata.yaml')
            tarinfo.size = len(metadata_yaml)
            tarball.addfile(tarinfo, io.BytesIO(metadata_yaml))
            tarball.close()

            with open(manifest_file, 'rb') as manifest:
                with open(image_file, 'rb') as image:
                    image = client.images.create(
                        image.read(), metadata=manifest.read(),
                        wait=True)
            image.add_alias(image_ref, '')

        finally:
            os.unlink(image_file)
            os.unlink(manifest_file)
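
The same one-downloader-per-host guard can also be spelled as a decorator. A hedged sketch of lockutils.synchronized, which wraps the whole function in an equivalent lockutils.lock call; the function name, lock name and prefix below are illustrative and not part of the nova-lxd code above:

import tempfile

from oslo_concurrency import lockutils

lockutils.set_defaults(lock_path=tempfile.mkdtemp())


@lockutils.synchronized('image-sync', lock_file_prefix='lxd-image-',
                        external=True)
def sync_image(image_ref):
    # Only one process on this host runs the body for this lock at a time.
    print('syncing', image_ref)


sync_image('cirros')
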
Code example #53
 def reload_extensions(self):
     lock = lockutils.lock('rating-modules')
     with lock:
         ck_utils.refresh_stevedore(PROCESSORS_NAMESPACE)
         # FIXME(sheeprine): Implement RPC messages to trigger reload on
         # processors
         self.extensions = extension.ExtensionManager(
             PROCESSORS_NAMESPACE,
             # FIXME(sheeprine): don't want to load it here as we just need
             # the controller
             invoke_on_load=True)
         if not self._first_call:
             self.notify_reload()
         else:
             self._first_call = False
Code example #54
 def __call__(self, event, *args, **kwargs):
     group = self._group_by(event)
     with lockutils.lock(group):
         try:
             queue = self._queues[group]
             # NOTE(dulek): We don't want to risk injecting an outdated
             #              state if events for that resource are in queue.
             if kwargs.get('injected', False):
                 return
         except KeyError:
             queue = py_queue.Queue(self._queue_depth)
             self._queues[group] = queue
             thread = self._thread_group.add_thread(self._run, group, queue)
             thread.link(self._done, group)
     queue.put((event, args, kwargs))
Code example #55
def get_client(context, admin=False):
    if admin or (context.is_admin and not context.auth_token):
        with lockutils.lock('neutron_admin_auth_token_lock'):
            orig_token = AdminTokenStore.get().admin_auth_token
        client = _get_client(orig_token, admin=True)
        return ClientWrapper(client)

    # We got a user token that we can use as-is
    if context.auth_token:
        token = context.auth_token
        return _get_client(token=token)

    # We did not get a user token and we should not be using
    # an admin token, so reject the request as unauthorized
    raise neutron_client_exc.Unauthorized()
Code example #56
    def instance(cls, l3_agent):
        """Creates instance (singleton) of service.

        Do not directly call this for the base class. Instead, it should be
        called by a child class, that represents a specific service type.

        This ensures that only one instance is created for all agents of a
        specific service type.
        """
        if not cls._instance:
            with lockutils.lock('instance'):
                if not cls._instance:
                    cls._instance = cls(l3_agent)

        return cls._instance
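
The example above is double-checked locking: the unlocked check keeps the hot path cheap, and the second check under the lock guarantees only one caller constructs the singleton. A hedged, standalone sketch of the same pattern; the Service class and lock name are illustrative:

from oslo_concurrency import lockutils


class Service(object):
    _instance = None

    def __init__(self, agent):
        self.agent = agent

    @classmethod
    def instance(cls, agent):
        if not cls._instance:
            with lockutils.lock('service-singleton'):
                # Re-check under the lock: another thread may have created
                # the instance while this one waited.
                if not cls._instance:
                    cls._instance = cls(agent)
        return cls._instance
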
Code example #57
File: net.py Project: bradh/shakenfist
 def remove_dhcp(self):
     if config.parsed.get('NODE_IP') == config.parsed.get(
             'NETWORK_NODE_IP'):
         subst = self.subst_dict()
         with util.RecordedOperation('remove dhcp', self) as _:
             with lockutils.lock('sf_net_%s' % self.uuid,
                                 external=True,
                                 lock_path='/tmp/'):
                 d = dhcp.DHCP(self.uuid, subst['vx_veth_inner'])
                 d.remove_dhcpd()
     else:
         requests.request('put', ('http://%s:%d/remove_dhcp' %
                                  (config.parsed.get('NETWORK_NODE_IP'),
                                   config.parsed.get('API_PORT'))),
                          data=json.dumps({'uuid': self.uuid}))
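
Note that the example above passes lock_path directly to lockutils.lock(), overriding the configured oslo_concurrency lock_path for that single lock. A hedged sketch of the same keyword, with an illustrative lock name:

from oslo_concurrency import lockutils

# External file lock created under /tmp/ regardless of the globally
# configured lock_path.
with lockutils.lock('sf_net_example', external=True, lock_path='/tmp/'):
    pass  # reconfigure dnsmasq for this network here
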
Code example #58
    def _connect_volume_to_vio(self, vios_w, slot_mgr):
        """Attempts to connect a volume to a given VIO.

        :param vios_w: The Virtual I/O Server wrapper to connect to.
        :param slot_mgr: A NovaSlotManager.  Used to delete the client slots
                         used when a volume is detached from the VM

        :return: True if the volume was connected.  False if the volume was
                 not (could be the Virtual I/O Server does not have
                 connectivity to the hdisk).
        """
        status, device_name, udid = self._discover_volume_on_vios(
            vios_w, self.volume_id)

        # Get the slot and LUA to assign.
        slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, udid)

        if slot_mgr.is_rebuild and not slot:
            LOG.debug(
                'Detected a device with UDID %(udid)s on VIOS '
                '%(vios)s on the rebuild that did not exist on the '
                'source. Ignoring.', {
                    'udid': udid,
                    'vios': vios_w.uuid
                },
                instance=self.instance)
            return False

        if hdisk.good_discovery(status, device_name):
            # Found a hdisk on this Virtual I/O Server.  Add the action to
            # map it to the VM when the stg_ftsk is executed.
            with lockutils.lock(hash(self)):
                self._add_append_mapping(vios_w.uuid,
                                         device_name,
                                         lpar_slot_num=slot,
                                         lua=lua)

            # Save the UDID for the disk in the connection info.  It is
            # used for the detach.
            self._set_udid(udid)
            LOG.debug('Added deferred task to attach device %s',
                      device_name,
                      instance=self.instance)

            # Valid attachment
            return True

        return False
Code example #59
    def add_node(self, machine):
        subnet_id = self._get_subnet_from_machine(machine)
        if not subnet_id:
            LOG.warning('Could not determine subnet of Machine %s',
                        machine['metadata']['name'])
            return False

        with lockutils.lock('kuryr-machine-add'):
            if subnet_id not in self.subnets:
                LOG.info(
                    'Adding subnet %s to the worker nodes subnets as '
                    'machine %s runs in it.', subnet_id,
                    machine['metadata']['name'])
                self.subnets.add(subnet_id)
                return True
            return False
Code example #60
    def _apply(self):
        lock_name = 'iptables'
        if self.namespace:
            lock_name += '-' + self.namespace

        with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
            first = self._apply_synchronized()
            if not cfg.CONF.AGENT.debug_iptables_rules:
                return first
            second = self._apply_synchronized()
            if second:
                msg = (_("IPTables Rules did not converge. Diff: %s") %
                       '\n'.join(second))
                LOG.error(msg)
                raise n_exc.IpTablesApplyException(msg)
            return first
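
In the call above the positional arguments map to lock(name, lock_file_prefix, external), so the iptables lock is an external file lock whose file name carries the agent's prefix. A hedged, standalone equivalent using keywords; the lock name, prefix value and lock_path are illustrative:

import tempfile

from oslo_concurrency import lockutils

lockutils.set_defaults(lock_path=tempfile.mkdtemp())

# Keyword form of lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True).
with lockutils.lock('iptables-qrouter-1', lock_file_prefix='neutron-',
                    external=True):
    pass  # apply and verify iptables rules here
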