Пример #1
0
 def get_instance_nw_info(self, context, instance, **kwargs):
     """Returns all network info related to an instance."""
     instance_uuid = (instance['uuid'] if isinstance(instance, dict)
                      else instance.uuid)
     with lockutils.lock('refresh_cache-%s' % instance_uuid):
         # NOTE(danms): Several places in the code look up instances
         # without pulling system_metadata for performance, and call
         # this function.  Re-fetch with it loaded so the call to
         # network_api (which requires it for instance_type) succeeds.
         # NOTE(Rui Chen): The refresh also avoids a race between
         # booting/attaching_interface and the nova/neutron event
         # reporting mechanism. See details:
         # https://bugs.launchpad.net/nova/+bug/1407664
         instance = objects.Instance.get_by_uuid(
             context, instance_uuid,
             expected_attrs=['system_metadata'],
             use_slave=kwargs.get('use_slave', False))
         result = self._get_instance_nw_info(context, instance, **kwargs)
         # NOTE(comstud): Don't push the new info_cache to the API cell
         # on every pull; periodic healing of info_cache would cause too
         # many cells messages.  Healing the API will happen separately.
         update_instance_cache_with_nw_info(self, context, instance,
                                            nw_info=result,
                                            update_cells=False)
     return result
Пример #2
0
    def _spawn(self, node, context, instance, image_meta, injected_files,
            admin_password, network_info=None, block_device_info=None):
        """Deploy *instance* onto the baremetal *node*.

        Plugs VIFs, attaches block devices, starts the firewall, caches
        the deploy images (serialised via an external lock), activates
        the bootloader, power-cycles the node and activates it.  On any
        failure every step is undone in reverse order and the exception
        is re-raised.
        """
        try:
            self._plug_vifs(instance, network_info, context=context)
            self._attach_block_devices(instance, block_device_info)
            self._start_firewall(instance, network_info)

            # Caching images is both CPU and I/O expensive. When running many
            # machines from a single nova-compute server, deploys of multiple
            # machines can easily thrash the nova-compute server - unlike a
            # virt hypervisor which is limited by CPU for VMs, baremetal only
            # uses CPU and I/O when deploying. By only downloading one image
            # at a time we serialise rather than thrashing, which leads to a
            # lower average time-to-complete during overload situations, and
            # a (relatively) insignificant delay for compute servers which
            # have sufficient IOPS to handle multiple concurrent image
            # conversions.
            with lockutils.lock('nova-baremetal-cache-images', external=True):
                self.driver.cache_images(
                            context, node, instance,
                            admin_password=admin_password,
                            image_meta=image_meta,
                            injected_files=injected_files,
                            network_info=network_info,
                        )
            self.driver.activate_bootloader(context, node, instance,
                                            network_info=network_info)
            # NOTE(deva): ensure node is really off before we turn it on
            #             fixes bug https://code.launchpad.net/bugs/1178919
            self.power_off(instance, node)
            self.power_on(context, instance, network_info, block_device_info,
                          node)
            _update_state(context, node, instance, baremetal_states.PREPARED)

            self.driver.activate_node(context, node, instance)
            _update_state(context, node, instance, baremetal_states.ACTIVE)
        except Exception:
            # Tear down in reverse order of setup, then re-raise the
            # original exception.
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error deploying instance %(instance)s "
                            "on baremetal node %(node)s.") %
                            {'instance': instance['uuid'],
                             'node': node['uuid']})

                # Do not set instance=None yet. This prevents another
                # spawn() while we are cleaning up.
                _update_state(context, node, instance, baremetal_states.ERROR)

                self.driver.deactivate_node(context, node, instance)
                self.power_off(instance, node)
                self.driver.deactivate_bootloader(context, node, instance)
                self.driver.destroy_images(context, node, instance)

                self._detach_block_devices(instance, block_device_info)
                self._stop_firewall(instance, network_info)
                self._unplug_vifs(instance, network_info)

                # Cleanup is finished: clearing instance lifts the guard
                # against a concurrent spawn() noted above.
                _update_state(context, node, None, baremetal_states.DELETED)
        else:
            # We no longer need the image since we successfully deployed.
            self.driver.destroy_images(context, node, instance)
Пример #3
0
    def _spawn(self, node, context, instance, image_meta, injected_files,
            admin_password, network_info=None, block_device_info=None):
        """Deploy *instance* onto the baremetal *node*.

        Plugs VIFs, attaches block devices, starts the firewall, caches
        the deploy images (serialised via an external lock), activates
        the bootloader, power-cycles the node and activates it.  On any
        failure every step is undone in reverse order and the exception
        is re-raised.
        """
        try:
            self._plug_vifs(instance, network_info, context=context)
            self._attach_block_devices(instance, block_device_info)
            self._start_firewall(instance, network_info)

            # Caching images is both CPU and I/O expensive. When running many
            # machines from a single nova-compute server, deploys of multiple
            # machines can easily thrash the nova-compute server - unlike a
            # virt hypervisor which is limited by CPU for VMs, baremetal only
            # uses CPU and I/O when deploying. By only downloading one image
            # at a time we serialise rather than thrashing, which leads to a
            # lower average time-to-complete during overload situations, and
            # a (relatively) insignificant delay for compute servers which
            # have sufficient IOPS to handle multiple concurrent image
            # conversions.
            with lockutils.lock('nova-baremetal-cache-images', external=True):
                self.driver.cache_images(
                            context, node, instance,
                            admin_password=admin_password,
                            image_meta=image_meta,
                            injected_files=injected_files,
                            network_info=network_info,
                        )
            self.driver.activate_bootloader(context, node, instance,
                                            network_info=network_info)
            # NOTE(deva): ensure node is really off before we turn it on
            #             fixes bug https://code.launchpad.net/bugs/1178919
            self.power_off(instance, node)
            self.power_on(context, instance, network_info, block_device_info,
                          node)
            _update_state(context, node, instance, baremetal_states.PREPARED)

            self.driver.activate_node(context, node, instance)
            _update_state(context, node, instance, baremetal_states.ACTIVE)
        except Exception:
            # Tear down in reverse order of setup, then re-raise the
            # original exception.
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error deploying instance %(instance)s "
                            "on baremetal node %(node)s.") %
                            {'instance': instance['uuid'],
                             'node': node['uuid']})

                # Do not set instance=None yet. This prevents another
                # spawn() while we are cleaning up.
                _update_state(context, node, instance, baremetal_states.ERROR)

                self.driver.deactivate_node(context, node, instance)
                self.power_off(instance, node)
                self.driver.deactivate_bootloader(context, node, instance)
                self.driver.destroy_images(context, node, instance)

                self._detach_block_devices(instance, block_device_info)
                self._stop_firewall(instance, network_info)
                self._unplug_vifs(instance, network_info)

                # Cleanup is finished: clearing instance lifts the guard
                # against a concurrent spawn() noted above.
                _update_state(context, node, None, baremetal_states.DELETED)
        else:
            # We no longer need the image since we successfully deployed.
            self.driver.destroy_images(context, node, instance)
Пример #4
0
def manage_main():
    """CLI entry point: query or modify the lxc usernet quota file.

    ``get`` prints matching entries and returns 0; ``set``/``inc``/
    ``dec`` update a count under a file lock (``del`` is shorthand for
    ``set 0``).
    """
    fname = ETC_LXC_USERNET

    parser = argparse.ArgumentParser()
    parser.add_argument("--type", "-t", help="nic type (default: 'veth')",
                        default="veth", dest="ntype")
    parser.add_argument('operation', choices=("set", "inc", "dec", "del", "get"))
    parser.add_argument('user', help="username")
    parser.add_argument('bridge', help="bridge")
    # BUG FIX: was ``const=int``, which is ignored for a positional
    # argument with nargs="?"; ``type=int`` actually converts the value.
    parser.add_argument('count', nargs="?", help="number to operate with.",
                        default=None, type=int)

    args = parser.parse_args()
    if args.operation == "del":
        # "del" is shorthand for setting the count to zero.
        args.operation = "set"
        args.count = 0
    elif args.operation in ("set", "inc", "dec") and args.count is None:
        args.count = 1

    if args.operation == "get":
        if args.bridge == "*":
            # "*" matches entries for any bridge.
            args.bridge = None
        matching = lfilter(fname, user=args.user, bridge=args.bridge,
                           count=args.count, ntype=args.ntype)
        for entry in matching:
            print(str(entry))
        return 0

    with lockutils.lock(str(fname)):
        if not os.path.exists(fname):
            # BUG FIX: ``fp`` was referenced without ever being opened
            # (NameError); create the file with its header line first.
            with open(fname, "w") as fp:
                fp.write("# USERNAME TYPE BRIDGE COUNT\n")
        update_usernet(user=args.user, bridge=args.bridge, op=args.operation,
                       count=args.count, ntype=args.ntype, fname=fname)
Пример #5
0
 def get_instance_nw_info(self, context, instance, networks=None,
                          port_ids=None, use_slave=False, pci_list=None):
     """Return network information for the given instance and refresh
        the instance's network info cache with the result.
     """
     # NOTE(): use_slave is currently ignored; ideally it would route
     # the lookups to slave database servers instead of the master.
     instance_uuid = instance['uuid']
     with lockutils.lock('refresh_cache-%s' % instance_uuid):
         instance = objects.Instance.get_by_uuid(
             context, instance_uuid,
             expected_attrs=['system_metadata'],
             use_slave=use_slave)
         nw_info = self._get_instance_nw_info(context, instance, networks,
                                              port_ids, pci_list)
         base_api.update_instance_cache_with_nw_info(
             self, context, instance, nw_info=nw_info, update_cells=False)
     return nw_info
Пример #6
0
    def _age_cached_images(self, context, datastore, dc_info, ds_path):
        """Ages cached images.

        For each cached image no longer in use, create a timestamp
        marker the first time it is seen unused, and delete the image
        folder once that marker is older than the configured age.  For
        images still in use, remove any stale timestamp marker.
        """
        age_seconds = CONF.remove_unused_original_minimum_age_seconds
        unused_images = self.originals - self.used_images
        ds_browser = self._get_ds_browser(datastore['ref'])
        for image in unused_images:
            path = self.timestamp_folder_get(ds_path, image)
            # Lock to ensure that the spawn will not try and access a image
            # that is currently being deleted on the datastore.
            with lockutils.lock(path,
                                lock_file_prefix='nova-vmware-ts',
                                external=True):
                ts = self._get_timestamp(ds_browser, path)
                if not ts:
                    # First pass where the image is unused: create the
                    # marker so its age can be measured next time round.
                    ts_path = '%s/%s' % (path, self._get_timestamp_filename())
                    try:
                        ds_util.mkdir(self._session, ts_path, dc_info.ref)
                    except error_util.FileAlreadyExistsException:
                        # A concurrent writer created it first; harmless.
                        LOG.debug(_("Timestamp already exists."))
                    LOG.info(
                        _("Image %s is no longer used by this node. "
                          "Pending deletion!"), image)
                else:
                    dt = self._get_datetime_from_filename(ts)
                    if timeutils.is_older_than(dt, age_seconds):
                        LOG.info(_("Image %s is no longer used. "
                                   "Deleting!"), path)
                        # Image has aged - delete the image ID folder
                        self._folder_delete(path, dc_info.ref)

        # If the image is used and the timestamp file exists then we delete
        # the timestamp.
        for image in self.used_images:
            path = self.timestamp_folder_get(ds_path, image)
            with lockutils.lock(path,
                                lock_file_prefix='nova-vmware-ts',
                                external=True):
                self.timestamp_cleanup(dc_info.ref, ds_browser,
                                       datastore['ref'], datastore['name'],
                                       path)
Пример #7
0
    def _age_cached_images(self, context, datastore, dc_info,
                           ds_path):
        """Ages cached images.

        For each cached image no longer in use, create a timestamp
        marker the first time it is seen unused, and delete the image
        folder once that marker is older than the configured age.  For
        images still in use, remove any stale timestamp marker.
        """
        age_seconds = CONF.remove_unused_original_minimum_age_seconds
        unused_images = self.originals - self.used_images
        ds_browser = self._get_ds_browser(datastore['ref'])
        for image in unused_images:
            path = self.timestamp_folder_get(ds_path, image)
            # Lock to ensure that the spawn will not try and access a image
            # that is currently being deleted on the datastore.
            with lockutils.lock(path, lock_file_prefix='nova-vmware-ts',
                                external=True):
                ts = self._get_timestamp(ds_browser, path)
                if not ts:
                    # First pass where the image is unused: create the
                    # marker so its age can be measured next time round.
                    ts_path = '%s/%s' % (path,
                                         self._get_timestamp_filename())
                    try:
                        ds_util.mkdir(self._session, ts_path, dc_info.ref)
                    except error_util.FileAlreadyExistsException:
                        # A concurrent writer created it first; harmless.
                        LOG.debug(_("Timestamp already exists."))
                    LOG.info(_("Image %s is no longer used by this node. "
                               "Pending deletion!"), image)
                else:
                    dt = self._get_datetime_from_filename(ts)
                    if timeutils.is_older_than(dt, age_seconds):
                        LOG.info(_("Image %s is no longer used. "
                                   "Deleting!"), path)
                        # Image has aged - delete the image ID folder
                        self._folder_delete(path, dc_info.ref)

        # If the image is used and the timestamp file exists then we delete
        # the timestamp.
        for image in self.used_images:
            path = self.timestamp_folder_get(ds_path, image)
            with lockutils.lock(path, lock_file_prefix='nova-vmware-ts',
                                external=True):
                self.timestamp_cleanup(dc_info.ref, ds_browser,
                                       datastore['ref'], datastore['name'],
                                       path)
Пример #8
0
    def enlist_image(self, image_id, datastore, dc_ref):
        """Protect a cached image that is about to be used.

        Deletes the image's timestamp marker (if present) under the
        same external lock the aging task takes, so aging cannot reap
        a cache entry while a spawn is about to access it.
        """
        browser = self._get_ds_browser(datastore.ref)
        cache_root = datastore.build_path(self._base_folder)
        ts_folder = self.timestamp_folder_get(cache_root, image_id)
        # Serialise with the deletion path on the datastore.
        with lockutils.lock(str(ts_folder),
                            lock_file_prefix='nova-vmware-ts',
                            external=True):
            self.timestamp_cleanup(dc_ref, browser, ts_folder)
Пример #9
0
File: api.py  Project: nash-x/hws
 def get_instance_nw_info(self, context, instance, networks=None, port_ids=None, use_slave=False, pci_list=None):
     """Return network information for specified instance and update
        the cached network info.
     """
     # NOTE(): use_slave is accepted for interface compatibility but is
     # not honoured here; lookups always go to the master.
     with lockutils.lock("refresh_cache-%s" % instance["uuid"]):
         instance = objects.Instance.get_by_uuid(context,
                                                 instance["uuid"],
                                                 expected_attrs=["system_metadata"],
                                                 use_slave=use_slave)
         result = self._get_instance_nw_info(context, instance, networks,
                                             port_ids, pci_list)
         base_api.update_instance_cache_with_nw_info(self,
                                                     context,
                                                     instance,
                                                     nw_info=result,
                                                     update_cells=False)
     return result
Пример #10
0
    def enlist_image(self, image_id, datastore, dc_ref):
        """Protect a cached image that is about to be used.

        Deletes the image's timestamp marker (if present) under the
        same external lock the aging task takes, so aging cannot reap
        a cache entry while a spawn is about to access it.
        """
        browser = self._get_ds_browser(datastore.ref)
        cache_root = datastore.build_path(self._base_folder)
        ts_folder = self.timestamp_folder_get(cache_root, image_id)
        # Serialise with the deletion path on the datastore.
        with lockutils.lock(str(ts_folder),
                            lock_file_prefix='nova-vmware-ts',
                            external=True):
            self.timestamp_cleanup(dc_ref, browser, ts_folder)
Пример #11
0
    def wrapper(self, context, *args, **kwargs):
        """Invoke the wrapped function, then refresh the instance's
        network info cache with its result.
        """
        res = f(self, context, *args, **kwargs)
        # Locate the instance argument: keyword first, then positional
        # via the wrapped function's argspec (-2 skips self/context).
        instance = kwargs.get('instance')
        if not instance:
            try:
                instance = args[argspec.args.index('instance') - 2]
            except ValueError:
                msg = _('instance is a required argument to use @refresh_cache')
                raise Exception(msg)

        with lockutils.lock('refresh_cache-%s' % instance['uuid']):
            update_instance_cache_with_nw_info(self, context, instance,
                                               nw_info=res)
        # Hand the wrapped function's return value back unchanged.
        return res
Пример #12
0
def manage_main():
    """CLI entry point: query or modify the lxc usernet quota file.

    ``get`` prints matching entries and returns 0; ``set``/``inc``/
    ``dec`` update a count under a file lock (``del`` is shorthand for
    ``set 0``).
    """
    fname = ETC_LXC_USERNET

    parser = argparse.ArgumentParser()
    parser.add_argument("--type",
                        "-t",
                        help="nic type (default: 'veth')",
                        default="veth",
                        dest="ntype")
    parser.add_argument('operation',
                        choices=("set", "inc", "dec", "del", "get"))
    parser.add_argument('user', help="username")
    parser.add_argument('bridge', help="bridge")
    # BUG FIX: was ``const=int``, which is ignored for a positional
    # argument with nargs="?"; ``type=int`` actually converts the value.
    parser.add_argument('count',
                        nargs="?",
                        help="number to operate with.",
                        default=None,
                        type=int)

    args = parser.parse_args()
    if args.operation == "del":
        # "del" is shorthand for setting the count to zero.
        args.operation = "set"
        args.count = 0
    elif args.operation in ("set", "inc", "dec") and args.count is None:
        args.count = 1

    if args.operation == "get":
        if args.bridge == "*":
            # "*" matches entries for any bridge.
            args.bridge = None
        matching = lfilter(fname,
                           user=args.user,
                           bridge=args.bridge,
                           count=args.count,
                           ntype=args.ntype)
        for entry in matching:
            print(str(entry))
        return 0

    with lockutils.lock(str(fname)):
        if not os.path.exists(fname):
            # BUG FIX: ``fp`` was referenced without ever being opened
            # (NameError); create the file with its header line first.
            with open(fname, "w") as fp:
                fp.write("# USERNAME TYPE BRIDGE COUNT\n")
        update_usernet(user=args.user,
                       bridge=args.bridge,
                       op=args.operation,
                       count=args.count,
                       ntype=args.ntype,
                       fname=fname)
Пример #13
0
    def _open_cached_file(self, context, image_ref, image_meta, dst):
        """Return an open file for the cached image, fetching on a miss.

        Fast path: if the cached file exists, open and return it.
        Otherwise take an external lock (one download per image across
        processes), re-check under the lock, then download into a temp
        file and atomically rename it into place.

        :param dst: unused; kept for interface compatibility.
        :returns: an open file object for the cached image.
        """
        image_id = image_meta['id']
        fpath = self._get_cached_file(image_id)

        f = self._open(fpath)
        if f:
            return f

        with lockutils.lock(image_id, external=True, lock_path=self.locks_dir):
            # Re-check under the lock: another process may have cached
            # the image while we waited.
            f = self._open(fpath)
            if f:
                return f

            # BUG FIX: tempfile.mktemp is race-prone (the returned name
            # can be claimed by another process before use); mkstemp
            # creates the file safely.
            fd, tmp = tempfile.mkstemp(dir=self.tmp_dir)
            os.close(fd)
            try:
                self._cache_image(context, image_ref, image_meta, tmp)
                f = open(tmp)
                os.rename(tmp, fpath)
            except Exception:
                # Don't leave a partial temp file behind on failure.
                if os.path.exists(tmp):
                    os.unlink(tmp)
                raise
            return f
Пример #14
0
    def _open_cached_file(self, context, image_ref, image_meta, dst):
        """Return an open file for the cached image, fetching on a miss.

        Fast path: if the cached file exists, open and return it.
        Otherwise take an external lock (one download per image across
        processes), re-check under the lock, then download into a temp
        file and atomically rename it into place.

        :param dst: unused; kept for interface compatibility.
        :returns: an open file object for the cached image.
        """
        image_id = image_meta['id']
        fpath = self._get_cached_file(image_id)

        f = self._open(fpath)
        if f:
            return f

        with lockutils.lock(image_id, external=True, lock_path=self.locks_dir):
            # Re-check under the lock: another process may have cached
            # the image while we waited.
            f = self._open(fpath)
            if f:
                return f

            # BUG FIX: tempfile.mktemp is race-prone (the returned name
            # can be claimed by another process before use); mkstemp
            # creates the file safely.
            fd, tmp = tempfile.mkstemp(dir=self.tmp_dir)
            os.close(fd)
            try:
                self._cache_image(context, image_ref, image_meta, tmp)
                f = open(tmp)
                os.rename(tmp, fpath)
            except Exception:
                # Don't leave a partial temp file behind on failure.
                if os.path.exists(tmp):
                    os.unlink(tmp)
                raise
            return f
Пример #15
0
def get_client(context, admin=False):
    """Build a neutron client for *context*.

    Admin requests (explicit ``admin=True``, or an admin context that
    carries no auth token — e.g. the metadata API) reuse the cached
    neutron admin token.  Otherwise the caller's own token is used.

    :raises exceptions.Unauthorized: when no usable token is available.
    """
    use_admin_token = admin or (context.is_admin and not context.auth_token)
    if use_admin_token:
        # NOTE(dprince): fall back to the neutron admin tenant
        # credentials when an admin context has no auth token of its own.
        with lockutils.lock('neutron_admin_auth_token_lock'):
            admin_token = AdminTokenStore.get().admin_auth_token
        return ClientWrapper(_get_client(admin_token, admin=True))

    if context.auth_token:
        # A user token was supplied; use it as-is.
        return _get_client(token=context.auth_token)

    # Neither a user token nor admin credentials apply.
    raise exceptions.Unauthorized()
Пример #16
0
def get_client(context, admin=False):
    """Build a neutron client for *context*.

    Admin requests (explicit ``admin=True``, or an admin context that
    carries no auth token — e.g. the metadata API) reuse the cached
    neutron admin token.  Otherwise the caller's own token is used.

    :raises exceptions.Unauthorized: when no usable token is available.
    """
    use_admin_token = admin or (context.is_admin and not context.auth_token)
    if use_admin_token:
        # NOTE(dprince): fall back to the neutron admin tenant
        # credentials when an admin context has no auth token of its own.
        with lockutils.lock('neutron_admin_auth_token_lock'):
            admin_token = AdminTokenStore.get().admin_auth_token
        return ClientWrapper(_get_client(admin_token, admin=True))

    if context.auth_token:
        # A user token was supplied; use it as-is.
        return _get_client(token=context.auth_token)

    # Neither a user token nor admin credentials apply.
    raise exceptions.Unauthorized()
Пример #17
0
 def __init__(self, name, lock_file_prefix=None):
     """Wrap ``lockutils.lock`` as a stored lock manager.

     :param name: name of the lock.
     :param lock_file_prefix: optional prefix for the lock file name.
     """
     # NOTE(review): the third positional argument is presumably
     # ``external=True`` in the lockutils.lock signature in use —
     # confirm against that library version.
     self.mgr = lockutils.lock(name, lock_file_prefix, True)
Пример #18
0
def _update_token(new_token):
    """Replace the shared neutron admin auth token under its lock."""
    with lockutils.lock('neutron_admin_auth_token_lock'):
        AdminTokenStore.get().admin_auth_token = new_token
Пример #19
0
def _update_token(new_token):
    """Replace the shared neutron admin auth token under its lock."""
    with lockutils.lock('neutron_admin_auth_token_lock'):
        AdminTokenStore.get().admin_auth_token = new_token