Ejemplo n.º 1
0
    def refresh_server(self, server):
        """Refresh server's attributes, tolerating non-critical API errors.

        OverLimit responses and 500/503 client errors are logged as
        warnings and swallowed; any other ClientException is re-raised.
        """
        try:
            server.get()
        except exceptions.OverLimit as exc:
            LOG.warning(
                _LW("Server %(name)s (%(id)s) received an OverLimit "
                    "response during server.get(): %(exception)s"),
                {'name': server.name, 'id': server.id, 'exception': exc})
        except exceptions.ClientException as exc:
            status = getattr(exc, 'http_status', getattr(exc, 'code', None))
            if status not in (500, 503):
                raise
            LOG.warning(
                _LW('Server "%(name)s" (%(id)s) received the '
                    'following exception during server.get(): '
                    '%(exception)s'),
                {'name': server.name, 'id': server.id, 'exception': exc})
Ejemplo n.º 2
0
    def fetch_server(self, server_id):
        """Fetch fresh server object from Nova.

        Log warnings and return None for non-critical API errors.
        Use this method in various ``check_*_complete`` resource methods,
        where intermittent errors can be tolerated.
        """
        try:
            return self.client().servers.get(server_id)
        except exceptions.OverLimit as exc:
            LOG.warning(_LW("Received an OverLimit response when "
                            "fetching server (%(id)s) : %(exception)s"),
                        {'id': server_id, 'exception': exc})
        except exceptions.ClientException as exc:
            status = getattr(exc, 'http_status', getattr(exc, 'code', None))
            if status not in (500, 503):
                raise
            LOG.warning(_LW("Received the following exception when "
                        "fetching server (%(id)s) : %(exception)s"),
                        {'id': server_id, 'exception': exc})
        return None
Ejemplo n.º 3
0
    def fetch_server(self, server_id):
        """Fetch fresh server object from Nova.

        Log warnings and return None for non-critical API errors.
        Use this method in various ``check_*_complete`` resource methods,
        where intermittent errors can be tolerated.
        """
        server = None
        try:
            server = self.client().servers.get(server_id)
        except exceptions.OverLimit as exc:
            LOG.warning(
                _LW("Received an OverLimit response when "
                    "fetching server (%(id)s) : %(exception)s"),
                {'id': server_id, 'exception': exc})
        except exceptions.ClientException as exc:
            code = getattr(exc, 'http_status', getattr(exc, 'code', None))
            if code in (500, 503):
                LOG.warning(
                    _LW("Received the following exception when "
                        "fetching server (%(id)s) : %(exception)s"),
                    {'id': server_id, 'exception': exc})
            else:
                raise
        return server
Ejemplo n.º 4
0
 def _validate_type(self, attrib, value):
     if attrib.schema.type == attrib.schema.STRING:
         if not isinstance(value, six.string_types):
             LOG.warning(_LW("Attribute %(name)s is not of type "
                             "%(att_type)s"),
                         {'name': attrib.name,
                          'att_type': attrib.schema.STRING})
     elif attrib.schema.type == attrib.schema.LIST:
         if (not isinstance(value, collections.Sequence) or
                 isinstance(value, six.string_types)):
             LOG.warning(_LW("Attribute %(name)s is not of type "
                             "%(att_type)s"),
                         {'name': attrib.name,
                          'att_type': attrib.schema.LIST})
     elif attrib.schema.type == attrib.schema.MAP:
         if not isinstance(value, collections.Mapping):
             LOG.warning(_LW("Attribute %(name)s is not of type "
                             "%(att_type)s"),
                         {'name': attrib.name,
                          'att_type': attrib.schema.MAP})
     elif attrib.schema.type == attrib.schema.INTEGER:
         if not isinstance(value, int):
             LOG.warning(_LW("Attribute %(name)s is not of type "
                             "%(att_type)s"),
                         {'name': attrib.name,
                          'att_type': attrib.schema.INTEGER})
     elif attrib.schema.type == attrib.schema.BOOLEAN:
         try:
             strutils.bool_from_string(value, strict=True)
         except ValueError:
             LOG.warning(_LW("Attribute %(name)s is not of type "
                             "%(att_type)s"),
                         {'name': attrib.name,
                          'att_type': attrib.schema.BOOLEAN})
Ejemplo n.º 5
0
    def delete_stack_domain_project(self, project_id):
        """Delete the keystone project backing a stack, when safe to do so.

        Silently does nothing when the heat domain is not configured,
        when the project cannot be found or inspected, or when the
        project does not live in the heat stack domain.
        """
        # FIXME(shardy): Legacy fallback for folks using old heat.conf
        # files which lack domain configuration
        if not self.stack_domain:
            return

        # Stacks created before the heat domain was configured live in
        # the default domain, in the user's own project, which we must
        # *not* delete.  With the keystone v3cloudsample policy we may
        # also get Forbidden when inspecting the project; in both cases
        # leave the project alone.
        try:
            project = self.domain_admin_client.projects.get(project=project_id)
        except kc_exception.NotFound:
            return
        except kc_exception.Forbidden:
            LOG.warning(_LW('Unable to get details for project %s, '
                            'not deleting'), project_id)
            return

        if project.domain_id != self.stack_domain_id:
            LOG.warning(_LW('Not deleting non heat-domain project'))
            return

        try:
            project.delete()
        except kc_exception.NotFound:
            # Already gone - that's the desired end state anyway.
            pass
Ejemplo n.º 6
0
def startup_sanity_check():
    """Validate domain and encryption-key configuration at startup.

    Raises exception.Error on fatal misconfiguration; merely warns for
    legacy (pre-domain) configurations and short-but-usable keys.
    """
    if cfg.CONF.stack_user_domain_id or cfg.CONF.stack_user_domain_name:
        domain_admin_user = cfg.CONF.stack_domain_admin
        domain_admin_password = cfg.CONF.stack_domain_admin_password
        if not (domain_admin_user and domain_admin_password):
            raise exception.Error(
                _('heat.conf misconfigured, cannot '
                  'specify "stack_user_domain_id" or '
                  '"stack_user_domain_name" without '
                  '"stack_domain_admin" and '
                  '"stack_domain_admin_password"'))
    else:
        # FIXME(shardy): Legacy fallback for folks using old heat.conf
        # files which lack domain configuration
        LOG.warning(
            _LW('stack_user_domain_id or stack_user_domain_name not '
                'set in heat.conf falling back to using default'))
    auth_key_len = len(cfg.CONF.auth_encryption_key)
    if auth_key_len in (16, 24):
        LOG.warning(
            _LW('Please update auth_encryption_key to be 32 characters.'))
    elif auth_key_len != 32:
        raise exception.Error(
            _('heat.conf misconfigured, auth_encryption_key '
              'must be 32 characters'))
Ejemplo n.º 7
0
    def _register_info(self, path, info):
        """Place the new info in the correct location in the registry.

        Creates any missing intermediate dicts along *path*.  A ``None``
        *info* deletes the entry (or, for a name ending in ``*``, every
        matching entry); otherwise the entry is stored, warning when an
        existing mapping changes or when the resource class is not fully
        supported.

        :param path: a list of keys ['resources', 'my_srv', 'OS::Nova::Server']
        :param info: a ResourceInfo to register, or None to delete.
        """
        descriptive_path = '/'.join(path)
        name = path[-1]
        # create the structure if needed
        registry = self._registry
        for key in path[:-1]:
            if key not in registry:
                registry[key] = {}
            registry = registry[key]

        if info is None:
            if name.endswith('*'):
                # delete all matching entries.
                # Iterate a snapshot of the keys, since entries are
                # removed from the dict while looping.
                for res_name in list(six.iterkeys(registry)):
                    if (isinstance(registry[res_name], ResourceInfo) and
                            res_name.startswith(name[:-1])):
                        LOG.warning(_LW('Removing %(item)s from %(path)s'), {
                            'item': res_name,
                            'path': descriptive_path})
                        del registry[res_name]
            else:
                # delete this entry.
                LOG.warning(_LW('Removing %(item)s from %(path)s'), {
                    'item': name,
                    'path': descriptive_path})
                registry.pop(name, None)
            return

        # Replacing an existing entry: warn only when the value actually
        # changes; re-registering an identical entry is a silent no-op.
        if name in registry and isinstance(registry[name], ResourceInfo):
            if registry[name] == info:
                return
            details = {
                'path': descriptive_path,
                'was': str(registry[name].value),
                'now': str(info.value)}
            LOG.warning(_LW('Changing %(path)s from %(was)s to %(now)s'),
                        details)

        # Warn when registering a resource class that is not fully
        # supported, if it carries an explanatory status message.
        if isinstance(info, ClassResourceInfo):
            if info.value.support_status.status != support.SUPPORTED:
                if info.value.support_status.message is not None:
                    details = {
                        'name': info.name,
                        'status': six.text_type(
                            info.value.support_status.status),
                        'message': six.text_type(
                            info.value.support_status.message)
                        }
                    LOG.warning(_LW('%(name)s is %(status)s. %(message)s'),
                                details)

        # Entries registered while a global registry exists are
        # user-provided resources.
        info.user_resource = (self.global_registry is not None)
        registry[name] = info
Ejemplo n.º 8
0
    def start_template_clone(self, context, resource_name, instance):
        """Kick off cloning of *instance* via the clone driver.

        :param context: request context passed through to the driver.
        :param resource_name: name of the template resource being cloned.
        :param instance: resource data describing the instance to clone.
        :raises exception.V2vException: if the driver raises anything.
        """
        # 1 Traverse the list of resource, cloning every instance
        if not instance:
            LOG.error("Resources in template is null")

        # 2 set resource info according to template topo
        # (if the value of key links to other, here must set again)
        try:
            common = self.resource_common
            self.clone_driver.start_template_clone(
                context, resource_name, instance,
                create_volume_wait_fun=common._await_block_device_map_created,
                volume_wait_fun=common._await_volume_status,
                create_instance_wait_fun=common._await_instance_create,
                port_wait_fun=common._await_port_status,
                trans_data_wait_fun=common._await_data_trans_status)
        except Exception as e:
            LOG.error(_LW("Clone vm error: %s"), e)
            _msg = 'Instance clone error: %s' % e
            raise exception.V2vException(message=_msg)
Ejemplo n.º 9
0
    def load(cls, context, watch_name=None, watch=None):
        """Load the watchrule object.

        The object can be loaded either from the DB by name or from an existing
        DB object.
        """
        if watch is None:
            try:
                watch = watch_rule_objects.WatchRule.get_by_name(
                    context, watch_name)
            except Exception as ex:
                LOG.warning(_LW('WatchRule.load (%(watch_name)s) db error '
                                '%(ex)s'),
                            {'watch_name': watch_name, 'ex': ex})
        if watch is None:
            raise exception.EntityNotFound(entity='Watch Rule',
                                           name=watch_name)
        return cls(context=context,
                   watch_name=watch.name,
                   rule=watch.rule,
                   stack_id=watch.stack_id,
                   state=watch.state,
                   wid=watch.id,
                   watch_data=watch.watch_data,
                   last_evaluated=watch.last_evaluated)
Ejemplo n.º 10
0
    def _await_instance_status(self, context, instance_id, status):
        """Poll Nova until the instance reaches *status*.

        :param context: request context for the Nova API calls.
        :param instance_id: id of the instance being polled.
        :param status: target status string, e.g. 'ACTIVE' or 'SHUTOFF'.
        :returns: the (1-based) attempt number on which the status matched.
        :raises exception.InstanceNotStop: 'SHUTOFF' never reached.
        :raises exception.InstanceNotStart: 'ACTIVE' never reached.
        :raises exception.Error: any other target status timed out.
        """
        start = time.time()
        retries = CONF.block_device_allocate_retries
        if retries < 0:
            # Fixed: non-deprecated warning() and name the option actually
            # being read (message previously said 'block_device_retries').
            LOG.warning(_LW("Treating negative config value (%(retries)s) "
                            "for 'block_device_allocate_retries' as 0."),
                        {'retries': retries})
        # (1) treat  negative config value as 0
        # (2) the configured value is 0, one attempt should be made
        # (3) the configured value is > 0, then the total number attempts
        #      is (retries + 1)
        attempts = 1
        if retries >= 1:
            attempts = retries + 1
        for attempt in range(1, attempts + 1):
            instance = self.nova_api.get_server(context, instance_id)
            instance_status = instance.get('status', None)
            if instance_status == status:
                # Fixed: success is informational, not an error (was
                # LOG.error; sibling pollers use LOG.debug here).
                LOG.debug(_("Instance id: %(id)s finished being %(st)s"),
                          {'id': instance_id, 'st': status})
                return attempt
            greenthread.sleep(CONF.block_device_allocate_retries_interval)

        if 'SHUTOFF' == status:
            LOG.error(_("Instance id: %s stop failed"), instance_id)
            raise exception.InstanceNotStop(instance_id=instance_id,
                                            seconds=int(time.time() - start),
                                            attempts=attempts)
        elif 'ACTIVE' == status:
            LOG.error(_("Instance id: %s start failed"), instance_id)
            raise exception.InstanceNotStart(instance_id=instance_id,
                                             seconds=int(time.time() - start),
                                             attempts=attempts)
        else:
            raise exception.Error(message="Instance option error.")
Ejemplo n.º 11
0
    def load(cls, context, watch_name=None, watch=None):
        """Load the watchrule object.

        The object can be loaded either from the DB by name or from an existing
        DB object.
        """
        if watch is None:
            try:
                watch = watch_rule_objects.WatchRule.get_by_name(context,
                                                                 watch_name)
            except Exception as ex:
                LOG.warning(_LW('WatchRule.load (%(watch_name)s) db error '
                                '%(ex)s'),
                            {'watch_name': watch_name, 'ex': ex})
            if watch is None:
                raise exception.EntityNotFound(entity='Watch Rule',
                                               name=watch_name)
        return cls(context=context,
                   watch_name=watch.name,
                   rule=watch.rule,
                   stack_id=watch.stack_id,
                   state=watch.state,
                   wid=watch.id,
                   watch_data=watch.watch_data,
                   last_evaluated=watch.last_evaluated)
Ejemplo n.º 12
0
    def _await_instance_create(self, context, instance_id):
        """Poll Nova until *instance_id* becomes ACTIVE.

        :returns: the (1-based) attempt number on which the instance
                  became ACTIVE.
        :raises exception.InstanceNotCreated: still not ACTIVE after all
                 attempts.
        """
        # TODO(yamahata): creating volume simultaneously
        #                 reduces creation time?
        # TODO(yamahata): eliminate dumb polling
        start = time.time()
        retries = CONF.instance_allocate_retries
        if retries < 0:
            # Fixed: warning() instead of deprecated warn(), and name the
            # option actually read (was 'instance_create_retries').
            LOG.warning(_LW("Treating negative config value (%(retries)s) "
                            "for 'instance_allocate_retries' as 0."),
                        {'retries': retries})
        # (1) treat  negative config value as 0
        # (2) the configured value is 0, one attempt should be made
        # (3) the configured value is > 0, then the total number attempts
        #      is (retries + 1)
        attempts = 1
        if retries >= 1:
            attempts = retries + 1
        for attempt in range(1, attempts + 1):
            instance = self.nova_api.get_server(context, instance_id)
            if instance.get('status', None) == 'ACTIVE':
                LOG.debug(_("Instance:%s finished being created"),
                          instance_id)
                return attempt

            greenthread.sleep(CONF.instance_create_retries_interval)

        # NOTE(harlowja): Should only happen if we ran out of attempts
        raise exception.InstanceNotCreated(instance_id=instance_id,
                                           seconds=int(time.time() - start),
                                           attempts=attempts)
Ejemplo n.º 13
0
 def _await_port_status(self, context, port_id, ip_address):
     """Poll until *ip_address* accepts a connection for *port_id*.

     :returns: the (1-based) attempt number on which the connect check
               succeeded.
     :raises exception.PortNotattach: if the check never succeeds.
     """
     # TODO(yamahata): eliminate dumb polling
     start = time.time()
     retries = CONF.port_allocate_retries
     if retries < 0:
         # Fixed: warning() instead of deprecated warn(), and name the
         # option actually read (was 'block_device_retries').
         LOG.warning(_LW("Treating negative config value (%(retries)s) "
                         "for 'port_allocate_retries' as 0."),
                     {'retries': retries})
     # (1) treat  negative config value as 0
     # (2) the configured value is 0, one attempt should be made
     # (3) the configured value is > 0, then the total number attempts
     #      is (retries + 1)
     attempts = 1
     if retries >= 1:
         attempts = retries + 1
     for attempt in range(1, attempts + 1):
         if self._check_connect_sucess(ip_address):
             # Fixed: only log success when the check actually passed
             # (previously logged unconditionally on every attempt).
             LOG.debug("Port id: %s finished being attached", port_id)
             return attempt
         # Fixed: this sleep was unreachable dead code (placed after
         # return/continue), so the loop busy-polled without pausing.
         greenthread.sleep(CONF.port_allocate_retries_interval)
     # NOTE(harlowja): Should only happen if we ran out of attempts
     raise exception.PortNotattach(port_id=port_id,
                                   seconds=int(time.time() - start),
                                   attempts=attempts)
Ejemplo n.º 14
0
    def set_watch_state(self, state):
        """Temporarily set the watch state.

        :param state: the state to force; must be one of WATCH_STATES.
        :returns: list of functions to be scheduled in the stack ThreadGroup
                  for the specified state.
        :raises ValueError: if *state* is not a known watch state.
        """
        if state not in self.WATCH_STATES:
            raise ValueError(_('Unknown watch state %s') % state)

        # No override needed when the watch is already in that state.
        if state == self.state:
            return []

        actions = self.rule_actions(state)
        if actions:
            LOG.debug("Overriding state %(self_state)s for watch "
                      "%(name)s with %(state)s" % {'self_state': self.state,
                                                   'name': self.name,
                                                   'state': state})
        else:
            LOG.warning(_LW("Unable to override state %(state)s for "
                            "watch %(name)s"),
                        {'state': self.state, 'name': self.name})
        return actions
Ejemplo n.º 15
0
 def _await_block_device_map_created(self, context, vol_id):
     """Poll Cinder until volume *vol_id* leaves the creating states.

     :returns: the (1-based) attempt number on which the volume left
               'creating'/'downloading' (even if it is not 'available').
     :raises exception.VolumeNotCreated: if the volume never leaves the
              creating states within the configured attempts.
     """
     # TODO(yamahata): creating volume simultaneously
     #                 reduces creation time?
     # TODO(yamahata): eliminate dumb polling
     start = time.time()
     retries = CONF.block_device_allocate_retries
     if retries < 0:
         # Fixed: warning() instead of deprecated warn(), and name the
         # option actually read (was 'block_device_retries').
         LOG.warning(_LW("Treating negative config value (%(retries)s) "
                         "for 'block_device_allocate_retries' as 0."),
                     {'retries': retries})
     # (1) treat  negative config value as 0
     # (2) the configured value is 0, one attempt should be made
     # (3) the configured value is > 0, then the total number attempts
     #      is (retries + 1)
     attempts = 1
     if retries >= 1:
         attempts = retries + 1
     for attempt in range(1, attempts + 1):
         volume = self.volume_api.get(context, vol_id)
         volume_status = volume['status']
         if volume_status not in ['creating', 'downloading']:
             if volume_status != 'available':
                 # Fixed: warning() instead of deprecated warn().
                 LOG.warning(
                     _("Volume id: %s finished being created but was"
                       " not set as 'available'"), vol_id)
             return attempt
         greenthread.sleep(CONF.block_device_allocate_retries_interval)
     # NOTE(harlowja): Should only happen if we ran out of attempts
     raise exception.VolumeNotCreated(volume_id=vol_id,
                                      seconds=int(time.time() - start),
                                      attempts=attempts)
Ejemplo n.º 16
0
    def handle_create(self):
        """Add a floating IP address to a server."""
        eip = self.properties[self.EIP]
        if eip:
            instance_id = self.properties[self.INSTANCE_ID]
            server = self.client().servers.get(instance_id)
            server.add_floating_ip(eip)
            self.resource_id_set(eip)
            LOG.debug(
                'ElasticIpAssociation '
                '%(instance)s.add_floating_ip(%(eip)s)',
                {'instance': instance_id, 'eip': eip})
            return

        float_id = self.properties[self.ALLOCATION_ID]
        if not float_id:
            return

        ni_id = self.properties[self.NETWORK_INTERFACE_ID]
        instance_id = self.properties[self.INSTANCE_ID]
        port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
        if not port_id or not port_rsrc:
            LOG.warning(_LW('Skipping association, resource not '
                            'specified'))
            return

        network_id = port_rsrc['network_id']
        self._neutron_add_gateway_router(float_id, network_id)
        self._neutron_update_floating_ip(float_id, port_id)
        self.resource_id_set(float_id)
Ejemplo n.º 17
0
    def trusts_auth_plugin(self):
        """Return (lazily creating) the auth plugin used for trusts.

        Prefers the plugin loaded from the trustee config group; falls
        back to the deprecated keystone_authtoken credentials, warning
        when that fallback is used.
        """
        if self._trusts_auth_plugin:
            return self._trusts_auth_plugin

        self._trusts_auth_plugin = auth.load_from_conf_options(
            cfg.CONF, TRUSTEE_CONF_GROUP, trust_id=self.trust_id)

        if self._trusts_auth_plugin:
            return self._trusts_auth_plugin

        # Fixed: pass the argument lazily instead of eager '%' formatting,
        # per oslo.log / logging guidelines.
        LOG.warning(_LW('Using the keystone_authtoken user '
                        'as the conveyorheat '
                        'trustee user directly is deprecated. Please add the '
                        'trustee credentials you need to the %s section of '
                        'your heat.conf file.'), TRUSTEE_CONF_GROUP)

        cfg.CONF.import_group('keystone_authtoken',
                              'keystonemiddleware.auth_token')

        # NOTE(review): assumes 'default' is the right domain id when
        # keystone_authtoken does not specify user_domain_id - confirm.
        trustee_user_domain = 'default'
        if 'user_domain_id' in cfg.CONF.keystone_authtoken:
            trustee_user_domain = cfg.CONF.keystone_authtoken.user_domain_id

        self._trusts_auth_plugin = v3.Password(
            username=cfg.CONF.keystone_authtoken.admin_user,
            password=cfg.CONF.keystone_authtoken.admin_password,
            user_domain_id=trustee_user_domain,
            auth_url=self.keystone_v3_endpoint,
            trust_id=self.trust_id)
        return self._trusts_auth_plugin
Ejemplo n.º 18
0
    def _await_instance_create(self, context, instance_id):
        """Poll Nova until *instance_id* becomes ACTIVE.

        :returns: the (1-based) attempt number on which the instance
                  became ACTIVE.
        :raises exception.InstanceNotCreated: still not ACTIVE after all
                 attempts.
        """
        # TODO(yamahata): creating volume simultaneously
        #                 reduces creation time?
        # TODO(yamahata): eliminate dumb polling
        start = time.time()
        retries = CONF.instance_allocate_retries
        if retries < 0:
            # Fixed: warning() instead of deprecated warn(), and name the
            # option actually read (was 'instance_create_retries').
            LOG.warning(_LW("Treating negative config value (%(retries)s) "
                            "for 'instance_allocate_retries' as 0."),
                        {'retries': retries})
        # (1) treat  negative config value as 0
        # (2) the configured value is 0, one attempt should be made
        # (3) the configured value is > 0, then the total number attempts
        #      is (retries + 1)
        attempts = 1
        if retries >= 1:
            attempts = retries + 1
        for attempt in range(1, attempts + 1):
            instance = self.nova_api.get_server(context, instance_id)
            if instance.get('status', None) == 'ACTIVE':
                LOG.debug(_("Instance:%s finished being created"),
                          instance_id)
                return attempt

            greenthread.sleep(CONF.instance_create_retries_interval)

        # NOTE(harlowja): Should only happen if we ran out of attempts
        raise exception.InstanceNotCreated(instance_id=instance_id,
                                           seconds=int(time.time() - start),
                                           attempts=attempts)
Ejemplo n.º 19
0
 def _await_port_status(self, context, port_id, ip_address):
     """Poll until *ip_address* accepts a connection for *port_id*.

     :returns: the (1-based) attempt number on which the connect check
               succeeded.
     :raises exception.PortNotattach: if the check never succeeds.
     """
     # TODO(yamahata): eliminate dumb polling
     start = time.time()
     retries = CONF.port_allocate_retries
     if retries < 0:
         # Fixed: warning() instead of deprecated warn(), and name the
         # option actually read (was 'block_device_retries').
         LOG.warning(_LW("Treating negative config value (%(retries)s) "
                         "for 'port_allocate_retries' as 0."),
                     {'retries': retries})
     # (1) treat  negative config value as 0
     # (2) the configured value is 0, one attempt should be made
     # (3) the configured value is > 0, then the total number attempts
     #      is (retries + 1)
     attempts = 1
     if retries >= 1:
         attempts = retries + 1
     for attempt in range(1, attempts + 1):
         if self._check_connect_sucess(ip_address):
             # Fixed: only log success when the check actually passed
             # (previously logged unconditionally on every attempt).
             LOG.debug("Port id: %s finished being attached", port_id)
             return attempt
         # Fixed: this sleep was unreachable dead code (placed after
         # return/continue), so the loop busy-polled without pausing.
         greenthread.sleep(CONF.port_allocate_retries_interval)
     # NOTE(harlowja): Should only happen if we ran out of attempts
     raise exception.PortNotattach(port_id=port_id,
                                   seconds=int(time.time() - start),
                                   attempts=attempts)
Ejemplo n.º 20
0
 def _await_block_device_map_created(self, context, vol_id):
     """Poll Cinder until volume *vol_id* leaves the creating states.

     :returns: the (1-based) attempt number on which the volume left
               'creating'/'downloading' (even if it is not 'available').
     :raises exception.VolumeNotCreated: if the volume never leaves the
              creating states within the configured attempts.
     """
     # TODO(yamahata): creating volume simultaneously
     #                 reduces creation time?
     # TODO(yamahata): eliminate dumb polling
     start = time.time()
     retries = CONF.block_device_allocate_retries
     if retries < 0:
         # Fixed: warning() instead of deprecated warn(), and name the
         # option actually read (was 'block_device_retries').
         LOG.warning(_LW("Treating negative config value (%(retries)s) "
                         "for 'block_device_allocate_retries' as 0."),
                     {'retries': retries})
     # (1) treat  negative config value as 0
     # (2) the configured value is 0, one attempt should be made
     # (3) the configured value is > 0, then the total number attempts
     #      is (retries + 1)
     attempts = 1
     if retries >= 1:
         attempts = retries + 1
     for attempt in range(1, attempts + 1):
         volume = self.volume_api.get(context, vol_id)
         volume_status = volume['status']
         if volume_status not in ['creating', 'downloading']:
             if volume_status != 'available':
                 # Fixed: warning() instead of deprecated warn().
                 LOG.warning(_("Volume id: %s finished being created but was"
                               " not set as 'available'"), vol_id)
             return attempt
         greenthread.sleep(CONF.block_device_allocate_retries_interval)
     # NOTE(harlowja): Should only happen if we ran out of attempts
     raise exception.VolumeNotCreated(volume_id=vol_id,
                                      seconds=int(time.time() - start),
                                      attempts=attempts)
Ejemplo n.º 21
0
def get_cinder_client_version(context):
    """Parse cinder client version by endpoint url.

    :param context: Nova auth context.
    :return: str value(1 or 2).
    :raises cinder_exception.UnsupportedVersion: if the endpoint path
            does not contain a recognised version component.
    """
    global CINDER_URL
    # FIXME: the cinderclient ServiceCatalog object is mis-named.
    #        It actually contains the entire access blob.
    # Only needed parts of the service catalog are passed in, see
    # nova/context.py.
    compat_catalog = {
        'access': {
            'serviceCatalog': context.service_catalog or []
        }
    }
    sc = service_catalog.ServiceCatalog(compat_catalog)
    if CONF.cinder.endpoint_template:
        url = CONF.cinder.endpoint_template % context.to_dict()
    else:
        info = CONF.cinder.catalog_info
        service_type, service_name, endpoint_type = info.split(':')
        # extract the region if set in configuration
        if CONF.os_region_name:
            attr = 'region'
            filter_value = CONF.os_region_name
        else:
            attr = None
            filter_value = None
        url = sc.url_for(attr=attr,
                         filter_value=filter_value,
                         service_type=service_type,
                         service_name=service_name,
                         endpoint_type=endpoint_type)
    LOG.debug('Cinderclient connection created using URL: %s', url)

    valid_versions = ['v1', 'v2']
    magic_tuple = urlparse.urlsplit(url)
    scheme, netloc, path, query, frag = magic_tuple
    # The version lives in the first path component, e.g. '/v2/<tenant>'.
    components = path.split("/")

    for version in valid_versions:
        if version in components[1]:
            # Strip the leading 'v' to return just the number.
            version = version[1:]

            # Warn (once, until CINDER_URL is cached) about deprecated V1.
            if not CINDER_URL and version == '1':
                msg = _LW('Cinder V1 API is deprecated as of the Juno '
                          'release, and Nova is still configured to use it. '
                          'Enable the V2 API in Cinder and set '
                          'cinder_catalog_info in nova.conf to use it.')
                # Fixed: warning() instead of deprecated warn().
                LOG.warning(msg)

            CINDER_URL = url
            return version
    msg = _("Invalid client version, must be one of: %s") % valid_versions
    raise cinder_exception.UnsupportedVersion(msg)
Ejemplo n.º 22
0
 def server_to_ipaddress(self, server):
     """Return the server's IP address, fetching it from Nova."""
     try:
         server = self.client().servers.get(server)
     except exceptions.NotFound as ex:
         LOG.warning(_LW('Instance (%(server)s) not found: %(ex)s'),
                     {'server': server, 'ex': ex})
         return None
     # Scan networks in reverse-sorted name order, returning the first
     # address found.
     for net_name in sorted(server.networks, reverse=True):
         addresses = server.networks[net_name]
         if addresses:
             return addresses[0]
     return None
Ejemplo n.º 23
0
    def _load_extensions(self):
        """Load extensions specified on the command line."""

        extensions = list(self.cls_list)

        for ext_factory in extensions:
            try:
                self.load_extension(ext_factory)
            except Exception as exc:
                LOG.warning(_LW('Failed to load extension %(ext_factory)s: '
                                '%(exc)s'),
                            {'ext_factory': ext_factory, 'exc': exc})
Ejemplo n.º 24
0
 def server_to_ipaddress(self, server):
     """Return the server's IP address, fetching it from Nova."""
     try:
         server = self.client().servers.get(server)
     except exceptions.NotFound as ex:
         LOG.warning(_LW('Instance (%(server)s) not found: %(ex)s'), {
             'server': server,
             'ex': ex
         })
     else:
         # First non-empty network, scanning names in reverse order.
         for net in sorted(server.networks, reverse=True):
             if server.networks[net]:
                 return server.networks[net][0]
Ejemplo n.º 25
0
def startup_sanity_check():
    """Validate stack-domain and encryption-key settings at startup.

    Raises exception.Error on a misconfigured heat.conf; logs a warning
    for legacy (domain-less) configurations and short-but-accepted
    encryption keys.
    """
    has_domain = (cfg.CONF.stack_user_domain_id or
                  cfg.CONF.stack_user_domain_name)
    if not has_domain:
        # FIXME(shardy): Legacy fallback for folks using old heat.conf
        # files which lack domain configuration
        LOG.warning(_LW('stack_user_domain_id or stack_user_domain_name not '
                        'set in heat.conf falling back to using default'))
    elif not (cfg.CONF.stack_domain_admin and
              cfg.CONF.stack_domain_admin_password):
        # A domain without admin credentials is unusable.
        raise exception.Error(_('heat.conf misconfigured, cannot '
                                'specify "stack_user_domain_id" or '
                                '"stack_user_domain_name" without '
                                '"stack_domain_admin" and '
                                '"stack_domain_admin_password"'))
    key_length = len(cfg.CONF.auth_encryption_key)
    if key_length in (16, 24):
        # 16/24-char keys still work but are deprecated lengths.
        LOG.warning(
            _LW('Please update auth_encryption_key to be 32 characters.'))
    elif key_length != 32:
        raise exception.Error(_('heat.conf misconfigured, auth_encryption_key '
                                'must be 32 characters'))
Ejemplo n.º 26
0
    def client(self, name):
        """Return a client for *name*.

        Resolution order: a registered client plugin, the instance cache,
        then a local factory method ``_<name>()``. Logs a warning and
        returns None when nothing matches.
        """
        plugin = self.client_plugin(name)
        if plugin:
            return plugin.client()

        if name in self._clients:
            return self._clients[name]

        # call the local method _<name>() if a real client plugin
        # doesn't exist
        factory = getattr(self, '_%s' % name, None)
        if callable(factory):
            self._clients[name] = factory()
            return self._clients[name]

        LOG.warning(_LW('Requested client "%s" not found'), name)
Ejemplo n.º 27
0
    def _load_extensions(self):
        """Load extensions specified on the command line.

        Each factory in ``self.cls_list`` is loaded independently;
        failures are logged and skipped so one broken extension does
        not prevent the rest from loading.
        """

        extensions = list(self.cls_list)

        for ext_factory in extensions:
            try:
                self.load_extension(ext_factory)
            except Exception as exc:
                LOG.warning(
                    _LW('Failed to load extension %(ext_factory)s: '
                        '%(exc)s'), {
                            'ext_factory': ext_factory,
                            'exc': exc
                        })
Ejemplo n.º 28
0
 def _resolve_attribute(self, name):
     """Resolve the port attribute *name*.

     For SUBNETS_ATTR, returns the list of subnet dicts referenced by
     the port's fixed IPs (or None if fetching fails); any other
     attribute is delegated to the parent class.
     """
     if name == self.SUBNETS_ATTR:
         subnets = []
         try:
             fixed_ips = self._show_resource().get('fixed_ips', [])
             for fixed_ip in fixed_ips:
                 subnet_id = fixed_ip.get('subnet_id')
                 if subnet_id:
                     subnets.append(self.client().show_subnet(
                         subnet_id)['subnet'])
         except Exception as ex:
             # Attribute resolution is best-effort: log and return None
             # rather than failing the whole resource.
             LOG.warning(_LW("Failed to fetch resource attributes: %s"), ex)
             return
         return subnets
     return super(Port, self)._resolve_attribute(name)
Ejemplo n.º 29
0
    def client(self, name):
        """Return a client for *name*.

        Tries a registered client plugin first, then the local cache,
        then a local factory method ``_<name>()``; logs a warning and
        returns None when no provider is found.
        """
        client_plugin = self.client_plugin(name)
        if client_plugin:
            return client_plugin.client()

        if name in self._clients:
            return self._clients[name]
        # call the local method _<name>() if a real client plugin
        # doesn't exist
        method_name = '_%s' % name
        if callable(getattr(self, method_name, None)):
            client = getattr(self, method_name)()
            self._clients[name] = client
            return client
        LOG.warning(_LW('Requested client "%s" not found'), name)
Ejemplo n.º 30
0
    def refresh_server(self, server):
        """Refresh server's attributes.

        Also log warnings for non-critical API errors: OverLimit and
        500/503 ClientExceptions are logged and swallowed; any other
        ClientException is re-raised.
        """
        try:
            server.get()
        except exceptions.OverLimit as exc:
            LOG.warning(_LW("Server %(name)s (%(id)s) received an OverLimit "
                            "response during server.get(): %(exception)s"),
                        {'name': server.name,
                         'id': server.id,
                         'exception': exc})
        except exceptions.ClientException as exc:
            # Different client versions expose the status code as either
            # 'http_status' or 'code'; check both.
            if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
                 (500, 503))):
                LOG.warning(_LW('Server "%(name)s" (%(id)s) received the '
                                'following exception during server.get(): '
                                '%(exception)s'),
                            {'name': server.name,
                             'id': server.id,
                             'exception': exc})
            else:
                raise
Ejemplo n.º 31
0
    def release(self):
        """Release a stack lock."""

        # Only the engine that owns the lock will be releasing it.
        result = stack_lock_object.StackLock.release(self.stack_id,
                                                     self.engine_id)
        # NOTE(review): StackLock.release appears to return True when no
        # matching lock row was found — confirm against its implementation.
        if result is True:
            LOG.warning(_LW("Lock was already released on stack %s!"),
                        self.stack_id)
        else:
            LOG.debug("Engine %(engine)s released lock on stack "
                      "%(stack)s" % {
                          'engine': self.engine_id,
                          'stack': self.stack_id
                      })
Ejemplo n.º 32
0
    def start_template_migrate(self, context, resource_name, instance):
        """Migrate the instance resource described by a template.

        :param context: the request context.
        :param resource_name: name of the resource being migrated.
        :param instance: the resource definition taken from the template.
        :raises exception.V2vException: when *instance* is empty or the
            clone driver fails.
        """
        if not instance:
            # Bail out instead of handing a null resource to the driver,
            # matching the behaviour of start_template_clone.
            LOG.error("Resources in template is null")
            raise exception.V2vException(message='Instance is null')

        # (if the value of key links to other, here must set again)
        try:
            trans_data_wait_fun = \
                self.resource_common._await_data_trans_status
            self.clone_driver.start_template_migrate(
                context, resource_name, instance,
                port_wait_fun=self.resource_common._await_port_status,
                trans_data_wait_fun=trans_data_wait_fun)
        except Exception as e:
            LOG.error(_LW("Migrate vm error: %s"), e)
            _msg = 'Instance clone error: %s' % e
            raise exception.V2vException(message=_msg)
Ejemplo n.º 33
0
 def basic_config_check(self):
     """Perform basic config checks before starting service."""
     # Make sure report interval is less than service down time
     if self.report_interval:
         if CONF.service_down_time <= self.report_interval:
             # Auto-correct: bump service_down_time to 2.5x the report
             # interval rather than refusing to start.
             new_down_time = int(self.report_interval * 2.5)
             LOG.warning(
                 _LW("Report interval must be less than service down "
                     "time. Current config service_down_time: "
                     "%(service_down_time)s, report_interval for this: "
                     "service is: %(report_interval)s. Setting global "
                     "service_down_time to: %(new_down_time)s"), {
                         'service_down_time': CONF.service_down_time,
                         'report_interval': self.report_interval,
                         'new_down_time': new_down_time
                     })
             CONF.set_override('service_down_time', new_down_time)
Ejemplo n.º 34
0
    def start_template_clone(self, context, resource_name, template):
        """Clone the volume resources described by *template*.

        Raises exception.V2vException when the template is empty or the
        clone driver fails.
        """
        if not template:
            LOG.error("Resources in template is null")
            raise exception.V2vException(message='Template is null')

        try:
            common = self.resource_common
            self.clone_driver.start_volume_clone(
                context, resource_name, template,
                volume_wait_fun=common._await_volume_status,
                trans_data_wait_fun=common._await_data_trans_status,
                set_plan_state=self._set_plan_statu)
        except Exception as e:
            LOG.error(_LW("Clone volume error: %s"), e)
            _msg = 'Volume clone error: %s' % e
            raise exception.V2vException(message=_msg)
Ejemplo n.º 35
0
    def check_stack_watches(self, sid):
        """Evaluate all watch rules for stack *sid* and its children.

        Recurses into nested stacks, then loads each watch rule for the
        stack, evaluates it, and schedules any resulting alarm actions
        on the stack's thread group.
        """
        # Retrieve the stored credentials & create context
        # Require tenant_safe=False to the stack_get to defeat tenant
        # scoping otherwise we fail to retrieve the stack
        LOG.debug("Periodic watcher task for stack %s" % sid)
        admin_context = context.get_admin_context()
        db_stack = stack_object.Stack.get_by_id(admin_context,
                                                sid,
                                                tenant_safe=False,
                                                eager_load=True)
        if not db_stack:
            LOG.error(_LE("Unable to retrieve stack %s for periodic task"),
                      sid)
            return
        stk = stack.Stack.load(admin_context, stack=db_stack,
                               use_stored_context=True)

        # recurse into any nested stacks.
        children = stack_object.Stack.get_all_by_owner_id(admin_context, sid)
        for child in children:
            self.check_stack_watches(child.id)

        # Get all watchrules for this stack and evaluate them
        try:
            wrs = watch_rule_object.WatchRule.get_all_by_stack(admin_context,
                                                               sid)
        except Exception as ex:
            # DB errors here are tolerated (e.g. the rule was removed
            # concurrently); skip this stack for this period.
            LOG.warning(_LW('periodic_task db error watch rule'
                            ' removed? %(ex)s'),
                        ex)
            return

        def run_alarm_action(stk, actions, details):
            # Fire each alarm action, then refresh metadata on every
            # resource so they observe the new alarm state.
            for action in actions:
                action(details=details)
            for res in six.itervalues(stk):
                res.metadata_update()

        for wr in wrs:
            rule = watchrule.WatchRule.load(stk.context, watch=wr)
            actions = rule.evaluate()
            if actions:
                self.thread_group_mgr.start(sid, run_alarm_action, stk,
                                            actions, rule.get_details())
Ejemplo n.º 36
0
    def validate(self):
        """Validate any of the provided params.

        Checks, in order: security groups vs. network interfaces
        conflict, BlockDeviceMappings completeness (Ebs/SnapshotId
        required), and a SubnetId/NetworkInterfaces precedence warning.
        """
        res = super(Instance, self).validate()
        if res:
            return res

        # check validity of security groups vs. network interfaces
        security_groups = self._get_security_groups()
        network_interfaces = self.properties.get(self.NETWORK_INTERFACES)
        if security_groups and network_interfaces:
            raise exception.ResourcePropertyConflict(
                '/'.join([self.SECURITY_GROUPS, self.SECURITY_GROUP_IDS]),
                self.NETWORK_INTERFACES)

        # check bdm property
        # now we don't support without snapshot_id in bdm
        bdm = self.properties.get(self.BLOCK_DEVICE_MAPPINGS)
        if bdm:
            for mapping in bdm:
                ebs = mapping.get(self.EBS)
                if ebs:
                    snapshot_id = ebs.get(self.SNAPSHOT_ID)
                    if not snapshot_id:
                        msg = _("SnapshotId is missing, this is required "
                                "when specifying BlockDeviceMappings.")
                        raise exception.StackValidationFailed(message=msg)
                else:
                    msg = _("Ebs is missing, this is required "
                            "when specifying BlockDeviceMappings.")
                    raise exception.StackValidationFailed(message=msg)

        subnet_id = self.properties.get(self.SUBNET_ID)
        if network_interfaces and subnet_id:
            # consider the old templates, we only to log to warn user
            # NetworkInterfaces has higher priority than SubnetId
            LOG.warning(
                _LW('"%(subnet)s" will be ignored if specified '
                    '"%(net_interfaces)s". So if you specified the '
                    '"%(net_interfaces)s" property, '
                    'do not specify "%(subnet)s" property.'), {
                        'subnet': self.SUBNET_ID,
                        'net_interfaces': self.NETWORK_INTERFACES
                    })
Ejemplo n.º 37
0
def setup_profiler(binary, host):
    """Enable or disable OSProfiler based on configuration.

    When profiling is enabled, installs a messaging-based notifier for
    this service; otherwise disables the profiler web middleware.
    """
    if CONF.profiler.profiler_enabled:
        _notifier = osprofiler.notifier.create(
            "Messaging", messaging,
            context.get_admin_context().to_dict(), rpc.TRANSPORT, "conveyor",
            binary, host)
        osprofiler.notifier.set(_notifier)
        # NOTE(review): the message below references cinder config paths
        # although the service name passed above is "conveyor" — looks
        # copied from cinder; confirm intended wording.
        LOG.warning(
            _LW("OSProfiler is enabled.\nIt means that person who knows "
                "any of hmac_keys that are specified in "
                "/etc/cinder/api-paste.ini can trace his requests. \n"
                "In real life only operator can read this file so there "
                "is no security issue. Note that even if person can "
                "trigger profiler, only admin user can retrieve trace "
                "information.\n"
                "To disable OSprofiler set in cinder.conf:\n"
                "[profiler]\nenabled=false"))
    else:
        osprofiler.web.disable()
Ejemplo n.º 38
0
    def __init__(self,
                 data_type,
                 description=None,
                 default=None,
                 schema=None,
                 required=False,
                 constraints=None,
                 label=None,
                 immutable=False):
        """Initialise a schema definition.

        :param data_type: one of the types in ``self.TYPES``.
        :param schema: nested schema; a single Schema is only valid for
            LIST types, and schemas only apply to LIST/MAP types.
        :raises exception.InvalidSchemaError: on an invalid type or an
            inconsistent type/schema combination.
        """
        self._len = None
        self.label = label
        self.type = data_type
        if self.type not in self.TYPES:
            raise exception.InvalidSchemaError(message=_('Invalid type (%s)') %
                                               self.type)

        if required and default is not None:
            # Pass 'default' as a lazy log argument instead of eagerly
            # interpolating with '%', so the string is only rendered
            # when the warning is actually emitted.
            LOG.warning(
                _LW("Option 'required=True' should not be used with "
                    "any 'default' value (%s)"), default)

        self.description = description
        self.required = required
        self.immutable = immutable

        if isinstance(schema, type(self)):
            if self.type != self.LIST:
                msg = _('Single schema valid only for '
                        '%(ltype)s, not %(utype)s') % dict(ltype=self.LIST,
                                                           utype=self.type)
                raise exception.InvalidSchemaError(message=msg)

            # Wrap so any integer index maps to the single item schema.
            self.schema = AnyIndexDict(schema)
        else:
            self.schema = schema
        if self.schema is not None and self.type not in (self.LIST, self.MAP):
            msg = _('Schema valid only for %(ltype)s or '
                    '%(mtype)s, not %(utype)s') % dict(
                        ltype=self.LIST, mtype=self.MAP, utype=self.type)
            raise exception.InvalidSchemaError(message=msg)

        self.constraints = constraints or []
        self.default = default
Ejemplo n.º 39
0
    def start_template_clone(self, context, resource_name, template):
        """Clone the volume resources described by *template*.

        :raises exception.V2vException: when the template is empty or
            the clone driver fails.
        """
        if not template:
            LOG.error("Resources in template is null")
            raise exception.V2vException(message='Template is null')

        try:
            trans_data_wait_fun = \
                self.resource_common._await_data_trans_status
            self.clone_driver.start_volume_clone(
                context,
                resource_name,
                template,
                volume_wait_fun=self.resource_common._await_volume_status,
                trans_data_wait_fun=trans_data_wait_fun,
                set_plan_state=self._set_plan_statu)
        except Exception as e:
            # Wrap any driver failure in a V2vException for callers.
            LOG.error(_LW("Clone volume error: %s"), e)
            _msg = 'Volume clone error: %s' % e
            raise exception.V2vException(message=_msg)
Ejemplo n.º 40
0
    def _setup_extensions(self, ext_mgr):
        """Attach controller extensions from *ext_mgr* to known resources.

        Extensions targeting an unknown resource collection are logged
        and skipped.
        """
        for extension in ext_mgr.get_controller_extensions():
            collection = extension.collection
            controller = extension.controller
            log_args = {'ext_name': extension.extension.name,
                        'collection': collection}

            if collection not in self.resources:
                LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
                                'resource %(collection)s: No such resource'),
                            log_args)
                continue

            LOG.debug('Extension %(ext_name)s extending resource: '
                      '%(collection)s', log_args)

            resource = self.resources[collection]
            resource.register_actions(controller)
            resource.register_extensions(controller)
Ejemplo n.º 41
0
 def _reset_resources_state(self, context, resources):
     """Restore the saved state of each resource in *resources*.

     ``resources`` maps keys to dicts carrying 'type' and
     'extra_properties'. Failures on one resource are logged and
     skipped so the remaining resources are still reset.
     """
     for key, value in resources.items():
         try:
             resource_type = value.get('type')
             resource_id = value.get('extra_properties', {}).get('id')
             if resource_type == 'OS::Nova::Server':
                 vm_state = value.get('extra_properties', {}) \
                                 .get('vm_state')
                 self.compute_api.reset_state(context, resource_id,
                                              vm_state)
             elif resource_type == 'OS::Cinder::Volume':
                 volume_state = value.get('extra_properties', {}) \
                                     .get('status')
                 self.volume_api.reset_state(context, resource_id,
                                             volume_state)
             elif resource_type == 'OS::Heat::Stack':
                 self._reset_resources_state_for_stack(context, value)
         except Exception as e:
             # LOG.warn is deprecated in favour of LOG.warning.
             LOG.warning(_LW('Reset resource state error, Error=%(e)s'),
                         {'e': e})
Ejemplo n.º 42
0
 def _reset_resources_state(self, context, resources):
     """Restore the saved state of each resource in *resources*.

     ``resources`` maps keys to dicts carrying 'type' and
     'extra_properties'. Failures on one resource are logged and
     skipped so the remaining resources are still reset.
     """
     for key, value in resources.items():
         try:
             resource_type = value.get('type')
             resource_id = value.get('extra_properties', {}).get('id')
             if resource_type == 'OS::Nova::Server':
                 vm_state = value.get('extra_properties', {}) \
                                 .get('vm_state')
                 self.compute_api.reset_state(context, resource_id,
                                              vm_state)
             elif resource_type == 'OS::Cinder::Volume':
                 volume_state = value.get('extra_properties', {}) \
                                     .get('status')
                 self.volume_api.reset_state(context, resource_id,
                                             volume_state)
             elif resource_type == 'OS::Heat::Stack':
                 self._reset_resources_state_for_stack(context, value)
         except Exception as e:
             # LOG.warn is deprecated in favour of LOG.warning.
             LOG.warning(_LW('Reset resource state error, Error=%(e)s'),
                         {'e': e})
Ejemplo n.º 43
0
    def validate(self):
        """Validate any of the provided params.

        Checks, in order: security groups vs. network interfaces
        conflict, BlockDeviceMappings completeness (Ebs/SnapshotId
        required), and a SubnetId/NetworkInterfaces precedence warning.
        """
        res = super(Instance, self).validate()
        if res:
            return res

        # check validity of security groups vs. network interfaces
        security_groups = self._get_security_groups()
        network_interfaces = self.properties.get(self.NETWORK_INTERFACES)
        if security_groups and network_interfaces:
            raise exception.ResourcePropertyConflict(
                '/'.join([self.SECURITY_GROUPS, self.SECURITY_GROUP_IDS]),
                self.NETWORK_INTERFACES)

        # check bdm property
        # now we don't support without snapshot_id in bdm
        bdm = self.properties.get(self.BLOCK_DEVICE_MAPPINGS)
        if bdm:
            for mapping in bdm:
                ebs = mapping.get(self.EBS)
                if ebs:
                    snapshot_id = ebs.get(self.SNAPSHOT_ID)
                    if not snapshot_id:
                        msg = _("SnapshotId is missing, this is required "
                                "when specifying BlockDeviceMappings.")
                        raise exception.StackValidationFailed(message=msg)
                else:
                    msg = _("Ebs is missing, this is required "
                            "when specifying BlockDeviceMappings.")
                    raise exception.StackValidationFailed(message=msg)

        subnet_id = self.properties.get(self.SUBNET_ID)
        if network_interfaces and subnet_id:
            # consider the old templates, we only to log to warn user
            # NetworkInterfaces has higher priority than SubnetId
            LOG.warning(_LW('"%(subnet)s" will be ignored if specified '
                            '"%(net_interfaces)s". So if you specified the '
                            '"%(net_interfaces)s" property, '
                            'do not specify "%(subnet)s" property.'),
                        {'subnet': self.SUBNET_ID,
                         'net_interfaces': self.NETWORK_INTERFACES})
Ejemplo n.º 44
0
    def _await_volume_status(self, context, vol_id, status):
        """Poll Cinder until volume *vol_id* reaches *status*.

        :returns: the (1-based) attempt number on success.
        :raises exception.VolumeNotdetach/VolumeNotAttach: when the
            volume never reaches the expected status.
        """
        # TODO(yamahata): creating volume simultaneously
        #                 reduces creation time?
        # TODO(yamahata): eliminate dumb polling
        start = time.time()
        retries = CONF.block_device_allocate_retries
        if retries < 0:
            # LOG.warn is deprecated; also name the option actually read
            # above ('block_device_allocate_retries').
            LOG.warning(_LW("Treating negative config value (%(retries)s) "
                            "for 'block_device_allocate_retries' as 0."),
                        {'retries': retries})
        # (1) treat  negative config value as 0
        # (2) the configured value is 0, one attempt should be made
        # (3) the configured value is > 0, then the total number attempts
        #      is (retries + 1)
        attempts = 1
        if retries >= 1:
            attempts = retries + 1
        for attempt in range(1, attempts + 1):
            volume = self.volume_api.get(context, vol_id)
            volume_status = volume['status']
            if volume_status == status:
                LOG.debug(_("Volume id: %s finished being detached"), vol_id)
                return attempt

            greenthread.sleep(CONF.block_device_allocate_retries_interval)

        # NOTE(harlowja): Should only happen if we ran out of attempts
        if 'available' == status:
            LOG.error(_("Volume id: %s detach failed"), vol_id)
            raise exception.VolumeNotdetach(
                                volume_id=vol_id,
                                seconds=int(time.time() - start),
                                attempts=attempts)
        elif 'in-use' == status:
            LOG.error(_("Volume id: %s attach failed"), vol_id)
            raise exception.VolumeNotAttach(
                                volume_id=vol_id,
                                seconds=int(time.time() - start),
                                attempts=attempts)
        else:
            raise exception.Error(message="Volume option error.")
Ejemplo n.º 45
0
    def preview(self):
        """Preview a StackResource as resources within a Stack.

        This method overrides the original Resource.preview to return a preview
        of all the resources contained in this Stack.  For this to be possible,
        the specific resources need to override both ``child_template`` and
        ``child_params`` with specific information to allow the stack to be
        parsed correctly. If any of these methods is missing, the entire
        StackResource will be returned as if it were a regular Resource.
        """
        try:
            child_template = self.child_template()
            params = self.child_params()
        except NotImplementedError:
            # No child template/params: fall back to previewing this
            # resource as a plain (non-nested) resource.
            class_name = reflection.get_class_name(self, fully_qualified=False)
            LOG.warning(_LW("Preview of '%s' not yet implemented"), class_name)
            return self

        name = "%s-%s" % (self.stack.name, self.name)
        self._nested = self._parse_nested_stack(name, child_template, params)

        return self.nested().preview_resources()
Ejemplo n.º 46
0
 def rule_actions(self, new_state):
     """Return the signal actions to run for the transition to *new_state*.

     Returns an empty list when the rule defines no action for the
     state or the owning stack is not in a usable state.
     """
     LOG.info(_LI('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
                  'new_state:%(new_state)s'), {'stack': self.stack_id,
                                               'watch_name': self.name,
                                               'new_state': new_state})
     actions = []
     if self.ACTION_MAP[new_state] not in self.rule:
         LOG.info(_LI('no action for new state %s'), new_state)
     else:
         s = stack_object.Stack.get_by_id(
             self.context,
             self.stack_id,
             eager_load=True)
         stk = stack.Stack.load(self.context, stack=s)
         # Only signal resources on a live, fully-created stack.
         if (stk.action != stk.DELETE
                 and stk.status == stk.COMPLETE):
             for refid in self.rule[self.ACTION_MAP[new_state]]:
                 actions.append(stk.resource_by_refid(refid).signal)
         else:
             LOG.warning(_LW("Could not process watch state %s for stack"),
                         new_state)
     return actions
Ejemplo n.º 47
0
    def __init__(self, data_type, description=None,
                 default=None, schema=None,
                 required=False, constraints=None, label=None,
                 immutable=False):
        """Initialise a schema definition.

        :param data_type: one of the types in ``self.TYPES``.
        :param schema: nested schema; a single Schema is only valid for
            LIST types, and schemas only apply to LIST/MAP types.
        :raises exception.InvalidSchemaError: on an invalid type or an
            inconsistent type/schema combination.
        """
        self._len = None
        self.label = label
        self.type = data_type
        if self.type not in self.TYPES:
            raise exception.InvalidSchemaError(
                message=_('Invalid type (%s)') % self.type)

        if required and default is not None:
            # Pass 'default' as a lazy log argument instead of eagerly
            # interpolating with '%', so the string is only rendered
            # when the warning is actually emitted.
            LOG.warning(_LW("Option 'required=True' should not be used with "
                            "any 'default' value (%s)"), default)

        self.description = description
        self.required = required
        self.immutable = immutable

        if isinstance(schema, type(self)):
            if self.type != self.LIST:
                msg = _('Single schema valid only for '
                        '%(ltype)s, not %(utype)s') % dict(ltype=self.LIST,
                                                           utype=self.type)
                raise exception.InvalidSchemaError(message=msg)

            # Wrap so any integer index maps to the single item schema.
            self.schema = AnyIndexDict(schema)
        else:
            self.schema = schema
        if self.schema is not None and self.type not in (self.LIST,
                                                         self.MAP):
            msg = _('Schema valid only for %(ltype)s or '
                    '%(mtype)s, not %(utype)s') % dict(ltype=self.LIST,
                                                       mtype=self.MAP,
                                                       utype=self.type)
            raise exception.InvalidSchemaError(message=msg)

        self.constraints = constraints or []
        self.default = default
Ejemplo n.º 48
0
    def _await_volume_status(self, context, vol_id, status):
        """Poll Cinder until volume *vol_id* reaches *status*.

        :returns: the (1-based) attempt number on success.
        :raises exception.VolumeNotdetach/VolumeNotAttach: when the
            volume never reaches the expected status.
        """
        # TODO(yamahata): creating volume simultaneously
        #                 reduces creation time?
        # TODO(yamahata): eliminate dumb polling
        start = time.time()
        retries = CONF.block_device_allocate_retries
        if retries < 0:
            # LOG.warn is deprecated; also name the option actually read
            # above ('block_device_allocate_retries').
            LOG.warning(
                _LW("Treating negative config value (%(retries)s) for "
                    "'block_device_allocate_retries' as 0."),
                {'retries': retries})
        # (1) treat  negative config value as 0
        # (2) the configured value is 0, one attempt should be made
        # (3) the configured value is > 0, then the total number attempts
        #      is (retries + 1)
        attempts = 1
        if retries >= 1:
            attempts = retries + 1
        for attempt in range(1, attempts + 1):
            volume = self.volume_api.get(context, vol_id)
            volume_status = volume['status']
            if volume_status == status:
                LOG.debug(_("Volume id: %s finished being detached"), vol_id)
                return attempt

            greenthread.sleep(CONF.block_device_allocate_retries_interval)

        # NOTE(harlowja): Should only happen if we ran out of attempts
        if 'available' == status:
            LOG.error(_("Volume id: %s detach failed"), vol_id)
            raise exception.VolumeNotdetach(volume_id=vol_id,
                                            seconds=int(time.time() - start),
                                            attempts=attempts)
        elif 'in-use' == status:
            LOG.error(_("Volume id: %s attach failed"), vol_id)
            raise exception.VolumeNotAttach(volume_id=vol_id,
                                            seconds=int(time.time() - start),
                                            attempts=attempts)
        else:
            raise exception.Error(message="Volume option error.")
Ejemplo n.º 49
0
    def preview(self):
        """Preview a StackResource as resources within a Stack.

        This method overrides the original Resource.preview to return a preview
        of all the resources contained in this Stack.  For this to be possible,
        the specific resources need to override both ``child_template`` and
        ``child_params`` with specific information to allow the stack to be
        parsed correctly. If any of these methods is missing, the entire
        StackResource will be returned as if it were a regular Resource.
        """
        try:
            child_template = self.child_template()
            params = self.child_params()
        except NotImplementedError:
            # No child template/params: fall back to previewing this
            # resource as a plain (non-nested) resource.
            class_name = reflection.get_class_name(self, fully_qualified=False)
            LOG.warning(_LW("Preview of '%s' not yet implemented"), class_name)
            return self

        name = "%s-%s" % (self.stack.name, self.name)
        self._nested = self._parse_nested_stack(name, child_template, params)

        return self.nested().preview_resources()
Ejemplo n.º 50
0
 def rule_actions(self, new_state):
     """Return the signal actions to run for the transition to *new_state*.

     Returns an empty list when the rule defines no action for the
     state or the owning stack is not in a usable state.
     """
     LOG.info(
         _LI('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
             'new_state:%(new_state)s'), {
                 'stack': self.stack_id,
                 'watch_name': self.name,
                 'new_state': new_state
             })
     actions = []
     if self.ACTION_MAP[new_state] not in self.rule:
         LOG.info(_LI('no action for new state %s'), new_state)
     else:
         s = stack_object.Stack.get_by_id(self.context,
                                          self.stack_id,
                                          eager_load=True)
         stk = stack.Stack.load(self.context, stack=s)
         # Only signal resources on a live, fully-created stack.
         if (stk.action != stk.DELETE and stk.status == stk.COMPLETE):
             for refid in self.rule[self.ACTION_MAP[new_state]]:
                 actions.append(stk.resource_by_refid(refid).signal)
         else:
             LOG.warning(_LW("Could not process watch state %s for stack"),
                         new_state)
     return actions
Ejemplo n.º 51
0
    def set_watch_state(self, state):
        """Temporarily set the watch state.

        :param state: target state; must be one of ``self.WATCH_STATES``.
        :returns: list of functions to be scheduled in the stack ThreadGroup
                  for the specified state.
        :raises ValueError: when *state* is not a known watch state.
        """

        if state not in self.WATCH_STATES:
            raise ValueError(_('Unknown watch state %s') % state)

        actions = []
        if state != self.state:
            actions = self.rule_actions(state)
            if actions:
                # Use lazy %-style log arguments so the message is only
                # rendered when debug logging is enabled.
                LOG.debug("Overriding state %(self_state)s for watch "
                          "%(name)s with %(state)s",
                          {'self_state': self.state, 'name': self.name,
                           'state': state})
            else:
                LOG.warning(_LW("Unable to override state %(state)s for "
                                "watch %(name)s"), {'state': self.state,
                                                    'name': self.name})
        return actions
Ejemplo n.º 52
0
    def _await_instance_status(self, context, instance_id, status):
        """Poll Nova until instance *instance_id* reaches *status*.

        :returns: the (1-based) attempt number on success.
        :raises exception.InstanceNotStop/InstanceNotStart: when the
            instance never reaches the expected status.
        """
        start = time.time()
        retries = CONF.block_device_allocate_retries
        if retries < 0:
            # LOG.warn is deprecated; also name the option actually read
            # above ('block_device_allocate_retries').
            LOG.warning(
                _LW("Treating negative config value (%(retries)s) for "
                    "'block_device_allocate_retries' as 0."),
                {'retries': retries})
        # (1) treat  negative config value as 0
        # (2) the configured value is 0, one attempt should be made
        # (3) the configured value is > 0, then the total number attempts
        #      is (retries + 1)
        attempts = 1
        if retries >= 1:
            attempts = retries + 1
        for attempt in range(1, attempts + 1):
            instance = self.nova_api.get_server(context, instance_id)
            instance_status = instance.get('status', None)
            if instance_status == status:
                # Success path: log at debug like _await_volume_status,
                # not at error level.
                LOG.debug(_("Instance id: %(id)s finished being %(st)s"), {
                    'id': instance_id,
                    'st': status
                })
                return attempt
            greenthread.sleep(CONF.block_device_allocate_retries_interval)

        if 'SHUTOFF' == status:
            LOG.error(_("Instance id: %s stop failed"), instance_id)
            raise exception.InstanceNotStop(instance_id=instance_id,
                                            seconds=int(time.time() - start),
                                            attempts=attempts)
        elif 'ACTIVE' == status:
            LOG.error(_("Instance id: %s start failed"), instance_id)
            raise exception.InstanceNotStart(instance_id=instance_id,
                                             seconds=int(time.time() - start),
                                             attempts=attempts)
        else:
            raise exception.Error(message="Instance option error.")
Ejemplo n.º 53
0
    def _delete_user(self):
        """Delete this resource's stack user (and its keypair).

        Missing users are tolerated; a legacy fallback path handles
        users created before the stack_user domain migration.
        """
        user_id = self._get_user_id()
        if user_id is None:
            return

        # the user is going away, so we want the keypair gone as well
        self._delete_keypair()

        try:
            self.keystone().delete_stack_domain_user(
                user_id=user_id, project_id=self.stack.stack_user_project_id)
        except kc_exception.NotFound:
            pass
        except ValueError:
            # FIXME(shardy): This is a legacy delete path for backwards
            # compatibility with resources created before the migration
            # to stack_user.StackUser domain users.  After an appropriate
            # transitional period, this should be removed.
            LOG.warning(_LW('Reverting to legacy user delete path'))
            try:
                self.keystone().delete_stack_user(user_id)
            except kc_exception.NotFound:
                pass
        self.data_delete('user_id')
Ejemplo n.º 54
0
def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
    """Registers all standard API extensions.

    Walks the directory rooted at ``path[0]``, registering an extension
    class from every ``.py`` module found.  A subdirectory that is a
    package is delegated to its ``extension`` callable when one can be
    imported; otherwise the subdirectory is walked recursively.

    :param ext_mgr: extension manager with a ``load_extension(classpath)``
        method used to register each discovered extension.
    :param logger: logger used to report skipped or failed extensions.
    :param path: package ``__path__`` list; only the first entry is walked.
    :param package: dotted package name prefixed to each class path.
    :param ext_list: optional whitelist of extension class names; when
        given, classes not in the list are skipped with a debug message.
    """

    # Walk through all the modules in our directory...
    our_dir = path[0]
    for dirpath, dirnames, filenames in os.walk(our_dir):
        # Compute the relative package name from the dirpath
        relpath = os.path.relpath(dirpath, our_dir)
        if relpath == '.':
            relpkg = ''
        else:
            relpkg = '.%s' % '.'.join(relpath.split(os.sep))

        # Now, consider each file in turn, only considering .py files
        for fname in filenames:
            root, ext = os.path.splitext(fname)

            # Skip __init__ and anything that's not .py
            if ext != '.py' or root == '__init__':
                continue

            # The extension class name is the module name with its first
            # letter capitalized, e.g. foo_bar.py -> Foo_bar.
            classname = "%s%s" % (root[0].upper(), root[1:])
            classpath = ("%s%s.%s.%s" %
                         (package, relpkg, root, classname))

            if ext_list is not None and classname not in ext_list:
                # Pass classpath as a lazy argument (consistent with the
                # warning calls below) so the message is only rendered
                # when DEBUG logging is actually enabled.
                logger.debug("Skipping extension: %s", classpath)
                continue

            # A broken extension must not prevent the others from loading.
            try:
                ext_mgr.load_extension(classpath)
            except Exception as exc:
                logger.warning(_LW('Failed to load extension %(classpath)s: '
                                   '%(exc)s'),
                               {'classpath': classpath, 'exc': exc})

        # Now, let's consider any subdirectories we may have...
        subdirs = []
        for dname in dirnames:
            # Skip it if it does not have __init__.py
            if not os.path.exists(os.path.join(dirpath, dname,
                                               '__init__.py')):
                continue

            # If it has extension(), delegate...
            ext_name = ("%s%s.%s.extension" %
                        (package, relpkg, dname))
            try:
                # 'extension' (not 'ext') to avoid shadowing the file
                # extension variable from the splitext() call above.
                extension = importutils.import_class(ext_name)
            except ImportError:
                # extension() doesn't exist on it, so we'll explore
                # the directory for ourselves
                subdirs.append(dname)
            else:
                try:
                    extension(ext_mgr)
                except Exception as exc:
                    logger.warning(_LW('Failed to load extension '
                                       '%(ext_name)s: %(exc)s'),
                                   {'ext_name': ext_name, 'exc': exc})

        # Update the list of directories we'll explore...
        dirnames[:] = subdirs