def startup_sanity_check():
    """Validate heat.conf settings needed at service startup.

    Checks that the stack-user-domain configuration is coherent and that
    auth_encryption_key has an acceptable length, raising exception.Error
    on misconfiguration.
    """
    domain_configured = (cfg.CONF.stack_user_domain_id or
                         cfg.CONF.stack_user_domain_name)
    if not domain_configured:
        # FIXME(shardy): Legacy fallback for folks using old heat.conf
        # files which lack domain configuration
        LOG.warning('stack_user_domain_id or stack_user_domain_name not '
                    'set in heat.conf falling back to using default')
    elif not (cfg.CONF.stack_domain_admin and
              cfg.CONF.stack_domain_admin_password):
        # A stack user domain was configured, so admin credentials for
        # it are mandatory.
        raise exception.Error(
            _('heat.conf misconfigured, cannot '
              'specify "stack_user_domain_id" or '
              '"stack_user_domain_name" without '
              '"stack_domain_admin" and '
              '"stack_domain_admin_password"'))

    key_length = len(cfg.CONF.auth_encryption_key)
    if key_length in (16, 24):
        # Short keys still work, but are deprecated.
        LOG.warning('Please update auth_encryption_key to be 32 characters.')
    elif key_length != 32:
        raise exception.Error(
            _('heat.conf misconfigured, auth_encryption_key '
              'must be 32 characters'))
def suspend(self):
    '''
    Suspend the resource.  Subclasses should provide a handle_suspend()
    method to implement suspend
    '''
    action = self.SUSPEND

    # Don't try to suspend the resource unless it's in a stable state
    if (self.action == self.DELETE or self.status != self.COMPLETE):
        exc = exception.Error(_('State %s invalid for suspend')
                              % str(self.state))
        raise exception.ResourceFailure(exc, self, action)

    # Pass the argument lazily instead of eagerly %-formatting it, so
    # rendering only happens when INFO is enabled; this also matches
    # the sibling resume() implementation.
    LOG.info(_('suspending %s'), str(self))
    return self._do_action(action)
def _backup_restore(self, vol_id, backup_id):
    """Restore a volume from a backup, wrapping client errors.

    Returns True on success.  Client exceptions are converted into
    exception.Error with context; anything else propagates unchanged.
    """
    try:
        self.client().restores.restore(backup_id, vol_id)
    except Exception as ex:
        if not self.client_plugin().is_client_exception(ex):
            raise
        raise exception.Error(
            _("Failed to restore volume %(vol)s from backup %(backup)s "
              "- %(err)s") % {'vol': vol_id,
                              'backup': backup_id,
                              'err': ex})
    return True
def resume(self):
    '''
    Resume the resource.  Subclasses should provide a handle_resume()
    method to implement resume
    '''
    action = self.RESUME

    # Resuming is only valid from SUSPEND_COMPLETE
    if (self.SUSPEND, self.COMPLETE) != self.state:
        failure = exception.Error(_('State %s invalid for resume')
                                  % six.text_type(self.state))
        raise exception.ResourceFailure(failure, self, action)

    LOG.info(_LI('resuming %s'), six.text_type(self))
    return self._do_action(action)
def _verify_check_conditions(self, checks):
    """Raise exception.Error describing every failed check.

    Each check dict has 'attr', 'expected' and 'current' keys; a list
    'expected' means membership, otherwise equality is required.
    """
    def _matches(check):
        expected = check['expected']
        if isinstance(expected, list):
            return check['current'] in expected
        return check['current'] == expected

    msg = _("'%(attr)s': expected '%(expected)s', got '%(current)s'")
    failures = [msg % check for check in checks if not _matches(check)]
    if failures:
        raise exception.Error('; '.join(failures))
def _stub_resume(self, cookies=None, with_error=None):
    # Stub out the Instance resume handlers with mox for resume tests.
    cookies = cookies or []
    self.m.StubOutWithMock(instance.Instance, 'handle_resume')
    self.m.StubOutWithMock(instance.Instance, 'check_resume_complete')
    if with_error:
        # Error case: the first handle_resume() call raises and no
        # completion checks are recorded.
        instance.Instance.handle_resume().AndRaise(
            exception.Error(with_error))
        return
    # Success case: fall back to a single opaque cookie triple when the
    # caller supplied nothing truthy (note `cookies` is already [] here,
    # so this fallback applies in that case too).
    inst_cookies = cookies or [(object(), object(), object())]
    for cookie in inst_cookies:
        # Each resume returns its cookie; completion polls False once,
        # then True.  InAnyOrder() allows interleaving across instances.
        instance.Instance.handle_resume().InAnyOrder().AndReturn(cookie)
        instance.Instance.check_resume_complete(
            cookie).InAnyOrder().AndReturn(False)
        instance.Instance.check_resume_complete(
            cookie).InAnyOrder().AndReturn(True)
def check_resize(server, flavor, flavor_id):
    """
    Verify that a resizing server is properly resized.
    If that's the case, confirm the resize, if not raise an error.
    """
    refresh_server(server)
    # Yield while the resize is still in flight, refreshing each pass.
    while server.status == 'RESIZE':
        yield
        refresh_server(server)
    if server.status != 'VERIFY_RESIZE':
        raise exception.Error(
            _("Resizing to '%(flavor)s' failed, status '%(status)s'") %
            dict(flavor=flavor, status=server.status))
    server.confirm_resize()
def attach_volume(self, server_id, volume_id, device):
    """Attach a volume to a server and return the attachment id.

    Client exceptions are wrapped in exception.Error with context;
    anything else propagates unchanged.
    """
    try:
        attachment = self.client().volumes.create_server_volume(
            server_id=server_id, volume_id=volume_id, device=device)
    except Exception as ex:
        if not self.is_client_exception(ex):
            raise
        raise exception.Error(
            _("Failed to attach volume %(vol)s to server %(srv)s "
              "- %(err)s") % {'vol': volume_id,
                              'srv': server_id,
                              'err': ex})
    return attachment.id
def check_active(self, create_data=None):
    """Poll nova until this instance becomes ACTIVE.

    Returns True once ACTIVE (recording IP addresses and attaching
    volumes), False while still building, and raises exception.Error
    for any other status.
    """
    if self._server_status == 'ACTIVE':
        return True

    server = self.nova().servers.get(self.resource_id)
    self._server_status = server.status
    if server.status == 'BUILD':
        return False
    if server.status != 'ACTIVE':
        raise exception.Error('%s instance[%s] status[%s]' %
                              ('nova reported unexpected',
                               self.name, server.status))
    self._set_ipaddress(server.networks)
    self.attach_volumes()
    return True
def _stub_create(self, num, with_error=None):
    # Stub out the Instance create handlers with mox for `num` creates.
    self.m.StubOutWithMock(instance.Instance, 'handle_create')
    self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
    # Constraint validation is stubbed so property validation passes
    # without external service lookups.
    self.stub_ImageConstraint_validate()
    self.stub_FlavorConstraint_validate()
    self.stub_SnapshotConstraint_validate()
    if with_error:
        # Error case: the first handle_create() raises; no completion
        # checks are recorded.
        instance.Instance.handle_create().AndRaise(
            exception.Error(with_error))
        return
    # Success case: all creates share one opaque cookie; completion
    # polls False once, then True for every subsequent call.
    cookie = object()
    for x in range(num):
        instance.Instance.handle_create().AndReturn(cookie)
    instance.Instance.check_create_complete(cookie).AndReturn(False)
    instance.Instance.check_create_complete(
        cookie).MultipleTimes().AndReturn(True)
def get_volume_api_version(self):
    '''Returns the most recent API version.'''
    self.interface = self._get_client_option(CLIENT_NAME, 'endpoint_type')
    # Prefer the v3 endpoint, fall back to v2, otherwise fail.
    for service_type, version in ((self.VOLUME_V3, '3'),
                                  (self.VOLUME_V2, '2')):
        try:
            self.context.keystone_session.get_endpoint(
                service_type=service_type,
                interface=self.interface)
        except ks_exceptions.EndpointNotFound:
            continue
        self.service_type = service_type
        self.client_version = version
        return
    raise exception.Error(_('No volume service available.'))
def test_scaleup_failure(self, mock_error, mock_info, mock_send):
    """Scaling up must emit an error notification when resize fails."""
    self.mock_stack_except_for_group()
    group = self.create_autoscaling_stack_and_get_group()

    err_message = 'Boooom'
    resize_mock = self.patchobject(aws_asg.AutoScalingGroup, 'resize')
    resize_mock.side_effect = exception.Error(err_message)

    info, error = self.expected_notifs_calls(group,
                                             adjust=2,
                                             start_capacity=1,
                                             with_error=err_message)
    self.assertRaises(exception.Error, group.adjust, 2)
    # The group keeps its original size and both notifications fired.
    self.assertEqual(1, grouputils.get_size(group))
    mock_error.assert_has_calls([error])
    mock_info.assert_has_calls([info])
def _delete(self, backup=False):
    """Delete the backing volume, optionally taking a backup first.

    A missing volume is treated as already deleted; a volume still
    attached raises exception.Error.
    """
    if self.resource_id is None:
        return
    try:
        vol = self.cinder().volumes.get(self.resource_id)
        if backup:
            scheduler.TaskRunner(self._backup)()
            # Re-fetch so the status reflects the backup operation.
            vol.get()
        if vol.status == 'in-use':
            logger.warn('cant delete volume when in-use')
            raise exception.Error("Volume in use")
        self.cinder().volumes.delete(self.resource_id)
    except clients.cinderclient.exceptions.NotFound:
        # Already gone; nothing to do.
        pass
def stack_domain_user_token(self, user_id, project_id, password):
    """Get a token for a stack domain user."""
    if not self.stack_domain:
        # Note, no legacy fallback path as we don't want to deploy
        # tokens for non stack-domain users inside instances
        msg = _('Cannot get stack domain user token, no stack domain id '
                'configured, please fix your heat.conf')
        raise exception.Error(msg)

    # Create a keystoneclient session, then request a token with no
    # catalog (the token is expected to be used inside an instance
    # where a specific endpoint will be specified, and user-data
    # space is limited..)
    sess = session.Session.construct(self._ssl_options())

    # Note we do this directly via a post as there's currently
    # no way to get a nocatalog token via keystoneclient
    token_url = "%s/auth/tokens?nocatalog" % self.v3_endpoint
    headers = {'Accept': 'application/json'}
    # The stack domain may have been configured by id or by name.
    if self._stack_domain_is_id:
        domain = {'id': self.stack_domain}
    else:
        domain = {'name': self.stack_domain}
    # Keystone v3 password-auth request body, scoped to the project.
    body = {'auth': {'scope':
                     {'project': {'id': project_id}},
                     'identity': {'password': {'user': {
                         'domain': domain,
                         'password': password,
                         'id': user_id}},
                         'methods': ['password']}}}

    t = sess.post(token_url, headers=headers, json=body,
                  authenticated=False)
    # The issued token id is returned in the response header.
    return t.headers['X-Subject-Token']
def create_stack_domain_user(self, username, project_id, password=None):
    """Create a domain user defined as part of a stack.

    The user is defined either via template or created internally by a
    resource.  This user will be added to the heat_stack_user_role as
    defined in the config, and created in the specified project (which is
    expected to be in the stack_domain).

    Returns the keystone ID of the resulting user.
    """
    if not self.stack_domain:
        # FIXME(shardy): Legacy fallback for folks using old heat.conf
        # files which lack domain configuration
        return self.create_stack_user(username=username,
                                      password=password)
    # We add the new user to a special keystone role
    # This role is designed to allow easier differentiation of the
    # heat-generated "stack users" which will generally have credentials
    # deployed on an instance (hence are implicitly untrusted)
    stack_user_role = self.domain_admin_client.roles.list(
        name=cfg.CONF.heat_stack_user_role)
    if len(stack_user_role) == 1:
        role_id = stack_user_role[0].id
        # Create user
        user = self.domain_admin_client.users.create(
            name=self._get_username(username), password=password,
            default_project=project_id, domain=self.stack_domain_id)
        # Add to stack user role
        LOG.debug("Adding user %(user)s to role %(role)s" % {
            'user': user.id, 'role': role_id})
        self.domain_admin_client.roles.grant(role=role_id, user=user.id,
                                             project=project_id)
    else:
        # Misconfiguration: the configured role must exist exactly once.
        LOG.error(_LE("Failed to add user %(user)s to role %(role)s, "
                      "check role exists!"),
                  {'user': username,
                   'role': cfg.CONF.heat_stack_user_role})
        raise exception.Error(_("Can't find role %s")
                              % cfg.CONF.heat_stack_user_role)
    return user.id
def _create(self):
    # Build a cinderclient matching the available volume API version,
    # authenticated with the current context's existing token.
    con = self.context
    volume_api_version = self.get_volume_api_version()
    if volume_api_version == 1:
        service_type = self.VOLUME
        client_version = '1'
    elif volume_api_version == 2:
        service_type = self.VOLUME_V2
        client_version = '2'
    else:
        raise exception.Error(_('No volume service available.'))
    LOG.info(_LI('Creating Cinder client with volume API version %d.'),
             volume_api_version)
    endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
    args = {
        'service_type': service_type,
        'auth_url': con.auth_url or '',
        'project_id': con.tenant_id,
        # Token auth is injected below, so no username/api_key here.
        'username': None,
        'api_key': None,
        'endpoint_type': endpoint_type,
        'http_log_debug': self._get_client_option(CLIENT_NAME,
                                                  'http_log_debug'),
        'cacert': self._get_client_option(CLIENT_NAME, 'ca_file'),
        'insecure': self._get_client_option(CLIENT_NAME, 'insecure')
    }
    client = cc.Client(client_version, **args)
    # Reuse the context's token and catalog endpoint directly rather
    # than re-authenticating against keystone.
    management_url = self.url_for(service_type=service_type,
                                  endpoint_type=endpoint_type)
    client.client.auth_token = self.auth_token
    client.client.management_url = management_url

    client.volume_api_version = volume_api_version

    return client
def handle_create(self):
    """Add this resource's route to its router, rejecting duplicates.

    The resource id is set to 'router_id:destination:nexthop'.
    """
    router_id = self.properties.get(self.ROUTER_ID)
    routes = self.client().show_router(
        router_id).get('router').get('routes') or []
    new_route = {'destination': self.properties[self.DESTINATION],
                 'nexthop': self.properties[self.NEXTHOP]}
    if new_route in routes:
        raise exception.Error(_('Route duplicates an existing route.'))
    routes.append(new_route)
    self.client().update_router(router_id,
                                {'router': {'routes': routes}})
    new_route['router_id'] = router_id
    self.resource_id_set(
        '%(router_id)s:%(destination)s:%(nexthop)s' % new_route)
def create_with_template(self, child_template):
    '''
    Handle the creation of the nested stack from a given JSON template.
    '''
    template = parser.Template(child_template)
    params = parser.Parameters(self.physical_resource_name(),
                               template,
                               self._params())
    self._nested = parser.Stack(self.context,
                                self.physical_resource_name(),
                                template,
                                params)

    nested_id = self._nested.store(self.stack)
    self.resource_id_set(nested_id)

    self._nested.create()
    # Creation is synchronous here; anything but CREATE_COMPLETE means
    # the nested stack failed.
    if self._nested.state != self._nested.CREATE_COMPLETE:
        raise exception.Error(self._nested.state_description)
def handle_create(self):
    """Record the id of the single external network in metadata."""
    client = self.quantum()
    ext_nets = client.list_networks(
        **{'router:external': True})['networks']
    # TODO sbaker if there is more than one external network
    # add a heat configuration variable to set the ID of
    # the default one
    if len(ext_nets) != 1:
        raise exception.Error(
            'Expected 1 external network, found %d' % len(ext_nets))
    self.metadata = {'external_network_id': ext_nets[0]['id']}
def check_resize(server, flavor, flavor_id):
    """
    Verify that a resizing server is properly resized.
    If that's the case, confirm the resize, if not raise an error.
    """
    warnings.warn('nova_utils.check_resize is deprecated. '
                  'Use self.client_plugin("nova").check_resize')
    refresh_server(server)
    # Yield while the resize is still in flight, refreshing each pass.
    while server.status == 'RESIZE':
        yield
        refresh_server(server)
    if server.status != 'VERIFY_RESIZE':
        raise exception.Error(
            _("Resizing to '%(flavor)s' failed, status '%(status)s'") %
            dict(flavor=flavor, status=server.status))
    server.confirm_resize()
def _check_complete(sd):
    """Return True when the deployment has completed, raise on failure.

    A falsy deployment counts as complete; an in-progress one falls
    through (implicitly returning None, which is falsy for callers).
    """
    if not sd:
        return True
    # NOTE(dprince): when lazy loading the sd attributes
    # we need to support multiple versions of heatclient
    refresh = sd.get if hasattr(sd, 'get') else sd._get
    refresh()
    if sd.status == SoftwareDeployment.COMPLETE:
        return True
    if sd.status == SoftwareDeployment.FAILED:
        message = _("Deployment to server "
                    "failed: %s") % sd.status_reason
        LOG.error(message)
        raise exception.Error(message)
def update_with_template(self, child_template, user_params,
                         timeout_mins=None):
    """Update the nested stack with the new template."""
    current = self.nested()
    if current is None:
        raise exception.Error(_('Cannot update %s, stack not created')
                              % self.name)

    name = self.physical_resource_name()
    new_stack = self._parse_nested_stack(name, child_template,
                                         user_params, timeout_mins)
    new_stack.validate()
    new_stack.parameters.set_stack_id(current.identifier())
    current.updated_time = self.updated_time

    # Run the update asynchronously and hand the runner back to the
    # caller to poll.
    runner = scheduler.TaskRunner(current.update_task, new_stack)
    runner.start()
    return runner
def _delete_volume(self):
    """Request deletion of the backing volume.

    Returns True when there is nothing left to do (volume already
    deleting, or the lookup/delete failed with not-found); False when
    a delete was just issued and completion must still be polled.
    An attached volume raises exception.Error (re-raised through
    ignore_not_found since it is not a not-found error).
    """
    try:
        cinder = self.client()
        vol = cinder.volumes.get(self.resource_id)
        if vol.status == 'in-use':
            raise exception.Error(_('Volume in use'))
        # if the volume is already in deleting status,
        # just wait for the deletion to complete
        if vol.status == 'deleting':
            return True
        cinder.volumes.delete(self.resource_id)
    except Exception as ex:
        self.client_plugin().ignore_not_found(ex)
        return True
    else:
        return False
def attach_volume_to_instance(self, server_id, volume_id, device_id):
    """Attach a volume and block until it reaches 'in-use'.

    Returns the attachment id; raises exception.Error carrying the
    final volume status on any other outcome.
    """
    logger.warn('Attaching InstanceId %s VolumeId %s Device %s' %
                (server_id, volume_id, device_id))
    va = self.nova().volumes.create_server_volume(
        server_id=server_id, volume_id=volume_id, device=device_id)

    vol = self.cinder().volumes.get(va.id)
    # Poll until the attach settles one way or the other.
    while vol.status in ('available', 'attaching'):
        eventlet.sleep(1)
        vol.get()

    if vol.status != 'in-use':
        raise exception.Error(vol.status)
    return va.id
def check_resize(self, server_id, flavor_id, flavor):
    """Verify that a resizing server is properly resized.

    If that's the case, confirm the resize, if not raise an error.
    """
    server = self.fetch_server(server_id)
    # resize operation is asynchronous so the server resize may not start
    # when checking server status (the server may stay ACTIVE instead
    # of RESIZE).
    if not server or server.status in ('RESIZE', 'ACTIVE'):
        return False
    if server.status != 'VERIFY_RESIZE':
        raise exception.Error(
            _("Resizing to '%(flavor)s' failed, status '%(status)s'") %
            dict(flavor=flavor, status=server.status))
    return True
def update_with_template(self, child_template, user_params,
                         timeout_mins=None):
    """Update the nested stack with the new template."""
    template = parser.Template(child_template, files=self.stack.t.files)
    # Note that there is no call to self._outputs_to_attribs here.
    # If we have a use case for updating attributes of the resource based
    # on updated templates we should make sure it's optional because not
    # all subclasses want that behavior, since they may offer custom
    # attributes.
    nested_stack = self.nested()
    if nested_stack is None:
        raise exception.Error(_('Cannot update %s, stack not created')
                              % self.name)
    # Enforce the global resource-count quota against the size change
    # this update would introduce.
    res_diff = (
        len(template[template.RESOURCES]) - len(nested_stack.resources))
    new_size = nested_stack.root_stack.total_resources() + res_diff
    if new_size > cfg.CONF.max_resources_per_stack:
        raise exception.RequestLimitExceeded(
            message=exception.StackResourceLimitExceeded.msg_fmt)

    if timeout_mins is None:
        timeout_mins = self.stack.timeout_mins

    # Note we disable rollback for nested stacks, since they
    # should be rolled back by the parent stack on failure
    stack = parser.Stack(self.context,
                         self.physical_resource_name(),
                         template,
                         self._nested_environment(user_params),
                         timeout_mins=timeout_mins,
                         disable_rollback=True,
                         parent_resource=self,
                         owner_id=self.stack.id)
    stack.parameters.set_stack_id(nested_stack.identifier())
    stack.validate()

    if not hasattr(type(self), 'attributes_schema'):
        self.attributes = None
        # NOTE(review): this passes the raw child_template while the
        # parsed `template` is used everywhere else — confirm intended.
        self._outputs_to_attribs(child_template)

    updater = scheduler.TaskRunner(nested_stack.update_task, stack)
    updater.start()
    return updater
def update_with_template(self, child_template, user_params,
                         timeout_mins=None):
    """Update the nested stack with the new template."""
    # Accept either an already-parsed Template or raw template data.
    if isinstance(child_template, parser.Template):
        template = child_template
        template.files = self.stack.t.files
    else:
        template = parser.Template(child_template,
                                   files=self.stack.t.files)
    nested_stack = self.nested()
    if nested_stack is None:
        raise exception.Error(_('Cannot update %s, stack not created')
                              % self.name)
    # Enforce the global resource-count quota against the size change
    # this update would introduce.
    res_diff = (
        len(template[template.RESOURCES]) - len(nested_stack.resources))
    new_size = nested_stack.root_stack.total_resources() + res_diff
    if new_size > cfg.CONF.max_resources_per_stack:
        raise exception.RequestLimitExceeded(
            message=exception.StackResourceLimitExceeded.msg_fmt)

    if timeout_mins is None:
        timeout_mins = self.stack.timeout_mins

    # Note we disable rollback for nested stacks, since they
    # should be rolled back by the parent stack on failure
    stack = parser.Stack(self.context,
                         self.physical_resource_name(),
                         template,
                         self._nested_environment(user_params),
                         timeout_mins=timeout_mins,
                         disable_rollback=True,
                         parent_resource=self,
                         owner_id=self.stack.id)
    stack.parameters.set_stack_id(nested_stack.identifier())
    stack.validate()

    # Don't overwrite the attributes_schema on update for subclasses that
    # define their own attributes_schema.
    if not hasattr(type(self), 'attributes_schema'):
        self.attributes = None
        self._outputs_to_attribs(template)

    updater = scheduler.TaskRunner(nested_stack.update_task, stack)
    updater.start()
    return updater
def handle_create(self):
    """Attach the configured volume and wait until it is in use.

    Sets the resource id to the attachment id on success; raises
    exception.Error carrying the final volume status otherwise.
    """
    server_id = self.properties['InstanceId']
    volume_id = self.properties['VolumeId']
    logger.warn('Attaching InstanceId %s VolumeId %s Device %s' %
                (server_id, volume_id, self.properties['Device']))
    va = self.nova().volumes.create_server_volume(
        server_id=server_id,
        volume_id=volume_id,
        device=self.properties['Device'])

    vol = self.nova('volume').volumes.get(va.id)
    # Poll until the attachment settles one way or the other.
    while vol.status in ('available', 'attaching'):
        eventlet.sleep(1)
        vol.get()

    if vol.status != 'in-use':
        raise exception.Error(vol.status)
    self.resource_id_set(va.id)
def check_create_complete(self, instance):
    '''
    Check if cloud DB instance creation is complete.
    '''
    self._refresh_instance(instance)

    if instance.status == 'ERROR':
        raise exception.Error(_("Database instance creation failed."))
    if instance.status != 'ACTIVE':
        return False

    details = {'database': self.dbinstancename,
               'flavor': self.flavor,
               'volume': self.volume}
    logger.info(_("Database instance %(database)s created "
                  "(flavor:%(flavor)s, volume:%(volume)s)") % details)
    return True
def startup_sanity_check():
    """Verify the stack-user-domain settings in heat.conf are coherent."""
    if not (cfg.CONF.stack_user_domain_id or
            cfg.CONF.stack_user_domain_name):
        # FIXME(shardy): Legacy fallback for folks using old heat.conf
        # files which lack domain configuration
        LOG.warn(
            _LW('stack_user_domain_id or stack_user_domain_name not '
                'set in heat.conf falling back to using default'))
        return
    # A stack user domain is configured, so the admin credentials used
    # to manage it must be present too.
    if not (cfg.CONF.stack_domain_admin and
            cfg.CONF.stack_domain_admin_password):
        raise exception.Error(
            _('heat.conf misconfigured, cannot '
              'specify "stack_user_domain_id" or '
              '"stack_user_domain_name" without '
              '"stack_domain_admin" and '
              '"stack_domain_admin_password"'))