def __init__(self, message=None, **kwargs):
    self.kwargs = kwargs

    if 'code' not in self.kwargs:
        try:
            self.kwargs['code'] = self.code
        except AttributeError:
            pass

    if message:
        self.message = message

    try:
        self.message = self.message % kwargs
    except Exception as e:
        # kwargs doesn't match a variable in the message
        # log the issue and the kwargs
        LOG.exception(_LE('Exception in string format operation'))
        for name, value in kwargs.items():
            LOG.error(_LE("%(name)s: %(value)s") %
                      {'name': name, 'value': value})
        try:
            if CONF.fatal_exception_format_errors:
                raise e
        except cfg.NoSuchOptError:
            # Note: work around for Bug: #1447873
            if CONF.oslo_versionedobjects.fatal_exception_format_errors:
                raise e

    super(MagnumException, self).__init__(self.message)
def _get_ks_client(self):
    kwargs = {'auth_url': self.auth_url,
              'endpoint': self.auth_url}
    if self.context.trust_id:
        kwargs.update(self._get_admin_credentials())
        kwargs['trust_id'] = self.context.trust_id
        kwargs.pop('project_name')
    elif self.context.auth_token_info:
        kwargs['token'] = self.context.auth_token
        if self._is_v2_valid(self.context.auth_token_info):
            LOG.warning('Keystone v2 is deprecated.')
            kwargs['auth_ref'] = self.context.auth_token_info['access']
            kwargs['auth_ref']['version'] = 'v2.0'
        elif self._is_v3_valid(self.context.auth_token_info):
            kwargs['auth_ref'] = self.context.auth_token_info['token']
            kwargs['auth_ref']['version'] = 'v3'
        else:
            LOG.error(_LE('Unknown version in auth_token_info'))
            raise exception.AuthorizationFailure()
    elif self.context.auth_token:
        kwargs['token'] = self.context.auth_token
    else:
        LOG.error(_LE('Keystone v3 API connection failed: no password, '
                      'trust or auth_token.'))
        raise exception.AuthorizationFailure()

    return kc_v3.Client(**kwargs)
def mkfs(fs, path, label=None):
    """Format a file or block device

    :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4',
               'btrfs', etc.)
    :param path: Path to file or block device to format
    :param label: Volume label to use
    """
    if fs == 'swap':
        args = ['mkswap']
    else:
        args = ['mkfs', '-t', fs]
    # add -F to force no interactive execute on non-block device.
    if fs in ('ext3', 'ext4'):
        args.extend(['-F'])
    if label:
        if fs in ('msdos', 'vfat'):
            label_opt = '-n'
        else:
            label_opt = '-L'
        args.extend([label_opt, label])
    args.append(path)
    try:
        execute(*args, run_as_root=True, use_standard_locale=True)
    except processutils.ProcessExecutionError as e:
        with excutils.save_and_reraise_exception() as ctx:
            if os.strerror(errno.ENOENT) in e.stderr:
                ctx.reraise = False
                LOG.exception(_LE('Failed to make file system. '
                                  'File system %s is not supported.'), fs)
                raise exception.FileSystemNotSupported(fs=fs)
            else:
                LOG.exception(_LE('Failed to create a file system '
                                  'in %(path)s. Error: %(error)s'),
                              {'path': path, 'error': e})
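# Usage sketch (hypothetical device path and label, not from the module above):
# format a block device as ext4 with a volume label. An unsupported filesystem
# type would surface as exception.FileSystemNotSupported; other mkfs failures
# are logged and re-raised by the error handling shown above.
mkfs('ext4', '/dev/vdb', label='docker-vol')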
def poll_and_check(self):
    # TODO(yuanying): temporary implementation to update api_address,
    # node_addresses and bay status
    stack = self.openstack_client.heat().stacks.get(self.bay.stack_id)
    self.attempts += 1
    # poll_and_check is detached and polling long time to check status,
    # so another user/client can call delete bay/stack.
    if stack.stack_status == 'DELETE_COMPLETE':
        LOG.info(_LI('Bay has been deleted, stack_id: %s')
                 % self.bay.stack_id)
        self.bay.destroy()
        raise loopingcall.LoopingCallDone()
    if stack.stack_status in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']:
        _update_stack_outputs(self.context, stack, self.bay)
        self.bay.status = stack.stack_status
        self.bay.save()
        raise loopingcall.LoopingCallDone()
    elif stack.stack_status != self.bay.status:
        self.bay.status = stack.stack_status
        self.bay.save()
    if stack.stack_status == 'CREATE_FAILED':
        LOG.error(_LE('Unable to create bay, stack_id: %(stack_id)s, '
                      'reason: %(reason)s') %
                  {'stack_id': self.bay.stack_id,
                   'reason': stack.stack_status_reason})
        raise loopingcall.LoopingCallDone()
    if stack.stack_status == 'DELETE_FAILED':
        LOG.error(_LE('Unable to delete bay, stack_id: %(stack_id)s, '
                      'reason: %(reason)s') %
                  {'stack_id': self.bay.stack_id,
                   'reason': stack.stack_status_reason})
        raise loopingcall.LoopingCallDone()
    if stack.stack_status == 'UPDATE_FAILED':
        LOG.error(_LE('Unable to update bay, stack_id: %(stack_id)s, '
                      'reason: %(reason)s') %
                  {'stack_id': self.bay.stack_id,
                   'reason': stack.stack_status_reason})
        raise loopingcall.LoopingCallDone()
    # only check max attempts when the stack is being created and the
    # timeout hasn't been set. If the timeout has been set then the
    # loop will end when the stack completes or the timeout occurs
    if stack.stack_status == 'CREATE_IN_PROGRESS':
        if (stack.timeout_mins is None and
                self.attempts > cfg.CONF.k8s_heat.max_attempts):
            LOG.error(_LE('Bay check exit after %(attempts)s attempts, '
                          'stack_id: %(id)s, stack_status: %(status)s') %
                      {'attempts': cfg.CONF.k8s_heat.max_attempts,
                       'id': self.bay.stack_id,
                       'status': stack.stack_status})
            raise loopingcall.LoopingCallDone()
    else:
        if self.attempts > cfg.CONF.k8s_heat.max_attempts:
            LOG.error(_LE('Bay check exit after %(attempts)s attempts, '
                          'stack_id: %(id)s, stack_status: %(status)s') %
                      {'attempts': cfg.CONF.k8s_heat.max_attempts,
                       'id': self.bay.stack_id,
                       'status': stack.stack_status})
            raise loopingcall.LoopingCallDone()
def _v3_client_init(self):
    kwargs = {
        'auth_url': self.v3_endpoint,
        'endpoint': self.v3_endpoint
    }
    # Note try trust_id first, as we can't reuse auth_token in that case
    if self.context.trust_id is not None:
        # We got a trust_id, so we use the admin credentials
        # to authenticate with the trust_id so we can use the
        # trust impersonating the trustor user.
        kwargs.update(self._service_admin_creds())
        kwargs['trust_id'] = self.context.trust_id
        kwargs.pop('project_name')
    elif self.context.auth_token_info is not None:
        # The auth_ref version must be set according to the token version
        if 'access' in self.context.auth_token_info:
            kwargs['auth_ref'] = copy.deepcopy(
                self.context.auth_token_info['access'])
            kwargs['auth_ref']['version'] = 'v2.0'
            kwargs['auth_ref']['token']['id'] = self.context.auth_token
        elif 'token' in self.context.auth_token_info:
            kwargs['auth_ref'] = copy.deepcopy(
                self.context.auth_token_info['token'])
            kwargs['auth_ref']['version'] = 'v3'
            kwargs['auth_ref']['auth_token'] = self.context.auth_token
        else:
            LOG.error(_LE("Unknown version in auth_token_info"))
            raise exception.AuthorizationFailure()
    elif self.context.auth_token is not None:
        kwargs['token'] = self.context.auth_token
        kwargs['project_id'] = self.context.project_id
    else:
        LOG.error(_LE("Keystone v3 API connection failed, no password "
                      "trust or auth_token!"))
        raise exception.AuthorizationFailure()

    client = kc_v3.Client(**kwargs)
    if 'auth_ref' not in kwargs:
        client.authenticate()
    # If we are authenticating with a trust set the context auth_token
    # with the trust scoped token
    if 'trust_id' in kwargs:
        # Sanity check
        if not client.auth_ref.trust_scoped:
            LOG.error(_LE("trust token re-scoping failed!"))
            raise exception.AuthorizationFailure()
        # All OK so update the context with the token
        self.context.auth_token = client.auth_ref.auth_token
        self.context.auth_url = self.v3_endpoint
        self.context.user = client.auth_ref.user_id
        self.context.project_id = client.auth_ref.project_id
        self.context.user_name = client.auth_ref.username

    return client
def wait_for_created_bay(self, bay_id, delete_on_error=True):
    try:
        utils.wait_for_condition(
            lambda: self.does_bay_exist(bay_id), 10, 1800)
    except Exception:
        # In error state. Clean up the bay id if desired
        self.LOG.error(_LE('Bay %s entered an exception state.'), bay_id)
        if delete_on_error:
            self.LOG.error(_LE('We will attempt to delete bays now.'))
            self.delete_bay(bay_id)
            self.wait_for_bay_to_delete(bay_id)
        raise
def wait_for_created_cluster(self, cluster_id, delete_on_error=True):
    try:
        utils.wait_for_condition(
            lambda: self.does_cluster_exist(cluster_id), 10, 1800)
    except Exception:
        # In error state. Clean up the cluster id if desired
        self.LOG.error(_LE('Cluster %s entered an exception state.'),
                       cluster_id)
        if delete_on_error:
            self.LOG.error(_LE('We will attempt to delete clusters now.'))
            self.delete_cluster(cluster_id)
            self.wait_for_cluster_to_delete(cluster_id)
        raise
def _bay_failed(self, stack):
    LOG.error(_LE('Bay error, stack status: %(bay_status)s, '
                  'stack_id: %(stack_id)s, '
                  'reason: %(reason)s') %
              {'bay_status': stack.stack_status,
               'stack_id': self.bay.stack_id,
               'reason': self.bay.status_reason})
def _get_auth(self):
    if self.context.is_admin:
        try:
            auth = ka_loading.load_auth_from_conf_options(
                CONF, ksconf.CFG_GROUP)
        except ka_exception.MissingRequiredOptions:
            auth = self._get_legacy_auth()
    elif self.context.auth_token_info:
        access_info = ka_access.create(body=self.context.auth_token_info,
                                       auth_token=self.context.auth_token)
        auth = ka_access_plugin.AccessInfoPlugin(access_info)
    elif self.context.auth_token:
        auth = ka_v3.Token(auth_url=self.auth_url,
                           token=self.context.auth_token)
    elif self.context.trust_id:
        auth_info = {
            'auth_url': self.auth_url,
            'username': self.context.user_name,
            'password': self.context.password,
            'user_domain_id': self.context.user_domain_id,
            'user_domain_name': self.context.user_domain_name,
            'trust_id': self.context.trust_id
        }
        auth = ka_v3.Password(**auth_info)
    else:
        LOG.error(_LE('Keystone API connection failed: no password, '
                      'trust_id or token found.'))
        raise exception.AuthorizationFailure()

    return auth
def _get_containers_collection(self, marker, limit, sort_key, sort_dir,
                               expand=False, resource_url=None):
    limit = api_utils.validate_limit(limit)
    sort_dir = api_utils.validate_sort_dir(sort_dir)

    marker_obj = None
    if marker:
        marker_obj = objects.Container.get_by_uuid(pecan.request.context,
                                                   marker)

    containers = objects.Container.list(pecan.request.context, limit,
                                        marker_obj, sort_key=sort_key,
                                        sort_dir=sort_dir)

    for i, c in enumerate(containers):
        try:
            containers[i] = pecan.request.rpcapi.container_show(c.uuid)
        except Exception as e:
            LOG.exception(_LE("Error while listing container %(uuid)s: "
                              "%(e)s."), {'uuid': c.uuid, 'e': e})
            containers[i].status = fields.ContainerStatus.UNKNOWN

    return ContainerCollection.convert_with_links(containers, limit,
                                                  url=resource_url,
                                                  expand=expand,
                                                  sort_key=sort_key,
                                                  sort_dir=sort_dir)
def get_params(self, context, cluster_template, cluster, **kwargs):
    osc = self.get_osc(context)

    extra_params = kwargs.pop('extra_params', {})
    extra_params['trustee_domain_id'] = osc.keystone().trustee_domain_id
    extra_params['trustee_user_id'] = cluster.trustee_user_id
    extra_params['trustee_username'] = cluster.trustee_username
    extra_params['trustee_password'] = cluster.trustee_password

    # Only pass trust ID into the template when it is needed.
    if (cluster_template.volume_driver == 'rexray' or
            cluster_template.registry_enabled):
        if CONF.trust.cluster_user_trust:
            extra_params['trust_id'] = cluster.trust_id
        else:
            missing_setting = 'trust/cluster_user_trust = True'
            msg = _LE('This cluster can only be created with %s in '
                      'magnum.conf')
            raise exception.ConfigInvalid(msg % missing_setting)
    else:
        extra_params['trust_id'] = ""

    extra_params['auth_url'] = context.auth_url

    return super(BaseTemplateDefinition,
                 self).get_params(context, cluster_template, cluster,
                                  extra_params=extra_params,
                                  **kwargs)
def _get_auth(self):
    if self.context.is_admin:
        try:
            auth = ka_loading.load_auth_from_conf_options(CONF, CFG_GROUP)
        except ka_exception.MissingRequiredOptions:
            auth = self._get_legacy_auth()
    elif self.context.auth_token_info:
        access_info = ka_access.create(body=self.context.auth_token_info,
                                       auth_token=self.context.auth_token)
        auth = ka_access_plugin.AccessInfoPlugin(access_info)
    elif self.context.auth_token:
        auth = ka_v3.Token(auth_url=self.auth_url,
                           token=self.context.auth_token)
    elif self.context.trust_id:
        auth_info = {
            'auth_url': self.auth_url,
            'username': self.context.user_name,
            'password': self.context.password,
            'user_domain_id': self.context.user_domain_id,
            'user_domain_name': self.context.user_domain_name,
            'trust_id': self.context.trust_id
        }
        auth = ka_v3.Password(**auth_info)
    else:
        LOG.error(
            _LE('Keystone API connection failed: no password, '
                'trust_id or token found.'))
        raise exception.AuthorizationFailure()

    return auth
def __init__(self, message=None, **kwargs):
    self.kwargs = kwargs

    if 'code' not in self.kwargs and hasattr(self, 'code'):
        self.kwargs['code'] = self.code

    if message:
        self.message = message

    try:
        self.message = self.message % kwargs
    except Exception as e:
        # kwargs doesn't match a variable in the message
        # log the issue and the kwargs
        LOG.exception(_LE('Exception in string format operation, '
                          'kwargs: %s') % kwargs)
        try:
            if CONF.fatal_exception_format_errors:
                raise e
        except cfg.NoSuchOptError:
            # Note: work around for Bug: #1447873
            if CONF.oslo_versionedobjects.fatal_exception_format_errors:
                raise e

    super(MagnumException, self).__init__(self.message)
def get_cert(cert_ref, service_name='Magnum', resource_ref=None, check_only=False, **kwargs): """Retrieves the specified cert and registers as a consumer. :param cert_ref: the UUID of the cert to retrieve :param service_name: Friendly name for the consuming service :param resource_ref: Full HATEOAS reference to the consuming resource :param check_only: Read Certificate data without registering :return: Magnum.certificates.common.Cert representation of the certificate data :raises Exception: if certificate retrieval fails """ connection = get_admin_clients().barbican() LOG.info(_LI( "Loading certificate container {0} from Barbican." ).format(cert_ref)) try: if check_only: cert_container = connection.containers.get( container_ref=cert_ref ) else: cert_container = connection.containers.register_consumer( container_ref=cert_ref, name=service_name, url=resource_ref ) return Cert(cert_container) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error getting {0}").format(cert_ref))
def delete_cert(cert_ref, service_name='Magnum', resource_ref=None, **kwargs): """Deregister as a consumer for the specified cert. :param cert_ref: the UUID of the cert to retrieve :param service_name: Friendly name for the consuming service :param resource_ref: Full HATEOAS reference to the consuming resource :raises Exception: if deregistration fails """ connection = get_admin_clients().barbican() LOG.info(_LI( "Deregistering as a consumer of {0} in Barbican." ).format(cert_ref)) try: connection.containers.remove_consumer( container_ref=cert_ref, name=service_name, url=resource_ref ) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE( "Error deregistering as a consumer of {0}" ).format(cert_ref))
def get_validate_region_name(self, region_name):
    if region_name is None:
        message = _("region_name needs to be configured in magnum.conf")
        raise exception.InvalidParameterValue(message)
    """matches the region of a public endpoint for the Keystone service."""
    try:
        regions = self.client.regions.list()
    except kc_exception.NotFound:
        pass
    except Exception:
        LOG.exception(_LE('Failed to list regions'))
        raise exception.RegionsListFailed()
    region_list = []
    for region in regions:
        region_list.append(region.id)
    if region_name not in region_list:
        raise exception.InvalidParameterValue(
            _('region_name %(region_name)s is invalid, '
              'expecting a region_name in %(region_name_list)s.') % {
                'region_name': region_name,
                'region_name_list': '/'.join(
                    region_list + ['unspecified'])})
    return region_name
def delete_trust(self, context, bay):
    if bay.trust_id is None:
        return

    # Trust can only be deleted by the user who creates it. So when
    # other users in the same project want to delete the bay, we need
    # to use the trustee, which can impersonate the trustor, to delete
    # the trust.
    if context.user_id == bay.user_id:
        client = self.client
    else:
        auth = ka_v3.Password(auth_url=self.auth_url,
                              user_id=bay.trustee_user_id,
                              password=bay.trustee_password,
                              trust_id=bay.trust_id)
        sess = ka_loading.session.Session().load_from_options(
            auth=auth,
            insecure=CONF[CFG_LEGACY_GROUP].insecure,
            cacert=CONF[CFG_LEGACY_GROUP].cafile,
            key=CONF[CFG_LEGACY_GROUP].keyfile,
            cert=CONF[CFG_LEGACY_GROUP].certfile)
        client = kc_v3.Client(session=sess)
    try:
        client.trusts.delete(bay.trust_id)
    except kc_exception.NotFound:
        pass
    except Exception:
        LOG.exception(_LE('Failed to delete trust'))
        raise exception.TrustDeleteFailed(trust_id=bay.trust_id)
def main():
    magnum_service.prepare_service(sys.argv)

    LOG.info(_LI('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    cfg.CONF.import_opt('topic', 'magnum.conductor.config',
                        group='conductor')

    conductor_id = short_id.generate_id()
    endpoints = [
        docker_conductor.Handler(),
        k8s_conductor.Handler(),
        bay_conductor.Handler(),
        conductor_listener.Handler(),
    ]

    if (not os.path.isfile(cfg.CONF.bay.k8s_atomic_template_path) and
            not os.path.isfile(cfg.CONF.bay.k8s_coreos_template_path)):
        LOG.error(_LE("The Heat template can not be found for either k8s "
                      "atomic %(atomic_template)s or coreos "
                      "%(coreos_template)s. Install template first if you "
                      "want to create bay.") %
                  {'atomic_template': cfg.CONF.bay.k8s_atomic_template_path,
                   'coreos_template': cfg.CONF.bay.k8s_coreos_template_path})

    server = rpc_service.Service.create(cfg.CONF.conductor.topic,
                                        conductor_id, endpoints)
    launcher = service.launch(cfg.CONF, server)
    launcher.wait()
def _actually_delete_cert(cert_ref): """Deletes the specified cert. Very dangerous. Do not recommend. :param cert_ref: the UUID of the cert to delete :raises Exception: if certificate deletion fails """ connection = get_admin_clients().barbican() LOG.info(_LI( "Recursively deleting certificate container {0} from Barbican." ).format(cert_ref)) try: certificate_container = connection.containers.get(cert_ref) certificate_container.certificate.delete() if certificate_container.intermediates: certificate_container.intermediates.delete() if certificate_container.private_key_passphrase: certificate_container.private_key_passphrase.delete() certificate_container.private_key.delete() certificate_container.delete() except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE( "Error recursively deleting certificate container {0}" ).format(cert_ref))
def get_openstack_resource(manager, resource_ident, resource_type):
    """Get the OpenStack resource from a UUID or logical name.

    :param manager: the resource manager class.
    :param resource_ident: the UUID or logical name of the resource.
    :param resource_type: the type of the resource.
    :returns: The OpenStack resource.
    :raises: ResourceNotFound if the OpenStack resource does not exist.
             Conflict if multiple OpenStack resources have the same name.
    """
    if uuidutils.is_uuid_like(resource_ident):
        resource_data = manager.get(resource_ident)
    else:
        filters = {'name': resource_ident}
        matches = list(manager.list(filters=filters))
        if len(matches) == 0:
            raise exception.ResourceNotFound(name=resource_type,
                                             id=resource_ident)
        if len(matches) > 1:
            msg = _LE("Multiple %(resource_type)s exist with same name "
                      "%(resource_ident)s. Please use the resource id "
                      "instead.") % {'resource_type': resource_type,
                                     'resource_ident': resource_ident}
            raise exception.Conflict(msg)
        resource_data = matches[0]
    return resource_data
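# Usage sketch (assumed client wiring, not part of the helper above): resolve a
# Glance image either by UUID or by name. A missing image raises
# exception.ResourceNotFound and an ambiguous name raises exception.Conflict.
osc = clients.OpenStackClients(context)   # assumed OpenStack client helper
image = get_openstack_resource(osc.glance().images, 'fedora-atomic-latest',
                               'image')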
def get_cert(cert_ref, service_name='Magnum', resource_ref=None, check_only=False, **kwargs): """Retrieves the specified cert and registers as a consumer. :param cert_ref: the UUID of the cert to retrieve :param service_name: Friendly name for the consuming service :param resource_ref: Full HATEOAS reference to the consuming resource :param check_only: Read Certificate data without registering :return: Magnum.certificates.common.Cert representation of the certificate data :raises Exception: if certificate retrieval fails """ connection = get_admin_clients().barbican() LOG.info(_LI( "Loading certificate container {0} from Barbican." ).format(cert_ref)) try: if check_only: cert_container = connection.containers.get( container_ref=cert_ref ) else: cert_container = connection.containers.register_consumer( container_ref=cert_ref, name=service_name, url=resource_ref ) return Cert(cert_container) except barbican_exc.HTTPClientError: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Error getting {0}").format(cert_ref))
def _cluster_failed(self, stack):
    LOG.error(_LE('Cluster error, stack status: %(cluster_status)s, '
                  'stack_id: %(stack_id)s, '
                  'reason: %(reason)s') %
              {'cluster_status': stack.stack_status,
               'stack_id': self.cluster.stack_id,
               'reason': self.cluster.status_reason})
def main():
    logging.register_options(cfg.CONF)
    cfg.CONF(sys.argv[1:], project='magnum')
    logging.setup(cfg.CONF, 'magnum')

    LOG.info(_LI('Starting server in PID %s') % os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)

    cfg.CONF.import_opt('topic', 'magnum.conductor.config',
                        group='conductor')

    conductor_id = short_id.generate_id()
    endpoints = [
        docker_conductor.Handler(),
        k8s_conductor.Handler(),
        bay_conductor.Handler(),
        conductor_listener.Handler(),
    ]

    if (not os.path.isfile(cfg.CONF.bay.k8s_atomic_template_path) and
            not os.path.isfile(cfg.CONF.bay.k8s_coreos_template_path)):
        LOG.error(_LE("The Heat template can not be found for either k8s "
                      "atomic %(atomic_template)s or coreos "
                      "%(coreos_template)s. Install template first if you "
                      "want to create bay.") %
                  {'atomic_template': cfg.CONF.bay.k8s_atomic_template_path,
                   'coreos_template': cfg.CONF.bay.k8s_coreos_template_path})

    server = service.Service(cfg.CONF.conductor.topic, conductor_id,
                             endpoints)
    server.serve()
def delete_cert(cert_ref, service_name='Magnum', resource_ref=None, **kwargs): """Deletes the specified cert. :param cert_ref: the UUID of the cert to delete :raises Exception: if certificate deletion fails """ connection = get_admin_clients().barbican() LOG.info(_LI( "Recursively deleting certificate container {0} from Barbican." ).format(cert_ref)) try: certificate_container = connection.containers.get(cert_ref) certificate_container.certificate.delete() if certificate_container.intermediates: certificate_container.intermediates.delete() if certificate_container.private_key_passphrase: certificate_container.private_key_passphrase.delete() certificate_container.private_key.delete() certificate_container.delete() except barbican_exc.HTTPClientError: with excutils.save_and_reraise_exception(): LOG.exception(_LE( "Error recursively deleting certificate container {0}" ).format(cert_ref))
def delete_cert(cert_ref, **kwargs): """Deletes the specified cert. :param cert_ref: the UUID of the cert to delete :raises CertificateStorageException: if certificate deletion fails """ LOG.warning(_LW( "Deleting certificate {0} from the local filesystem. " "CertManager type 'local' should be used for testing purpose." ).format(cert_ref)) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) filename_certificate = "{0}.crt".format(filename_base) filename_private_key = "{0}.key".format(filename_base) filename_intermediates = "{0}.int".format(filename_base) filename_pkp = "{0}.pass".format(filename_base) try: os.remove(filename_certificate) os.remove(filename_private_key) if path.isfile(filename_intermediates): os.remove(filename_intermediates) if path.isfile(filename_pkp): os.remove(filename_pkp) except IOError as ioe: LOG.error(_LE( "Failed to delete certificate {0}." ).format(cert_ref)) raise exception.CertificateStorageException(msg=str(ioe))
def delete_cert(cert_ref, service_name='Magnum', resource_ref=None, **kwargs): """Deregister as a consumer for the specified cert. :param cert_ref: the UUID of the cert to retrieve :param service_name: Friendly name for the consuming service :param resource_ref: Full HATEOAS reference to the consuming resource :raises Exception: if deregistration fails """ connection = get_admin_clients().barbican() LOG.info( _LI("Deregistering as a consumer of {0} in Barbican.").format( cert_ref)) try: connection.containers.remove_consumer(container_ref=cert_ref, name=service_name, url=resource_ref) except Exception: with excutils.save_and_reraise_exception(): LOG.exception( _LE("Error deregistering as a consumer of {0}").format( cert_ref))
def __init__(self, message=None, **kwargs):
    self.kwargs = kwargs

    if 'code' not in self.kwargs and hasattr(self, 'code'):
        self.kwargs['code'] = self.code

    if message:
        self.message = message

    try:
        self.message = self.message % kwargs
    except Exception as e:
        # kwargs doesn't match a variable in the message
        # log the issue and the kwargs
        LOG.exception(
            _LE('Exception in string format operation, '
                'kwargs: %s') % kwargs)
        try:
            if CONF.fatal_exception_format_errors:
                raise e
        except cfg.NoSuchOptError:
            # Note: work around for Bug: #1447873
            if CONF.oslo_versionedobjects.fatal_exception_format_errors:
                raise e

    super(MagnumException, self).__init__(self.message)
def container_show(self, context, container_uuid): LOG.debug("container_show %s", container_uuid) with docker_utils.docker_for_container(context, container_uuid) as docker: container = objects.Container.get_by_uuid(context, container_uuid) try: docker_id = self._find_container_by_name(docker, container_uuid) if not docker_id: LOG.exception(_LE("Can not find docker instance with %s," "set it to Error status"), container_uuid) container.status = fields.ContainerStatus.ERROR container.save() return container result = docker.inspect_container(docker_id) status = result.get('State') if status: if status.get('Error') is True: container.status = fields.ContainerStatus.ERROR elif status.get('Paused'): container.status = fields.ContainerStatus.PAUSED elif status.get('Running'): container.status = fields.ContainerStatus.RUNNING else: container.status = fields.ContainerStatus.STOPPED container.save() return container except errors.APIError as api_error: error_message = str(api_error) if '404' in error_message: container.status = fields.ContainerStatus.ERROR container.save() return container raise
def container_show(self, context, container_uuid): LOG.debug("container_show %s" % container_uuid) docker = self.get_docker_client(context, container_uuid) container = objects.Container.get_by_uuid(context, container_uuid) try: docker_id = self._find_container_by_name(docker, container_uuid) if not docker_id: LOG.exception( _LE("Can not find docker instance with %s," "set it to Error status"), container_uuid) container.status = obj_container.ERROR container.save() return container result = docker.inspect_container(docker_id) status = result.get('State') if status: if status.get('Error') is True: container.status = obj_container.ERROR elif status.get('Running'): container.status = obj_container.RUNNING elif status.get('Paused'): container.status = obj_container.PAUSED else: container.status = obj_container.STOPPED container.save() return container except errors.APIError as api_error: error_message = str(api_error) if '404' in error_message: container.status = obj_container.ERROR container.save() return container raise exception.ContainerException("Docker API Error : %s" % (error_message))
def main():
    magnum_service.prepare_service(sys.argv)

    gmr.TextGuruMeditation.setup_autorun(version)

    LOG.info(_LI('Starting server in PID %s'), os.getpid())
    LOG.debug("Configuration:")
    cfg.CONF.log_opt_values(LOG, logging.DEBUG)

    cfg.CONF.import_opt('topic', 'magnum.conductor.config',
                        group='conductor')

    conductor_id = short_id.generate_id()
    endpoints = [
        indirection_api.Handler(),
        docker_conductor.Handler(),
        k8s_conductor.Handler(),
        bay_conductor.Handler(),
        conductor_listener.Handler(),
        ca_conductor.Handler(),
    ]

    if (not os.path.isfile(cfg.CONF.bay.k8s_atomic_template_path) and
            not os.path.isfile(cfg.CONF.bay.k8s_coreos_template_path)):
        LOG.error(_LE("The Heat template can not be found for either k8s "
                      "atomic %(atomic_template)s or coreos "
                      "%(coreos_template)s. Install template first if you "
                      "want to create bay.") %
                  {'atomic_template': cfg.CONF.bay.k8s_atomic_template_path,
                   'coreos_template': cfg.CONF.bay.k8s_coreos_template_path})

    server = rpc_service.Service.create(cfg.CONF.conductor.topic,
                                        conductor_id, endpoints,
                                        binary='magnum-conductor')
    launcher = service.launch(cfg.CONF, server)
    launcher.wait()
def load(s):
    try:
        yml_dict = yaml.safe_load(s)
    except yaml.YAMLError as exc:
        msg = _LE('An error occurred during YAML parsing.')
        if hasattr(exc, 'problem_mark'):
            msg += _LE(' Error position: '
                       '(%(l)s:%(c)s)') % {'l': exc.problem_mark.line + 1,
                                           'c': exc.problem_mark.column + 1}
        raise ValueError(msg)
    if not isinstance(yml_dict, dict) and not isinstance(yml_dict, list):
        raise ValueError(_LE('The source is not a YAML mapping or list.'))
    if isinstance(yml_dict, dict) and len(yml_dict) < 1:
        raise ValueError(_LE('Could not find any element in your YAML '
                             'mapping.'))
    return yml_dict
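# Usage sketch: load() accepts any YAML mapping or list; anything else (or an
# empty mapping) raises ValueError, and parse errors include the position when
# one is available.
manifest = load("""
apiVersion: v1
kind: Pod
metadata:
  name: example
""")
assert manifest['kind'] == 'Pod'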
def do_copy_logs(prefix, nodes_address):
    if not nodes_address:
        return

    msg = _LI("copy logs from : %s") % ','.join(nodes_address)
    cls.LOG.info(msg)
    log_name = prefix + "-" + func_name
    for node_address in nodes_address:
        try:
            cls.LOG.debug("running %s" % full_location)
            cls.LOG.debug("keypair: %s" % keypair)
            subprocess.check_call([
                full_location,
                node_address,
                coe,
                log_name,
                str(keypair)
            ])
        except Exception:
            cls.LOG.error(msg)
            msg = (_LE("failed to copy from %(node_address)s "
                       "to %(base_path)s%(log_name)s-"
                       "%(node_address)s") %
                   {'node_address': node_address,
                    'base_path': "/opt/stack/logs/bay-nodes/",
                    'log_name': log_name})
            cls.LOG.exception(msg)
def _actually_delete_cert(cert_ref): """Deletes the specified cert. Very dangerous. Do not recommend. :param cert_ref: the UUID of the cert to delete :raises Exception: if certificate deletion fails """ connection = get_admin_clients().barbican() LOG.info( _LI("Recursively deleting certificate container {0} from Barbican." ).format(cert_ref)) try: certificate_container = connection.containers.get(cert_ref) certificate_container.certificate.delete() if certificate_container.intermediates: certificate_container.intermediates.delete() if certificate_container.private_key_passphrase: certificate_container.private_key_passphrase.delete() certificate_container.private_key.delete() certificate_container.delete() except Exception: with excutils.save_and_reraise_exception(): LOG.exception( _LE("Error recursively deleting certificate container {0}" ).format(cert_ref))
def delete_cert(cert_ref, **kwargs): """Deletes the specified cert. :param cert_ref: the UUID of the cert to delete :raises CertificateStorageException: if certificate deletion fails """ LOG.warning(_LW( "Deleting certificate {0} from the local filesystem. " "CertManager type 'local' should be used for testing purpose." ).format(cert_ref)) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) filename_certificate = "{0}.crt".format(filename_base) filename_private_key = "{0}.key".format(filename_base) filename_intermediates = "{0}.int".format(filename_base) filename_pkp = "{0}.pass".format(filename_base) try: os.remove(filename_certificate) os.remove(filename_private_key) if path.isfile(filename_intermediates): os.remove(filename_intermediates) if path.isfile(filename_pkp): os.remove(filename_pkp) except IOError as ioe: LOG.error(_LE( "Failed to delete certificate {0}." ).format(cert_ref)) raise exception.CertificateStorageException(msg=ioe.message)
def poll_and_check(self):
    # TODO(yuanying): temporary implementation to update api_address,
    # node_addresses and bay status
    stack = self.openstack_client.heat().stacks.get(self.bay.stack_id)
    self.attempts += 1
    # poll_and_check is detached and polling long time to check status,
    # so another user/client can call delete bay/stack.
    if stack.stack_status == bay_status.DELETE_COMPLETE:
        self._delete_complete()
        raise loopingcall.LoopingCallDone()

    if stack.stack_status in (bay_status.CREATE_COMPLETE,
                              bay_status.UPDATE_COMPLETE):
        self._create_or_update_complete(stack)
        raise loopingcall.LoopingCallDone()
    elif stack.stack_status != self.bay.status:
        self._sync_bay_status(stack)

    if stack.stack_status in (bay_status.CREATE_FAILED,
                              bay_status.DELETE_FAILED,
                              bay_status.UPDATE_FAILED):
        self._bay_failed(stack)
        raise loopingcall.LoopingCallDone()
    # only check max attempts when the stack is being created and the
    # timeout hasn't been set. If the timeout has been set then the
    # loop will end when the stack completes or the timeout occurs
    if stack.stack_status == bay_status.CREATE_IN_PROGRESS:
        if (stack.timeout_mins is None and
                self.attempts > cfg.CONF.bay_heat.max_attempts):
            LOG.error(
                _LE('Bay check exit after %(attempts)s attempts, '
                    'stack_id: %(id)s, stack_status: %(status)s') % {
                        'attempts': cfg.CONF.bay_heat.max_attempts,
                        'id': self.bay.stack_id,
                        'status': stack.stack_status
                    })
            raise loopingcall.LoopingCallDone()
    else:
        if self.attempts > cfg.CONF.bay_heat.max_attempts:
            LOG.error(
                _LE('Bay check exit after %(attempts)s attempts, '
                    'stack_id: %(id)s, stack_status: %(status)s') % {
                        'attempts': cfg.CONF.bay_heat.max_attempts,
                        'id': self.bay.stack_id,
                        'status': stack.stack_status
                    })
            raise loopingcall.LoopingCallDone()
def sign(csr, issuer_name, ca_key, ca_key_password=None, skip_validation=False): """Sign a given csr :param csr: certificate signing request object or pem encoded csr :param issuer_name: issuer name :param ca_key: private key of CA :param ca_key_password: private key password for given ca key :param skip_validation: skip csr validation if true :returns: generated certificate """ if not isinstance(ca_key, rsa.RSAPrivateKey): ca_key = serialization.load_pem_private_key(ca_key, password=ca_key_password, backend=default_backend()) if not isinstance(issuer_name, six.text_type): issuer_name = six.text_type(issuer_name.decode('utf-8')) if isinstance(csr, six.text_type): csr = six.b(str(csr)) if not isinstance(csr, x509.CertificateSigningRequest): try: csr = x509.load_pem_x509_csr(csr, backend=default_backend()) except ValueError: LOG.exception(_LE("Received invalid csr {0}.").format(csr)) raise exception.InvalidCsr(csr=csr) term_of_validity = cfg.CONF.x509.term_of_validity one_day = datetime.timedelta(1, 0, 0) expire_after = datetime.timedelta(term_of_validity, 0, 0) builder = x509.CertificateBuilder() builder = builder.subject_name(csr.subject) # issuer_name is set as common name builder = builder.issuer_name(x509.Name([ x509.NameAttribute(x509.OID_COMMON_NAME, issuer_name), ])) builder = builder.not_valid_before(datetime.datetime.today() - one_day) builder = builder.not_valid_after(datetime.datetime.today() + expire_after) builder = builder.serial_number(int(uuid.uuid4())) builder = builder.public_key(csr.public_key()) if skip_validation: extensions = csr.extensions else: extensions = validator.filter_extensions(csr.extensions) for extention in extensions: builder = builder.add_extension(extention.value, critical=extention.critical) certificate = builder.sign( private_key=ca_key, algorithm=hashes.SHA256(), backend=default_backend() ).public_bytes(serialization.Encoding.PEM) return certificate
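# Usage sketch (assumed key sizes and names, standalone from the module above):
# generate a throwaway CA key and a client CSR with the `cryptography` library,
# then sign the CSR with sign() as defined above. Note that sign() also reads
# cfg.CONF.x509.term_of_validity, so real configuration must be loaded first.
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa

ca_key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                                  backend=default_backend())
client_key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                                      backend=default_backend())
csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([
    x509.NameAttribute(x509.OID_COMMON_NAME, u'example-node'),
])).sign(client_key, hashes.SHA256(), default_backend())

# sign() accepts either a CSR object or its PEM encoding.
cert_pem = sign(csr, u'example-ca', ca_key)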
def delete_trustee(self, trustee_id):
    try:
        self.domain_admin_client.users.delete(trustee_id)
    except kc_exception.NotFound:
        pass
    except Exception:
        LOG.exception(_LE('Failed to delete trustee'))
        raise exception.TrusteeDeleteFailed(trustee_id=trustee_id)
def _cluster_failed(self, stack):
    LOG.error(
        _LE('Cluster error, stack status: %(cluster_status)s, '
            'stack_id: %(stack_id)s, '
            'reason: %(reason)s') % {
                'cluster_status': stack.stack_status,
                'stack_id': self.cluster.stack_id,
                'reason': self.cluster.status_reason
            })
def poll_and_check(self):
    # TODO(yuanying): temporary implementation to update api_address,
    # node_addresses and bay status
    stack = self.openstack_client.heat().stacks.get(self.bay.stack_id)
    self.attempts += 1
    # poll_and_check is detached and polling long time to check status,
    # so another user/client can call delete bay/stack.
    if stack.stack_status == bay_status.DELETE_COMPLETE:
        self._delete_complete()
        raise loopingcall.LoopingCallDone()

    if stack.stack_status in (bay_status.CREATE_COMPLETE,
                              bay_status.UPDATE_COMPLETE):
        self._sync_bay_and_template_status(stack)
        raise loopingcall.LoopingCallDone()
    elif stack.stack_status != self.bay.status:
        self._sync_bay_status(stack)

    if stack.stack_status in (bay_status.CREATE_FAILED,
                              bay_status.DELETE_FAILED,
                              bay_status.UPDATE_FAILED):
        self._sync_bay_and_template_status(stack)
        self._bay_failed(stack)
        raise loopingcall.LoopingCallDone()
    # only check max attempts when the stack is being created and the
    # timeout hasn't been set. If the timeout has been set then the
    # loop will end when the stack completes or the timeout occurs
    if stack.stack_status == bay_status.CREATE_IN_PROGRESS:
        if (stack.timeout_mins is None and
                self.attempts > cfg.CONF.bay_heat.max_attempts):
            LOG.error(_LE('Bay check exit after %(attempts)s attempts, '
                          'stack_id: %(id)s, stack_status: %(status)s') %
                      {'attempts': cfg.CONF.bay_heat.max_attempts,
                       'id': self.bay.stack_id,
                       'status': stack.stack_status})
            raise loopingcall.LoopingCallDone()
    else:
        if self.attempts > cfg.CONF.bay_heat.max_attempts:
            LOG.error(_LE('Bay check exit after %(attempts)s attempts, '
                          'stack_id: %(id)s, stack_status: %(status)s') %
                      {'attempts': cfg.CONF.bay_heat.max_attempts,
                       'id': self.bay.stack_id,
                       'status': stack.stack_status})
            raise loopingcall.LoopingCallDone()
def sign(csr, issuer_name, ca_key, ca_key_password=None, skip_validation=False): """Sign a given csr :param csr: certificate signing request object or pem encoded csr :param issuer_name: issuer name :param ca_key: private key of CA :param ca_key_password: private key password for given ca key :param skip_validation: skip csr validation if true :returns: generated certificate """ ca_key = _load_pem_private_key(ca_key, ca_key_password) if not isinstance(issuer_name, six.text_type): issuer_name = six.text_type(issuer_name.decode('utf-8')) if isinstance(csr, six.text_type): csr = six.b(str(csr)) if not isinstance(csr, x509.CertificateSigningRequest): try: csr = x509.load_pem_x509_csr(csr, backend=default_backend()) except ValueError: LOG.exception(_LE("Received invalid csr {0}.").format(csr)) raise exception.InvalidCsr(csr=csr) term_of_validity = CONF.x509.term_of_validity one_day = datetime.timedelta(1, 0, 0) expire_after = datetime.timedelta(term_of_validity, 0, 0) builder = x509.CertificateBuilder() builder = builder.subject_name(csr.subject) # issuer_name is set as common name builder = builder.issuer_name(x509.Name([ x509.NameAttribute(x509.OID_COMMON_NAME, issuer_name), ])) builder = builder.not_valid_before(datetime.datetime.today() - one_day) builder = builder.not_valid_after(datetime.datetime.today() + expire_after) builder = builder.serial_number(int(uuid.uuid4())) builder = builder.public_key(csr.public_key()) if skip_validation: extensions = csr.extensions else: extensions = validator.filter_extensions(csr.extensions) for extention in extensions: builder = builder.add_extension(extention.value, critical=extention.critical) certificate = builder.sign( private_key=ca_key, algorithm=hashes.SHA256(), backend=default_backend() ).public_bytes(serialization.Encoding.PEM) return certificate
def create_trustee(self, username, password, domain_id):
    try:
        user = self.domain_admin_client.users.create(name=username,
                                                     password=password,
                                                     domain=domain_id)
    except Exception:
        LOG.exception(_LE('Failed to create trustee'))
        raise exception.TrusteeCreateFailed(username=username,
                                            domain_id=domain_id)
    return user
def tempdir(**kwargs):
    tempfile.tempdir = CONF.tempdir
    tmpdir = tempfile.mkdtemp(**kwargs)
    try:
        yield tmpdir
    finally:
        try:
            shutil.rmtree(tmpdir)
        except OSError as e:
            LOG.error(_LE('Could not remove tmpdir: %s'), e)
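# Usage sketch: tempdir() is normally wrapped with @contextlib.contextmanager
# (assumed here, the decorator is not shown above), so it can be used as a
# context manager and the directory is removed on exit even if the body raises.
with tempdir(prefix='magnum-') as tmp_path:
    cert_path = os.path.join(tmp_path, 'ca.crt')
    with open(cert_path, 'w') as f:
        f.write('-----BEGIN CERTIFICATE-----\n')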
def delete_trust(self, trust_id):
    if trust_id is None:
        return
    try:
        self.client.trusts.delete(trust_id)
    except kc_exception.NotFound:
        pass
    except Exception:
        LOG.exception(_LE('Failed to delete trust'))
        raise exception.TrustDeleteFailed(trust_id=trust_id)
def load(s):
    try:
        yml_dict = yaml.safe_load(s)
    except yaml.YAMLError as exc:
        msg = _LE('An error occurred during YAML parsing.')
        if hasattr(exc, 'problem_mark'):
            msg += _LE(' Error position: '
                       '(%(l)s:%(c)s)') % {
                           'l': exc.problem_mark.line + 1,
                           'c': exc.problem_mark.column + 1
                       }
        raise ValueError(msg)
    if not isinstance(yml_dict, dict) and not isinstance(yml_dict, list):
        raise ValueError(_LE('The source is not a YAML mapping or list.'))
    if isinstance(yml_dict, dict) and len(yml_dict) < 1:
        raise ValueError(
            _LE('Could not find any element in your YAML mapping.'))
    return yml_dict
def admin_client(self):
    if not self._admin_client:
        # Create admin client connection to v3 API
        admin_creds = self._service_admin_creds()
        c = kc_v3.Client(**admin_creds)
        if c.authenticate():
            self._admin_client = c
        else:
            LOG.error(_LE("Admin client authentication failed"))
            raise exception.AuthorizationFailure()
    return self._admin_client
def trustee_domain_id(self):
    if not self._trustee_domain_id:
        try:
            access = self.domain_admin_auth.get_access(
                self.domain_admin_session)
        except kc_exception.Unauthorized:
            LOG.error(_LE("Keystone client authentication failed"))
            raise exception.AuthorizationFailure()
        self._trustee_domain_id = access.domain_id
    return self._trustee_domain_id
def wrapped(self, context, *args, **kwargs):
    try:
        return f(self, context, *args, **kwargs)
    except Exception as e:
        container_uuid = kwargs.get('container_uuid')
        if container_uuid is not None:
            LOG.exception(_LE("Error while connecting to docker "
                              "container %(name)s: %(error)s"),
                          {'name': container_uuid, 'error': str(e)})
        raise exception.ContainerException(
            "Docker internal Error: %s" % str(e))
def __call__(self, environ, start_response):
    # Request for this state, modified by replace_start_response()
    # and used when an error is being reported.
    state = {}

    def replacement_start_response(status, headers, exc_info=None):
        """Overrides the default response to make errors parsable."""
        try:
            status_code = int(status.split(' ')[0])
            state['status_code'] = status_code
        except (ValueError, TypeError):  # pragma: nocover
            raise Exception(
                _('ErrorDocumentMiddleware received an invalid '
                  'status %s') % status)
        else:
            if (state['status_code'] // 100) not in (2, 3):
                # Remove some headers so we can replace them later
                # when we have the full error message and can
                # compute the length.
                headers = [(h, v) for (h, v) in headers
                           if h not in ('Content-Length', 'Content-Type')]
            # Save the headers in case we need to modify them.
            state['headers'] = headers
            return start_response(status, headers, exc_info)

    app_iter = self.app(environ, replacement_start_response)
    if (state['status_code'] // 100) not in (2, 3):
        req = webob.Request(environ)
        if (req.accept.best_match(['application/json', 'application/xml'])
                == 'application/xml'):
            try:
                # simple check xml is valid
                body = [
                    et.ElementTree.tostring(
                        et.ElementTree.fromstring('<error_message>' +
                                                  '\n'.join(app_iter) +
                                                  '</error_message>'))
                ]
            except et.ElementTree.ParseError as err:
                LOG.error(_LE('Error parsing HTTP response: %s'), err)
                body = [
                    '<error_message>%s' % state['status_code'] +
                    '</error_message>'
                ]
            state['headers'].append(('Content-Type', 'application/xml'))
        else:
            body = [json.dumps({'error_message': '\n'.join(app_iter)})]
            state['headers'].append(('Content-Type', 'application/json'))
        state['headers'].append(('Content-Length', str(len(body[0]))))
    else:
        body = app_iter
    return body
def process_bind_param(self, value, dialect):
    if value is None:
        # Save a default value according to the current type to keep
        # the interface consistent.
        value = self.type()
    elif not isinstance(value, self.type):
        raise TypeError(_LE("%(class)s is supposed to store "
                            "%(type)s objects, but %(value)s "
                            "given") % {'class': self.__class__.__name__,
                                        'type': self.type.__name__,
                                        'value': type(value).__name__})
    serialized_value = json.dumps(value)
    return serialized_value
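# Context sketch (assumed class and method placement, not from the original
# module): process_bind_param above typically lives on a SQLAlchemy
# TypeDecorator that stores dicts or lists as JSON text; the symmetric hook
# below decodes rows back into Python objects.
import json

from sqlalchemy.types import TEXT, TypeDecorator


class JSONEncodedDict(TypeDecorator):
    """Represents a dict as a JSON-encoded string in a TEXT column."""
    impl = TEXT
    type = dict

    def process_result_value(self, value, dialect):
        # Mirror of process_bind_param: decode the stored JSON, falling
        # back to an empty instance of self.type for NULL columns.
        if value is None:
            return self.type()
        return json.loads(value)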
def _create_temp_file_with_content(self, content):
    """Creates a temp file and writes content to it.

    :param content: file content
    :returns: temp file
    """
    try:
        tmp = tempfile.NamedTemporaryFile(delete=True)
        tmp.write(content)
        tmp.flush()
    except Exception as err:
        LOG.error(_LE("Error while creating temp file: %s"), err)
        raise
    return tmp
def wrapped(self, context, *args, **kwargs):
    try:
        return f(self, context, *args, **kwargs)
    except Exception as e:
        container_uuid = None
        if 'container_uuid' in kwargs:
            container_uuid = kwargs.get('container_uuid')
        elif 'container' in kwargs:
            container_uuid = kwargs.get('container').uuid

        LOG.exception(_LE("Error while connecting to docker "
                          "container %s"), container_uuid)
        raise exception.ContainerException(
            "Docker internal Error: %s" % str(e))