def result(self):
    """Resolve and join the strings with the delimiter.

    :returns: the joined string
    :raises TypeError: if the value to join is not a list (a bare
        string is rejected too, since strings are also sequences),
        if the delimiter is not a string, or if any item is neither
        None nor a string
    """
    strings = function.resolve(self._strings)
    if strings is None:
        strings = []
    if (isinstance(strings, six.string_types) or
            not isinstance(strings, collections.Sequence)):
        raise TypeError(_('"%s" must operate on a list') % self.fn_name)

    delim = function.resolve(self._delim)
    if not isinstance(delim, six.string_types):
        raise TypeError(_('"%s" delimiter must be a string') %
                        self.fn_name)

    def ensure_string(s):
        # None is treated as the empty string; anything else must
        # already be a string (repr truncated to keep errors readable).
        if s is None:
            return ''
        if not isinstance(s, six.string_types):
            raise TypeError(
                _('Items to join must be strings %s') % (repr(s)[:200]))
        return s

    return delim.join(ensure_string(s) for s in strings)
def get_image_id_by_name(self, image_identifier):
    '''Return an id for the specified image name.

    :param image_identifier: image name
    :returns: the id of the requested :image_identifier:
    :raises: exception.ImageNotFound,
             exception.PhysicalResourceNameAmbiguity
    '''
    try:
        matches = self.client().images.find(name=image_identifier)
    except sahara_base.APIException as ex:
        raise exception.Error(
            _("Error retrieving image list from sahara: "
              "%s") % six.text_type(ex))

    # Exactly one match is required; anything else is an error.
    if not matches:
        LOG.info(_LI("Image %s was not found in sahara images"),
                 image_identifier)
        raise exception.ImageNotFound(image_name=image_identifier)
    if len(matches) > 1:
        LOG.info(_LI("Multiple images %s were found in sahara with name"),
                 image_identifier)
        raise exception.PhysicalResourceNameAmbiguity(
            name=image_identifier)
    return matches[0].id
def _secret_accesskey(self):
    """Return the user's access key.

    Fetching it from keystone if necessary.
    """
    if self._secret is None:
        if not self.resource_id:
            # No access key id stored yet, so there is nothing to look up.
            LOG.info(_LI('could not get secret for %(username)s '
                         'Error:%(msg)s'),
                     {'username': self.properties[self.USER_NAME],
                      'msg': "resource_id not yet set"})
        else:
            # First try to retrieve the secret from resource_data, but
            # for backwards compatibility, fall back to requesting from
            # keystone
            self._secret = self.data().get('secret_key')
            if self._secret is None:
                try:
                    user_id = self._get_user().resource_id
                    kp = self.keystone().get_ec2_keypair(
                        user_id=user_id, access=self.resource_id)
                    self._secret = kp.secret
                    # Store the key in resource_data
                    self.data_set('secret_key', kp.secret, redact=True)
                    # And the ID of the v3 credential
                    self.data_set('credential_id', kp.id, redact=True)
                except Exception as ex:
                    # Best-effort: a lookup failure is only logged and
                    # the placeholder value below is returned.
                    LOG.info(_LI('could not get secret for %(username)s '
                                 'Error:%(msg)s'),
                             {'username': self.properties[self.USER_NAME],
                              'msg': ex})
    # Placeholder value is returned when no secret could be obtained.
    return self._secret or '000-000-000'
def run_child(self):
    """Fork a worker process to serve requests on the shared socket.

    The parent records the child's pid; the child installs its own
    signal handlers and runs the WSGI server until it exits.
    """
    def child_hup(*args):
        """Shuts down child processes, existing requests are handled."""
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        eventlet.wsgi.is_accepting = False
        self.sock.close()

    pid = os.fork()
    if pid == 0:
        # Child process: SIGHUP triggers a graceful shutdown, SIGTERM
        # reverts to the default action.
        signal.signal(signal.SIGHUP, child_hup)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        # ignore the interrupt signal to avoid a race whereby
        # a child worker receives the signal before the parent
        # and is respawned unnecessarily as a result
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        # The child has no need to stash the unwrapped
        # socket, and the reference prevents a clean
        # exit on sighup
        self._sock = None
        self.run_server()
        LOG.info(_LI('Child %d exiting normally'), os.getpid())
        # self.pool.waitall() is now called in wsgi's server so
        # it's safe to exit here
        sys.exit(0)
    else:
        LOG.info(_LI('Started child %s'), pid)
        self.children.add(pid)
def handle_create(self):
    """Allocate a floating IP for the current tenant."""
    ips = None
    if self.properties[self.DOMAIN]:
        # Neutron path: allocate a floating IP on the external network
        # used by the internet gateway.
        ext_net = internet_gateway.InternetGateway.get_external_network_id(
            self.neutron())
        props = {'floating_network_id': ext_net}
        ips = self.neutron().create_floatingip({
            'floatingip': props})['floatingip']
        self.ipaddress = ips['floating_ip_address']
        self.resource_id_set(ips['id'])
        LOG.info(_LI('ElasticIp create %s'), str(ips))
    else:
        # Nova path: allocate from the default floating IP pool.
        try:
            ips = self.nova().floating_ips.create()
        except Exception as e:
            with excutils.save_and_reraise_exception():
                if self.client_plugin('nova').is_not_found(e):
                    LOG.error(_LE("No default floating IP pool configured."
                                  " Set 'default_floating_pool' in "
                                  "nova.conf."))
        if ips:
            self.ipaddress = ips.ip
            self.resource_id_set(ips.id)
            LOG.info(_LI('ElasticIp create %s'), str(ips))

    instance_id = self.properties[self.INSTANCE_ID]
    if instance_id:
        # Associate the new floating IP with the given server.
        server = self.nova().servers.get(instance_id)
        server.add_floating_ip(self._ipaddress())
def handle_signal(self, details=None):
    """Restart the watched instance when an alarm signal arrives."""
    if self.action in (self.SUSPEND, self.DELETE):
        msg = _('Cannot signal resource during %s') % self.action
        raise Exception(msg)

    # A missing payload is treated as an alarm transition.
    alarm_state = ('alarm' if details is None
                   else details.get('state', 'alarm').lower())

    LOG.info(_LI('%(name)s Alarm, new state %(state)s'),
             {'name': self.name, 'state': alarm_state})

    if alarm_state != 'alarm':
        return

    target_id = self.properties[self.INSTANCE_ID]
    target_resource = self.stack.resource_by_refid(target_id)
    if target_resource is None:
        LOG.info(_LI('%(name)s Alarm, can not find instance '
                     '%(instance)s'),
                 {'name': self.name, 'instance': target_id})
        return

    LOG.info(_LI('%(name)s Alarm, restarting resource: %(victim)s'),
             {'name': self.name, 'victim': target_resource.name})
    self.stack.restart_resource(target_resource.name)
def __init__(self, stack, fn_name, args):
    """Initialise the GetAtt function and parse its arguments.

    :param stack: the stack this function belongs to
    :param fn_name: the template name of the function
    :param args: the raw arguments from the template
    """
    super(GetAtt, self).__init__(stack, fn_name, args)
    # Split the args into the referenced resource and its attribute.
    self._resource_name, self._attribute = self._parse_args()
def __init__(self, stack, fn_name, args):
    """Initialise the parameter reference.

    Keeps a handle on the stack's parameters for later resolution.
    """
    super(ParamRef, self).__init__(stack, fn_name, args)
    self.parameters = self.stack.parameters
def get_image_id_by_name(self, image_identifier):
    '''Return an id for the specified image name.

    :param image_identifier: image name
    :returns: the id of the requested :image_identifier:
    :raises: exception.EntityNotFound,
             exception.PhysicalResourceNameAmbiguity
    '''
    try:
        matches = list(self.client().images.list(
            filters={'name': image_identifier}))
    except exc.ClientException as ex:
        raise exception.Error(
            _("Error retrieving image list from glance: %s") % ex)

    # Exactly one match is required; anything else is an error.
    if not matches:
        LOG.info(_LI("Image %s was not found in glance"),
                 image_identifier)
        raise exception.EntityNotFound(entity='Image',
                                       name=image_identifier)
    if len(matches) > 1:
        LOG.info(_LI("Multiple images %s were found in glance with name"),
                 image_identifier)
        raise exception.PhysicalResourceNameAmbiguity(
            name=image_identifier)
    return matches[0].id
def _wait(self):
    """Generator that polls signal statuses until success or failure.

    Yields control back to the scheduler between polls; returns when
    enough successes have arrived, raises SwiftSignalTimeout on
    scheduler timeout and SwiftSignalFailure on a failure signal.
    """
    while True:
        try:
            yield
        except scheduler.Timeout:
            # (A redundant, unused read of COUNT before this raise was
            # removed: the value was dead code.)
            raise SwiftSignalTimeout(self)

        count = self.properties.get(self.COUNT)
        statuses = self.get_status()
        if not statuses:
            continue

        for status in statuses:
            if status == self.STATUS_FAILURE:
                failure = SwiftSignalFailure(self)
                LOG.info(_LI('%(name)s Failed (%(failure)s)'),
                         {'name': str(self), 'failure': str(failure)})
                raise failure
            elif status != self.STATUS_SUCCESS:
                raise exception.Error(_("Unknown status: %s") % status)

        if len(statuses) >= count:
            LOG.info(_LI("%s Succeeded"), str(self))
            return
def _delete_credentials(self, stack_status, reason, abandon):
    """Remove credentials associated with a soft-deleted stack.

    Deletes any trust, the stored user credentials and (unless the
    stack is being abandoned) the stack's keystone domain project.

    :param stack_status: current stack status; rewritten to FAILED if
        cleanup hits an error
    :param reason: current status reason; likewise may be rewritten
    :param abandon: when True, the stack domain project is kept
    :returns: a (stack_status, reason) tuple
    """
    # Cleanup stored user_creds so they aren't accessible via
    # the soft-deleted stack which remains in the DB
    # The stack_status and reason passed in are current values, which
    # may get rewritten and returned from this method
    if self.user_creds_id:
        user_creds = db_api.user_creds_get(self.user_creds_id)
        # If we created a trust, delete it
        if user_creds is not None:
            trust_id = user_creds.get('trust_id')
            if trust_id:
                try:
                    # If the trustor doesn't match the context user the
                    # we have to use the stored context to cleanup the
                    # trust, as although the user evidently has
                    # permission to delete the stack, they don't have
                    # rights to delete the trust unless an admin
                    trustor_id = user_creds.get('trustor_user_id')
                    if self.context.user_id != trustor_id:
                        LOG.debug('Context user_id doesn\'t match '
                                  'trustor, using stored context')
                        sc = self.stored_context()
                        sc.clients.client('keystone').delete_trust(
                            trust_id)
                    else:
                        self.clients.client('keystone').delete_trust(
                            trust_id)
                except Exception as ex:
                    # A trust deletion failure marks the stack FAILED
                    # but does not abort the rest of the cleanup.
                    LOG.exception(ex)
                    stack_status = self.FAILED
                    reason = ("Error deleting trust: %s" %
                              six.text_type(ex))

        # Delete the stored credentials
        try:
            db_api.user_creds_delete(self.context, self.user_creds_id)
        except exception.NotFound:
            LOG.info(_LI("Tried to delete user_creds that do not exist "
                         "(stack=%(stack)s user_creds_id=%(uc)s)"),
                     {'stack': self.id, 'uc': self.user_creds_id})

        # Clear the reference on the stack row itself.
        try:
            self.user_creds_id = None
            self.store()
        except exception.NotFound:
            LOG.info(_LI("Tried to store a stack that does not exist %s"),
                     self.id)

    # If the stack has a domain project, delete it
    if self.stack_user_project_id and not abandon:
        try:
            keystone = self.clients.client('keystone')
            keystone.delete_stack_domain_project(
                project_id=self.stack_user_project_id)
        except Exception as ex:
            LOG.exception(ex)
            stack_status = self.FAILED
            reason = "Error deleting project: %s" % six.text_type(ex)

    return stack_status, reason
def result(self):
    """Apply all placeholder substitutions to the template string.

    :returns: the template string with every placeholder replaced
    :raises TypeError: if the template is not a string, the params are
        not a map, a placeholder is not a string, or a value is not a
        string/number/bool
    """
    template = function.resolve(self._string)
    mapping = function.resolve(self._mapping)

    if not isinstance(template, six.string_types):
        raise TypeError(_('"%s" template must be a string') % self.fn_name)

    if not isinstance(mapping, collections.Mapping):
        raise TypeError(_('"%s" params must be a map') % self.fn_name)

    def replace(string, change):
        placeholder, value = change

        if not isinstance(placeholder, six.string_types):
            raise TypeError(_('"%s" param placeholders must be strings') %
                            self.fn_name)

        if value is None:
            value = ''

        if not isinstance(value, (six.string_types, six.integer_types,
                                  float, bool)):
            raise TypeError(_('"%s" params must be strings or numbers') %
                            self.fn_name)

        # six.text_type instead of the Python 2-only unicode() builtin.
        return string.replace(placeholder, six.text_type(value))

    # six.moves.reduce: the bare reduce() builtin no longer exists on
    # Python 3.
    return six.moves.reduce(replace, six.iteritems(mapping), template)
def __call__(self):
    """Return a co-routine which extends the volume and waits for it.

    Yields while the volume is in 'extending' state; raises
    ResourceUnknownStatus if it does not end up 'available'.
    """
    LOG.debug(str(self))
    cinder = self.clients.client('cinder').volumes
    vol = cinder.get(self.volume_id)

    try:
        cinder.extend(self.volume_id, self.size)
    except Exception as ex:
        # Client-side errors are wrapped with context; anything else
        # (e.g. connection problems) propagates unchanged.
        if self.clients.client_plugin('cinder').is_client_exception(ex):
            raise exception.Error(_(
                "Failed to extend volume %(vol)s - %(err)s") % {
                    'vol': vol.id, 'err': ex})
        else:
            raise

    yield

    # Poll until the volume leaves the transient 'extending' state.
    vol = cinder.get(self.volume_id)
    while vol.status == 'extending':
        LOG.debug("Volume %s is being extended" % self.volume_id)
        yield
        vol = cinder.get(self.volume_id)

    if vol.status != 'available':
        LOG.info(_LI("Resize failed: Volume %(vol)s is in %(status)s "
                     "state."), {'vol': vol.id, 'status': vol.status})
        raise resource.ResourceUnknownStatus(
            resource_status=vol.status,
            result=_('Volume resize failed'))

    LOG.info(_LI('%s - complete'), str(self))
def __call__(self):
    """Return a co-routine which runs the task."""
    LOG.debug(str(self))
    # Start the attachment via nova, then poll cinder for completion.
    va = self.clients.client('nova').volumes.create_server_volume(
        server_id=self.server_id, volume_id=self.volume_id,
        device=self.device)
    self.attachment_id = va.id
    yield

    cinder = self.clients.client('cinder')
    vol = cinder.volumes.get(self.volume_id)
    # 'available' and 'attaching' are transient here; anything else
    # ends the wait.
    while vol.status == 'available' or vol.status == 'attaching':
        LOG.debug('%(name)s - volume status: %(status)s'
                  % {'name': str(self), 'status': vol.status})
        yield
        vol = cinder.volumes.get(self.volume_id)

    if vol.status != 'in-use':
        LOG.info(_LI("Attachment failed - volume %(vol)s "
                     "is in %(status)s status"),
                 {"vol": vol.id, "status": vol.status})
        raise resource.ResourceUnknownStatus(
            resource_status=vol.status,
            result=_('Volume attachment failed'))

    LOG.info(_LI('%s - complete'), str(self))
def item(s):
    """Split one 'Name=Value' member string into a [name, value] pair.

    Only the first '=' is significant, so values may themselves
    contain '=' characters.
    """
    if not isinstance(s, six.string_types):
        raise TypeError(_("Member list items must be strings"))
    return s.split('=', 1)
def result(self):
    """Return the resolved argument unchanged.

    No actual base64 encoding is performed; the argument is only
    validated to be a string and passed through.
    """
    resolved = function.resolve(self.args)
    if not isinstance(resolved, six.string_types):
        raise TypeError(_('"%s" argument must be a string') % self.fn_name)
    return resolved
def result(self):
    """Perform the two-level lookup in the template's Mappings section.

    Resolves the map name, then the top-level and second-level keys,
    and returns the mapped value.
    """
    mapping = self.stack.t.maps[function.resolve(self._mapname)]
    key = function.resolve(self._mapkey)
    value = function.resolve(self._mapvalue)
    return mapping[key][value]
def _from_db_object(tag, db_tag): LOG.info(_LI("soumiyajit:: /home/pankaj/python_program/logs/stack_tag.py\Class StackTag_from_db_object ")) LOG.info(_LI("soumiyajit:: class StackTag(base.VersionedObject, ")) if db_tag is None: return None for field in tag.fields: tag[field] = db_tag[field] tag.obj_reset_changes() return tag
def _remove_children(self, pid):
    """Forget a terminated child pid, logging which set it came from."""
    for pid_set, message in (
            (self.children, _LI('Removed dead child %s')),
            (self.stale_children, _LI('Removed stale child %s'))):
        if pid in pid_set:
            pid_set.remove(pid)
            LOG.info(message, pid)
            return
    # The pid was in neither tracking set.
    LOG.warning(_LW('Unrecognised child %s'), pid)
def acquire(self, retry=True):
    """Acquire a lock on the stack.

    :param retry: When True, retry if lock was released while stealing.
    :type retry: boolean
    """
    # create() returns None on success, or the id of the engine that
    # already holds the lock.
    lock_engine_id = stack_lock_object.StackLock.create(self.context,
                                                        self.stack_id,
                                                        self.engine_id)
    if lock_engine_id is None:
        LOG.debug("Engine %(engine)s acquired lock on stack "
                  "%(stack)s" % {'engine': self.engine_id,
                                 'stack': self.stack_id})
        return

    stack = stack_object.Stack.get_by_id(self.context, self.stack_id,
                                         show_deleted=True,
                                         eager_load=False)
    if (lock_engine_id == self.engine_id or
            service_utils.engine_alive(self.context, lock_engine_id)):
        # The holder is us or a live engine: the action genuinely is
        # still in progress.
        LOG.debug("Lock on stack %(stack)s is owned by engine "
                  "%(engine)s" % {'stack': self.stack_id,
                                  'engine': lock_engine_id})
        raise exception.ActionInProgress(stack_name=stack.name,
                                         action=stack.action)
    else:
        # The holding engine appears dead: try to steal the lock.
        LOG.info(_LI("Stale lock detected on stack %(stack)s. Engine "
                     "%(engine)s will attempt to steal the lock"),
                 {'stack': self.stack_id, 'engine': self.engine_id})

        result = stack_lock_object.StackLock.steal(self.context,
                                                   self.stack_id,
                                                   lock_engine_id,
                                                   self.engine_id)

        if result is None:
            LOG.info(_LI("Engine %(engine)s successfully stole the lock "
                         "on stack %(stack)s"),
                     {'engine': self.engine_id,
                      'stack': self.stack_id})
            return
        elif result is True:
            if retry:
                LOG.info(_LI("The lock on stack %(stack)s was released "
                             "while engine %(engine)s was stealing it. "
                             "Trying again"),
                         {'stack': self.stack_id,
                          'engine': self.engine_id})
                # Single retry only, to avoid retrying forever.
                return self.acquire(retry=False)
        else:
            new_lock_engine_id = result
            LOG.info(_LI("Failed to steal lock on stack %(stack)s. "
                         "Engine %(engine)s stole the lock first"),
                     {'stack': self.stack_id,
                      'engine': new_lock_engine_id})

        raise exception.ActionInProgress(
            stack_name=stack.name, action=stack.action)
def __init__(self, stack, fn_name, args):
    """Initialise Fn::FindInMap, expecting exactly three arguments.

    A wrong number of arguments is reported as a KeyError.
    """
    super(FindInMap, self).__init__(stack, fn_name, args)

    try:
        self._mapname, self._mapkey, self._mapvalue = self.args
    except ValueError as ex:
        raise KeyError(six.text_type(ex))
def ensure_string(s):
    """Coerce a joinable item to a string.

    None becomes the empty string; any other non-string raises
    TypeError (with the offending repr truncated to 200 characters).
    """
    if s is None:
        return ''
    if not isinstance(s, six.string_types):
        raise TypeError(
            _('Items to join must be strings %s') % (repr(s)[:200]))
    return s
def dep_attrs(self, resource_name):
    """Return the attributes of resource_name referenced by this function.

    Chains the base class's referenced attributes with this
    function's own attribute when it targets the named resource.
    """
    if self._resource().name == resource_name:
        attrs = [function.resolve(self._attribute)]
    else:
        attrs = []
    return itertools.chain(super(GetAtt, self).dep_attrs(resource_name),
                           attrs)
def __init__(self, stack, fn_name, args):
    """Initialise Fn::Replace, validating that the params are a mapping."""
    super(Replace, self).__init__(stack, fn_name, args)
    self._mapping, self._string = self._parse_args()

    if not isinstance(self._mapping, collections.Mapping):
        raise TypeError(_('"%s" parameters must be a mapping') %
                        self.fn_name)
def _resource(self, path='unknown'):
    """Return the resource object this function refers to.

    :param path: template path reported when the referenced resource
        does not exist
    :raises: exception.InvalidTemplateReference
    """
    resource_name = function.resolve(self._resource_name)

    try:
        return self.stack[resource_name]
    except KeyError:
        raise exception.InvalidTemplateReference(resource=resource_name,
                                                 key=path)
def validate(self):
    """Validate the function, checking the attribute exists.

    The schema check is only applied to resources that use the base
    FnGetAtt implementation; resources that override it may expose
    attributes beyond their declared schema.
    """
    super(GetAtt, self).validate()
    res = self._resource()

    attr = function.resolve(self._attribute)
    if (type(res).FnGetAtt == resource.Resource.FnGetAtt and
            attr not in res.attributes_schema.keys()):
        raise exception.InvalidTemplateAttribute(
            resource=self._resource_name, key=attr)
def result(self):
    """Return the resolved attribute value.

    Attributes are only queried once the resource has been through an
    action that gives it state; otherwise None is returned.
    """
    attribute = function.resolve(self._attribute)
    r = self._resource()
    if r.action in (r.CREATE, r.ADOPT, r.SUSPEND, r.RESUME, r.UPDATE):
        return r.FnGetAtt(attribute)
    else:
        return None
def result(self):
    """Return the list of availability zones.

    Falls back to the default 'nova' zone when there is no stack to
    query.
    """
    # TODO(therve): Implement region scoping
    # region = function.resolve(self.args)
    if self.stack is None:
        return ['nova']
    else:
        return self.stack.get_availability_zones()
def __init__(self, stack, fn_name, args):
    """Initialise Fn::ResourceFacade, validating the requested attribute."""
    super(ResourceFacade, self).__init__(stack, fn_name, args)

    if self.args not in self._RESOURCE_ATTRIBUTES:
        fmt_data = {'fn_name': self.fn_name,
                    'allowed': ', '.join(self._RESOURCE_ATTRIBUTES)}
        raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
                           'should be one of: %(allowed)s') % fmt_data)
def __init__(self, stack, fn_name, args):
    """Initialise Fn::Select, expecting [index, collection] arguments."""
    super(Select, self).__init__(stack, fn_name, args)

    try:
        self._lookup, self._strings = self.args
    except ValueError:
        raise ValueError(_('Arguments to "%s" must be of the form '
                           '[index, collection]') % self.fn_name)
def _resolve_attribute(self, name):
    """Resolve a resource attribute to its string value.

    The availability-zone attribute is computed separately; all other
    declared attributes resolve to the IP address. Falsy results
    resolve to None.
    """
    if name == self.AVAILABILITY_ZONE_ATTR:
        res = self._availability_zone()
    elif name in self.ATTRIBUTES[1:]:
        res = self._ipaddress()
    else:
        res = None

    LOG.info(_LI('%(name)s._resolve_attribute(%(attname)s) == %(res)s'),
             {'name': self.name, 'attname': name, 'res': res})
    return six.text_type(res) if res else None
def start(self):
    """Create the messaging target and start the RPC server."""
    self.target = oslo_messaging.Target(
        version=self.RPC_API_VERSION,
        server=self.engine_id,
        topic=self.topic)

    LOG.info(_LI("Starting %(topic)s (%(version)s) in engine %(engine)s."),
             {'topic': self.topic,
              'version': self.RPC_API_VERSION,
              'engine': self.engine_id})

    self._rpc_server = rpc_messaging.get_rpc_server(self.target, self)
    self._rpc_server.start()
def handle_create(self):
    """Create the sahara node group template from resource properties.

    Reads all configured properties, resolves the flavor name to an id
    and (on neutron deployments) the floating IP pool to a network id,
    then creates the template and stores its id as the resource id.

    :returns: the new resource id
    """
    plugin_name = self.properties[self.PLUGIN_NAME]
    hadoop_version = self.properties[self.HADOOP_VERSION]
    node_processes = self.properties[self.NODE_PROCESSES]
    description = self.properties[self.DESCRIPTION]
    flavor_id = self.client_plugin("nova").get_flavor_id(
        self.properties[self.FLAVOR])
    volumes_per_node = self.properties[self.VOLUMES_PER_NODE]
    volumes_size = self.properties[self.VOLUMES_SIZE]
    volume_type = self.properties[self.VOLUME_TYPE]
    floating_ip_pool = self.properties[self.FLOATING_IP_POOL]
    security_groups = self.properties[self.SECURITY_GROUPS]
    auto_security_group = self.properties[self.AUTO_SECURITY_GROUP]
    availability_zone = self.properties[self.AVAILABILITY_ZONE]
    vol_availability_zone = self.properties[self.VOLUMES_AVAILABILITY_ZONE]
    image_id = self.properties[self.IMAGE_ID]
    if floating_ip_pool and self.is_using_neutron():
        # With neutron the pool is named by a network; resolve it to
        # the canonical network id.
        floating_ip_pool = self.client_plugin(
            'neutron').find_neutron_resource(self.properties,
                                             self.FLOATING_IP_POOL,
                                             'network')
    node_configs = self.properties[self.NODE_CONFIGS]
    is_proxy_gateway = self.properties[self.IS_PROXY_GATEWAY]
    volume_local_to_instance = self.properties[
        self.VOLUME_LOCAL_TO_INSTANCE]
    use_autoconfig = self.properties[self.USE_AUTOCONFIG]

    node_group_template = self.client().node_group_templates.create(
        self._ngt_name(),
        plugin_name, hadoop_version, flavor_id,
        description=description,
        volumes_per_node=volumes_per_node,
        volumes_size=volumes_size,
        volume_type=volume_type,
        node_processes=node_processes,
        floating_ip_pool=floating_ip_pool,
        node_configs=node_configs,
        security_groups=security_groups,
        auto_security_group=auto_security_group,
        availability_zone=availability_zone,
        volumes_availability_zone=vol_availability_zone,
        image_id=image_id,
        is_proxy_gateway=is_proxy_gateway,
        volume_local_to_instance=volume_local_to_instance,
        use_autoconfig=use_autoconfig)
    LOG.info(_LI("Node Group Template '%s' has been created"),
             node_group_template.name)
    self.resource_id_set(node_group_template.id)
    return self.resource_id
def adjust(self, adjustment, adjustment_type=CHANGE_IN_CAPACITY):
    """
    Adjust the size of the scaling group if the cooldown permits.

    :param adjustment: the requested change, interpreted according to
        adjustment_type
    :param adjustment_type: how to interpret the adjustment value
    """
    if self._cooldown_inprogress():
        LOG.info(_LI("%(name)s NOT performing scaling adjustment, "
                     "cooldown %(cooldown)s"),
                 {'name': self.name,
                  'cooldown': self.properties[self.COOLDOWN]})
        return

    capacity = grouputils.get_size(self)
    lower = self.properties[self.MIN_SIZE]
    upper = self.properties[self.MAX_SIZE]

    new_capacity = _calculate_new_capacity(capacity, adjustment,
                                           adjustment_type, lower, upper)

    # send a notification before, on-error and on-success.
    notif = {
        'stack': self.stack,
        'adjustment': adjustment,
        'adjustment_type': adjustment_type,
        'capacity': capacity,
        'groupname': self.FnGetRefId(),
        'message': _("Start resizing the group %(group)s") % {
            'group': self.FnGetRefId()},
        'suffix': 'start',
    }
    notification.send(**notif)
    try:
        self.resize(new_capacity)
    except Exception as resize_ex:
        with excutils.save_and_reraise_exception():
            try:
                notif.update({'suffix': 'error',
                              'message': six.text_type(resize_ex),
                              })
                notification.send(**notif)
            except Exception:
                # A notification failure must not mask the resize error.
                LOG.exception(_LE('Failed sending error notification'))
    else:
        notif.update({
            'suffix': 'end',
            'capacity': new_capacity,
            'message': _("End resizing the group %(group)s") % {
                'group': notif['groupname']},
        })
        notification.send(**notif)

    # Only reached when the resize did not re-raise, so the cooldown
    # timestamp records a successful adjustment.
    self._cooldown_timestamp("%s : %s" % (adjustment_type, adjustment))
def _register_info(self, path, info):
    """place the new info in the correct location in the registry.

    path: a list of keys ['resources', 'my_server', 'OS::Nova::Server']
    """
    descriptive_path = '/'.join(path)
    name = path[-1]
    # create the structure if needed
    registry = self._registry
    for key in path[:-1]:
        if key not in registry:
            registry[key] = {}
        registry = registry[key]

    if info is None:
        if name.endswith('*'):
            # delete all matching entries. Iterate over a snapshot of
            # the keys: deleting from a live dict view while iterating
            # it raises RuntimeError on Python 3.
            for res_name in list(registry.keys()):
                if (isinstance(registry[res_name], ResourceInfo) and
                        res_name.startswith(name[:-1])):
                    LOG.warn(_LW('Removing %(item)s from %(path)s'), {
                        'item': res_name,
                        'path': descriptive_path})
                    del registry[res_name]
        else:
            # delete this entry.
            LOG.warn(_LW('Removing %(item)s from %(path)s'), {
                'item': name,
                'path': descriptive_path})
            registry.pop(name, None)
        return

    if name in registry and isinstance(registry[name], ResourceInfo):
        if registry[name] == info:
            # Already registered with an identical mapping: no-op.
            return
        details = {
            'path': descriptive_path,
            'was': str(registry[name].value),
            'now': str(info.value)}
        LOG.warn(_LW('Changing %(path)s from %(was)s to %(now)s'), details)
    else:
        LOG.info(_LI('Registering %(path)s -> %(value)s'), {
            'path': descriptive_path,
            'value': str(info.value)})
    if isinstance(info, ClassResourceInfo):
        if info.value.support_status.status != support.SUPPORTED:
            warnings.warn(six.text_type(info.value.support_status.message))
    info.user_resource = (self.global_registry is not None)
    registry[name] = info
def _do_check_resource(self, cnxt, current_traversal, tmpl, resource_data,
                       is_update, rsrc, stack, adopt_stack_data):
    """Check a single resource as part of a convergence traversal.

    :returns: True when the check succeeded; False when the traversal
        must not continue past this resource (replacement dispatched,
        failure, stale-traversal timeout, or cancellation).
    """
    try:
        if is_update:
            try:
                check_resource_update(rsrc, tmpl.id, resource_data,
                                      self.engine_id,
                                      stack, self.msg_queue)
            except resource.UpdateReplace:
                # The resource cannot be updated in place: make a
                # replacement and re-dispatch the check against it.
                new_res_id = rsrc.make_replacement(tmpl.id)
                LOG.info(_LI("Replacing resource with new id %s"),
                         new_res_id)
                rpc_data = sync_point.serialize_input_data(resource_data)
                self._rpc_client.check_resource(cnxt,
                                                new_res_id,
                                                current_traversal,
                                                rpc_data, is_update,
                                                adopt_stack_data)
                return False
        else:
            check_resource_cleanup(rsrc, tmpl.id, resource_data,
                                   self.engine_id,
                                   stack.time_remaining(), self.msg_queue)

        return True
    except exception.UpdateInProgress:
        # Another engine holds the resource lock; if it can be stolen,
        # mark the resource FAILED and re-dispatch the check.
        if self._try_steal_engine_lock(cnxt, rsrc.id):
            rpc_data = sync_point.serialize_input_data(resource_data)
            # set the resource state as failed
            status_reason = ('Worker went down '
                             'during resource %s' % rsrc.action)
            rsrc.state_set(rsrc.action,
                           rsrc.FAILED,
                           six.text_type(status_reason))
            self._rpc_client.check_resource(cnxt,
                                            rsrc.id,
                                            current_traversal,
                                            rpc_data, is_update,
                                            adopt_stack_data)
    except exception.ResourceFailure as ex:
        action = ex.action or rsrc.action
        reason = 'Resource %s failed: %s' % (action,
                                             six.text_type(ex))
        self._handle_resource_failure(cnxt, is_update, rsrc.id,
                                      stack, reason)
    except scheduler.Timeout:
        # reload the stack to verify current traversal
        stack = parser.Stack.load(cnxt, stack_id=stack.id)
        if stack.current_traversal != current_traversal:
            # A newer traversal superseded this one: the timeout is
            # stale and can be ignored.
            return
        self._handle_stack_timeout(cnxt, stack)
    except CancelOperation:
        # Cancellation is not an error; stop quietly.
        pass

    return False
def _secret_accesskey(self):
    """Return the user's access key.

    Fetching it from keystone if necessary.
    """
    if self._secret is None:
        if not self.resource_id:
            # No access key id stored yet, so there is nothing to look up.
            LOG.info(
                _LI('could not get secret for %(username)s '
                    'Error:%(msg)s'), {
                        'username': self.properties[self.USER_NAME],
                        'msg': "resource_id not yet set"
                    })
        else:
            # First try to retrieve the secret from resource_data, but
            # for backwards compatibility, fall back to requesting from
            # keystone
            self._secret = self.data().get('secret_key')
            if self._secret is None:
                try:
                    user_id = self._get_user().resource_id
                    kp = self.keystone().get_ec2_keypair(
                        user_id=user_id, access=self.resource_id)
                    self._secret = kp.secret
                    # Store the key in resource_data
                    self.data_set('secret_key', kp.secret, redact=True)
                    # And the ID of the v3 credential
                    self.data_set('credential_id', kp.id, redact=True)
                except Exception as ex:
                    # Best-effort: a lookup failure is only logged and
                    # the placeholder value below is returned.
                    LOG.info(
                        _LI('could not get secret for %(username)s '
                            'Error:%(msg)s'), {
                                'username':
                                self.properties[self.USER_NAME],
                                'msg': ex
                            })
    # Placeholder value is returned when no secret could be obtained.
    return self._secret or '000-000-000'
def stop(self):
    """Stop the worker's RPC server, then the base service."""
    server = self._rpc_server
    if server is None:
        return

    # Stop rpc connection at first for preventing new requests
    LOG.info(_LI("Stopping %(topic)s in engine %(engine)s."),
             {'topic': self.topic, 'engine': self.engine_id})
    try:
        server.stop()
        server.wait()
    except Exception as e:
        LOG.error(_LE("%(topic)s is failed to stop, %(exc)s"),
                  {'topic': self.topic, 'exc': e})

    super(WorkerService, self).stop()
def validate_template(self, req):
    """
    Implements the ValidateTemplate API action.
    Validates the specified template.
    """
    self._enforce(req, 'ValidateTemplate')

    con = req.context
    # NOTE(review): errors are *returned* as exception objects rather
    # than raised — presumably serialised by the calling layer; verify
    # against the AWS API middleware.
    try:
        templ = self._get_template(req)
    except socket.gaierror:
        msg = _('Invalid Template URL')
        return exception.HeatInvalidParameterValueError(detail=msg)
    if templ is None:
        msg = _("TemplateBody or TemplateUrl were not given.")
        return exception.HeatMissingParameterError(detail=msg)

    try:
        template = template_format.parse(templ)
    except ValueError:
        msg = _("The Template must be a JSON or YAML document.")
        return exception.HeatInvalidParameterValueError(detail=msg)

    LOG.info(_LI('validate_template'))

    def format_validate_parameter(key, value):
        """
        Reformat engine output into the AWS "ValidateTemplate" format
        """
        return {
            'ParameterKey': key,
            'DefaultValue': value.get(rpc_api.PARAM_DEFAULT, ''),
            'Description': value.get(rpc_api.PARAM_DESCRIPTION, ''),
            'NoEcho': value.get(rpc_api.PARAM_NO_ECHO, 'false')
        }

    try:
        res = self.rpc_client.validate_template(con, template)
        if 'Error' in res:
            return api_utils.format_response('ValidateTemplate',
                                             res['Error'])
        res['Parameters'] = [format_validate_parameter(k, v)
                             for k, v in res['Parameters'].items()]
        return api_utils.format_response('ValidateTemplate', res)
    except Exception as ex:
        return exception.map_remote_error(ex)
def update_with_template(self, child_template, user_params=None,
                         timeout_mins=None):
    """Update the nested stack with the new template.

    :param child_template: the new template for the nested stack
    :param user_params: parameter values to pass to the nested stack
    :param timeout_mins: update timeout; defaults to the parent
        stack's timeout
    :returns: a cookie dict recording the nested stack's pre-update
        state (or a rollback target-action marker)
    """
    if self.id is None:
        self._store()

    if self.stack.action == self.stack.ROLLBACK:
        if self._try_rollback():
            LOG.info(_LI('Triggered nested stack %s rollback'),
                     self.physical_resource_name())
            return {'target_action': self.stack.ROLLBACK}

    nested_stack = self.nested()
    if nested_stack is None:
        # if the create failed for some reason and the nested
        # stack was not created, we need to create an empty stack
        # here so that the update will work.
        def _check_for_completion():
            while not self.check_create_complete():
                yield

        empty_temp = template_format.parse(
            "heat_template_version: '2013-05-23'")
        self.create_with_template(empty_temp, {})
        checker = scheduler.TaskRunner(_check_for_completion)
        checker(timeout=self.stack.timeout_secs())
        nested_stack = self.nested()

    if timeout_mins is None:
        timeout_mins = self.stack.timeout_mins

    kwargs = self._stack_kwargs(user_params, child_template)
    # Capture pre-update state so completion checks can detect change.
    cookie = {'previous': {
        'updated_at': nested_stack.updated_time,
        'state': nested_stack.state}}

    kwargs.update({
        'stack_identity': dict(nested_stack.identifier()),
        'args': {rpc_api.PARAM_TIMEOUT: timeout_mins}
    })
    with self.translate_remote_exceptions:
        result = None
        try:
            result = self.rpc_client()._update_stack(self.context,
                                                     **kwargs)
        finally:
            # Clean up the stored raw template if the RPC call failed
            # (result stays None on exception).
            if not result:
                raw_template.RawTemplate.delete(self.context,
                                                kwargs['template_id'])
    return cookie
def rule_actions(self, new_state):
    """Return the signal callables to invoke for a watch state change.

    Looks up the action list mapped to new_state in this watch rule;
    for each referenced resource, appends its signal method. Returns
    an empty list when no action applies or the stack is not in a
    processable state.
    """
    LOG.info(
        _LI('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
            'new_state:%(new_state)s'), {
                'stack': self.stack_id,
                'watch_name': self.name,
                'new_state': new_state
            })
    actions = []
    if self.ACTION_MAP[new_state] not in self.rule:
        LOG.info(_LI('no action for new state %s'), new_state)
    else:
        s = stack_object.Stack.get_by_id(self.context,
                                         self.stack_id,
                                         eager_load=True)
        stk = stack.Stack.load(self.context, stack=s)
        # Only act on live, fully-settled stacks.
        if (stk.action != stk.DELETE
                and stk.status == stk.COMPLETE):
            for refid in self.rule[self.ACTION_MAP[new_state]]:
                actions.append(stk.resource_by_refid(refid).signal)
        else:
            LOG.warn(_LW("Could not process watch state %s for stack"),
                     new_state)
    return actions
def _wait(self, handle, started_at, timeout_in):
    """Poll the wait-condition handle once.

    Returns True when enough successful signals have been received,
    False to keep waiting. Raises WaitConditionTimeout on expiry and
    WaitConditionFailure on any failed signal.
    """
    if timeutils.is_older_than(started_at, timeout_in):
        timeout_exc = wc_base.WaitConditionTimeout(self, handle)
        LOG.info(_LI('%(name)s Timed out (%(timeout)s)'),
                 {'name': str(self), 'timeout': str(timeout_exc)})
        raise timeout_exc

    statuses = handle.get_status()

    # A single non-success signal fails the whole wait condition.
    if not all(status == handle.STATUS_SUCCESS for status in statuses):
        failure_exc = wc_base.WaitConditionFailure(self, handle)
        LOG.info(_LI('%(name)s Failed (%(failure)s)'),
                 {'name': str(self), 'failure': str(failure_exc)})
        raise failure_exc

    if len(statuses) < self.properties[self.COUNT]:
        return False

    LOG.info(_LI("%s Succeeded"), str(self))
    return True
def check_create_complete(self, create_data):
    """Return True once enough successful swift signals have arrived.

    Raises SwiftSignalTimeout on expiry, SwiftSignalFailure if any
    signal reported failure, and Error for unrecognized statuses.
    """
    if timeutils.is_older_than(*create_data):
        raise SwiftSignalTimeout(self)

    signal_statuses = self.get_status()
    if not signal_statuses:
        return False

    for sig_status in signal_statuses:
        if sig_status == self.STATUS_SUCCESS:
            continue
        if sig_status == self.STATUS_FAILURE:
            failure_exc = SwiftSignalFailure(self)
            LOG.info(_LI('%(name)s Failed (%(failure)s)'),
                     {'name': str(self), 'failure': str(failure_exc)})
            raise failure_exc
        raise exception.Error(_("Unknown status: %s") % sig_status)

    if len(signal_statuses) < self.properties[self.COUNT]:
        return False

    LOG.info(_LI("%s Succeeded"), str(self))
    return True
def __call__(self):
    """Extend the volume and poll until the resize settles.

    Generator task: yields while the extend request is issued and
    while the volume remains in 'extending'. Raises Error if the
    extend call fails, and ResourceUnknownStatus if the volume does
    not end up 'available'.
    """
    LOG.debug(str(self))
    cinder = self.clients.client('cinder').volumes
    vol = cinder.get(self.volume_id)

    try:
        cinder.extend(self.volume_id, self.size)
    except Exception as ex:
        if self.clients.client_plugin('cinder').is_client_exception(ex):
            raise exception.Error(_(
                "Failed to extend volume %(vol)s - %(err)s") % {
                    'vol': vol.id, 'err': ex})
        else:
            raise

    yield

    vol = cinder.get(self.volume_id)
    # Poll until the volume leaves the transient 'extending' state.
    while vol.status == 'extending':
        LOG.debug("Volume %s is being extended" % self.volume_id)
        yield
        vol = cinder.get(self.volume_id)

    if vol.status != 'available':
        LOG.info(_LI("Resize failed: Volume %(vol)s "
                     "is in %(status)s state."),
                 {'vol': vol.id, 'status': vol.status})
        raise resource.ResourceUnknownStatus(
            resource_status=vol.status,
            result=_('Volume resize failed'))

    LOG.info(_LI('%s - complete'), str(self))
def _validate_network(self, network):
    """Validate a single entry of the server's networks property.

    Enforces: at least one of network id/uuid/port/subnet is set;
    the deprecated uuid key is not combined with id (warn if used
    alone); ports require Neutron; and fixed_ip conflicts with port.

    :param network: one network definition dict from the properties
    :raises: exception.StackValidationFailed,
        exception.ResourcePropertyConflict
    """
    net_uuid = network.get(self.NETWORK_UUID)
    net_id = network.get(self.NETWORK_ID)
    port = network.get(self.NETWORK_PORT)
    subnet = network.get(self.NETWORK_SUBNET)
    fixed_ip = network.get(self.NETWORK_FIXED_IP)
    if (net_id is None and port is None
            and net_uuid is None and subnet is None):
        msg = _('One of the properties "%(id)s", "%(port_id)s", '
                '"%(uuid)s" or "%(subnet)s" should be set for the '
                'specified network of server "%(server)s".'
                '') % dict(id=self.NETWORK_ID,
                           port_id=self.NETWORK_PORT,
                           uuid=self.NETWORK_UUID,
                           subnet=self.NETWORK_SUBNET,
                           server=self.name)
        raise exception.StackValidationFailed(message=msg)

    if net_uuid and net_id:
        msg = _('Properties "%(uuid)s" and "%(id)s" are both set '
                'to the network "%(network)s" for the server '
                '"%(server)s". The "%(uuid)s" property is deprecated. '
                'Use only "%(id)s" property.'
                '') % dict(uuid=self.NETWORK_UUID,
                           id=self.NETWORK_ID,
                           network=network[self.NETWORK_ID],
                           server=self.name)
        raise exception.StackValidationFailed(message=msg)
    elif net_uuid:
        # Deprecated-but-allowed path: warn, do not fail.
        LOG.info(_LI('For the server "%(server)s" the "%(uuid)s" '
                     'property is set to network "%(network)s". '
                     '"%(uuid)s" property is deprecated. Use '
                     '"%(id)s"  property instead.'),
                 dict(uuid=self.NETWORK_UUID,
                      id=self.NETWORK_ID,
                      network=network[self.NETWORK_ID],
                      server=self.name))
    if port and not self.is_using_neutron():
        msg = _('Property "%s" is supported only for '
                'Neutron.') % self.NETWORK_PORT
        raise exception.StackValidationFailed(message=msg)

    # Nova doesn't allow specify ip and port at the same time
    if fixed_ip and port:
        raise exception.ResourcePropertyConflict(
            "/".join([self.NETWORKS, self.NETWORK_FIXED_IP]),
            "/".join([self.NETWORKS, self.NETWORK_PORT]))
def check_create_complete(self, cluster):
    """Return True when every instance in the trove cluster is ACTIVE.

    Raises ResourceInError if any instance has entered a bad status.
    """
    cluster = self._refresh_cluster(cluster)

    for member in cluster.instances:
        member_status = member['status']
        if member_status in self.BAD_STATUSES:
            reason = self.TROVE_STATUS_REASON.get(member_status,
                                                  _("Unknown"))
            raise resource.ResourceInError(
                resource_status=member_status,
                status_reason=reason)
        if member_status != self.ACTIVE:
            # At least one member still building; keep polling.
            return False

    LOG.info(_LI("Cluster '%s' has been created"), cluster.name)
    return True
def handle_signal(self, details=None):
    """Restart the watched instance when an 'alarm' signal arrives.

    A missing details payload is treated as an alarm; any other
    state is logged and ignored.
    """
    alarm_state = ('alarm' if details is None
                   else details.get('state', 'alarm').lower())
    LOG.info(_LI('%(name)s Alarm, new state %(state)s'),
             {'name': self.name, 'state': alarm_state})

    # Only the 'alarm' state triggers a restart.
    if alarm_state != 'alarm':
        return

    target_id = self.properties[self.INSTANCE_ID]
    victim = self.stack.resource_by_refid(target_id)
    if victim is None:
        LOG.info(_LI('%(name)s Alarm, can not find instance '
                     '%(instance)s'),
                 {'name': self.name, 'instance': target_id})
        return

    LOG.info(_LI('%(name)s Alarm, restarting resource: %(victim)s'),
             {'name': self.name, 'victim': victim.name})
    self.stack.restart_resource(victim.name)
def resume(self):
    """Resume the resource.

    Subclasses should provide a handle_resume() method to implement
    resume.
    """
    action = self.RESUME

    # Resuming is only valid from the SUSPEND_COMPLETE state.
    if self.state != (self.SUSPEND, self.COMPLETE):
        invalid_exc = exception.Error(
            _('State %s invalid for resume')
            % six.text_type(self.state))
        raise exception.ResourceFailure(invalid_exc, self, action)

    LOG.info(_LI('resuming %s'), six.text_type(self))
    return self._do_action(action)
def _create(self):
    """Create an authenticated CBD client.

    :returns: an authenticated Lava client for the resolved region
    :raises: exception.AuthorizationFailure if authentication fails
    """
    region = cfg.CONF.region_name_for_services.lower()
    if self.context.region_name:
        # A per-request region overrides the configured default.
        region = self.context.region_name.lower()
    LOG.info(_LI("CBD client authenticating username %s in region %s"),
             self.context.username, region)
    tenant = self.context.tenant_id
    username = self.context.username
    endpoint_uri = ("https://{region}.bigdata.api.rackspacecloud.com:443/"
                    "v2/{tenant}".format(region=region,
                                         tenant=tenant))
    try:
        client = Lava(username=username,
                      tenant_id=self.context.tenant_id,
                      auth_url=self.context.auth_url,
                      api_key=None,
                      token=self.context.auth_token,
                      region=region,
                      endpoint=endpoint_uri,
                      verify_ssl=False)
    except LavaError as exc:
        LOG.warn(_LW("CBD client authentication failed: %s."), exc)
        raise exception.AuthorizationFailure()
    # Previously this log statement sat after the return/raise and was
    # unreachable; emit it before returning the client.
    LOG.info(_LI("CBD user %s authenticated successfully."), username)
    return client
def suspend(self):
    """Suspend the resource.

    Subclasses should provide a handle_suspend() method to implement
    suspend.
    """
    action = self.SUSPEND

    # Only a stable resource (not deleting, status COMPLETE) may be
    # suspended.
    unstable = (self.action == self.DELETE
                or self.status != self.COMPLETE)
    if unstable:
        invalid_exc = exception.Error(
            _('State %s invalid for suspend')
            % six.text_type(self.state))
        raise exception.ResourceFailure(invalid_exc, self, action)

    LOG.info(_LI('suspending %s'), six.text_type(self))
    return self._do_action(action)
def check_delete_complete(self, resource_id):
    """Return True once the cluster has been deleted.

    Raises ResourceInError if deletion left the cluster in the error
    status.
    """
    if not resource_id:
        return True

    try:
        cluster = self.client().clusters.get(resource_id)
    except Exception as ex:
        # A not-found error means deletion has finished; anything
        # else is re-raised by the plugin helper.
        self.client_plugin().ignore_not_found(ex)
        LOG.info(_LI("Cluster '%s' has been deleted"),
                 self._cluster_name())
        return True

    if cluster.status == self.CLUSTER_ERROR:
        raise exception.ResourceInError(resource_status=cluster.status)
    return False
def check(self):
    """Checks that the physical resource is in its expected state

    Gets the current status of the physical resource and updates the
    database accordingly. If check is not supported by the resource,
    default action is to fail and revert the resource's status to its
    original state with the added message that check was not performed.
    """
    action = self.CHECK
    LOG.info(_LI('Checking %s'), six.text_type(self))

    handler_name = 'handle_%s' % action.lower()
    if not hasattr(self, handler_name):
        # No handler: mark complete with a not-supported reason.
        reason = '%s not supported for %s' % (action, self.type())
        self.state_set(action, self.COMPLETE, reason)
        return
    return self._do_action(action)
def _create(self):
    """Build a cinderclient instance for the detected volume API.

    Chooses the service type and client version from the available
    volume API version, then wires in the context's auth token and
    the resolved management URL directly (no password auth).

    :returns: configured cinder client
    :raises: exception.Error when no volume service is available
    """
    con = self.context

    volume_api_version = self.get_volume_api_version()
    if volume_api_version == 1:
        service_type = self.VOLUME
        client_version = '1'
    elif volume_api_version == 2:
        service_type = self.VOLUME_V2
        client_version = '2'
    else:
        raise exception.Error(_('No volume service available.'))
    LOG.info(_LI('Creating Cinder client with volume API version %d.'),
             volume_api_version)

    endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
    args = {
        'service_type': service_type,
        'auth_url': con.auth_url or '',
        'project_id': con.tenant_id,
        # Token auth is used instead of username/api_key, which is
        # why these are None.
        'username': None,
        'api_key': None,
        'endpoint_type': endpoint_type,
        'http_log_debug': self._get_client_option(CLIENT_NAME,
                                                  'http_log_debug'),
        'cacert': self._get_client_option(CLIENT_NAME, 'ca_file'),
        'insecure': self._get_client_option(CLIENT_NAME, 'insecure')
    }

    client = cc.Client(client_version, **args)
    management_url = self.url_for(service_type=service_type,
                                  endpoint_type=endpoint_type)
    # Inject the existing token/endpoint so the client skips re-auth.
    client.client.auth_token = self.auth_token
    client.client.management_url = management_url

    client.volume_api_version = volume_api_version

    return client
def handle_create(self):
    """Start creation of the sahara cluster.

    Resolves the image, validates it against the cluster template,
    resolves the management network id (neutron or nova-network),
    and submits the cluster create request.

    :returns: the new cluster's resource id
    :raises: exception.StackValidationFailed when no image is
        available from either the properties or the template
    """
    plugin_name = self.properties[self.PLUGIN_NAME]
    hadoop_version = self.properties[self.HADOOP_VERSION]
    cluster_template_id = self.properties[self.CLUSTER_TEMPLATE_ID]
    image_id = (self.properties[self.IMAGE_ID] or
                self.properties[self.IMAGE])
    if image_id:
        image_id = self.client_plugin('glance').find_image_by_name_or_id(
            image_id)

    # check that image is provided in case when
    # cluster template is missing one
    cluster_template = self.client().cluster_templates.get(
        cluster_template_id)
    if cluster_template.default_image_id is None and not image_id:
        msg = _("%(img)s must be provided: Referenced cluster template "
                "%(tmpl)s has no default_image_id defined.") % {
                    'img': self.IMAGE, 'tmpl': cluster_template_id}
        raise exception.StackValidationFailed(message=msg)

    key_name = self.properties[self.KEY_NAME]
    net_id = self.properties[self.MANAGEMENT_NETWORK]
    if net_id:
        if self.is_using_neutron():
            net_id = self.client_plugin('neutron').find_neutron_resource(
                self.properties, self.MANAGEMENT_NETWORK, 'network')
        else:
            # nova-network deployments resolve the network differently
            net_id = self.client_plugin('nova').get_nova_network_id(
                net_id)
    use_autoconfig = self.properties[self.USE_AUTOCONFIG]
    shares = self.properties[self.SHARES]

    cluster = self.client().clusters.create(
        self._cluster_name(),
        plugin_name, hadoop_version,
        cluster_template_id=cluster_template_id,
        user_keypair_id=key_name,
        default_image_id=image_id,
        net_id=net_id,
        use_autoconfig=use_autoconfig,
        shares=shares)
    LOG.info(_LI('Cluster "%s" is being started.'), cluster.name)
    self.resource_id_set(cluster.id)
    return self.resource_id
def attach_ports(self, server):
    """Attach all of the server's known ports to its nova instance.

    :param server: resource whose stored ports are (re)attached
    :raises: exception.InterfaceAttachFailed when an attachment does
        not complete within the plugin's retry budget
    """
    prev_server_id = server.resource_id

    for port in self.get_all_ports(server):
        self.client_plugin().interface_attach(prev_server_id,
                                              port['id'])
        try:
            if self.client_plugin().check_interface_attach(
                    prev_server_id, port['id']):
                # Pass substitution values as lazy log arguments
                # (previously eagerly %-formatted), so interpolation
                # only happens when the record is emitted.
                LOG.info(_LI('Attach interface %(port)s successful to '
                             'server %(server)s'),
                         {'port': port['id'],
                          'server': prev_server_id})
        except retrying.RetryError:
            raise exception.InterfaceAttachFailed(
                port=port['id'], server=prev_server_id)
def detach_ports(self, server):
    """Detach all of the server's known ports from its nova instance.

    :param server: resource whose stored ports are detached
    :raises: exception.InterfaceDetachFailed when a detachment does
        not complete within the plugin's retry budget
    """
    existing_server_id = server.resource_id

    for port in self.get_all_ports(server):
        self.client_plugin().interface_detach(existing_server_id,
                                              port['id'])
        try:
            if self.client_plugin().check_interface_detach(
                    existing_server_id, port['id']):
                # Pass substitution values as lazy log arguments
                # (previously eagerly %-formatted), so interpolation
                # only happens when the record is emitted.
                LOG.info(_LI('Detach interface %(port)s successful from '
                             'server %(server)s.'),
                         {'port': port['id'],
                          'server': existing_server_id})
        except retrying.RetryError:
            raise exception.InterfaceDetachFailed(
                port=port['id'], server=existing_server_id)
def _resolve_attribute(self, key):
    """Resolve a load balancer attribute for GetAtt.

    Returns None implicitly when the resource has no physical id yet.

    :param key: attribute name (PUBLIC_IP or VIPS)
    :raises: exception.InvalidTemplateAttribute for unknown keys
    """
    if self.resource_id:
        lb = self.clb.get(self.resource_id)
        # Map each key to a thunk so only the requested attribute is
        # computed (previously every value was computed eagerly, even
        # for an invalid key).
        attribute_function = {
            self.PUBLIC_IP: lambda: self._public_ip(lb),
            self.VIPS: lambda: [{"id": vip.id,
                                 "type": vip.type,
                                 "ip_version": vip.ip_version}
                                for vip in lb.virtual_ips]
        }
        if key not in attribute_function:
            raise exception.InvalidTemplateAttribute(resource=self.name,
                                                     key=key)
        function = attribute_function[key]()
        LOG.info(_LI('%(name)s.GetAtt(%(key)s) == %(function)s'),
                 {'name': self.name,
                  'key': key,
                  'function': function})
        return function
def check_attach_volume_complete(self, vol_id):
    """Poll an attaching volume; True once it reaches 'in-use'.

    Raises ResourceUnknownStatus for any status other than
    'available', 'attaching', or 'in-use'.
    """
    volume = self.client().volumes.get(vol_id)
    status = volume.status

    if status in ('available', 'attaching'):
        # Attachment still in progress - keep polling.
        LOG.debug("Volume %(id)s is being attached - "
                  "volume status: %(status)s" % {'id': vol_id,
                                                 'status': status})
        return False

    if status == 'in-use':
        LOG.info(_LI('Attaching volume %(id)s complete'), {'id': vol_id})
        return True

    # Any other status means the attachment went wrong.
    LOG.debug("Attachment failed - volume %(vol)s is "
              "in %(status)s status" % {"vol": vol_id,
                                        "status": status})
    raise exception.ResourceUnknownStatus(
        resource_status=status,
        result=_('Volume attachment failed'))
def _break_if_required(self, action, hook):
    '''Block the resource until the hook is cleared if there is one.

    Generator task: yields while the hook remains set and the
    resource has not failed. An exception thrown into the generator
    while waiting clears the hook and records a failure event.
    '''
    if self.stack.env.registry.matches_hook(self.name, hook):
        self._add_event(self.action, self.status,
                        _("%(a)s paused until Hook %(h)s is cleared")
                        % {'a': action, 'h': hook})
        self.trigger_hook(hook)
        LOG.info(_LI('Reached hook on %s'), six.text_type(self))
    while self.has_hook(hook) and self.status != self.FAILED:
        try:
            yield
        except Exception:
            self.clear_hook(hook)
            # Fixed typo in the user-visible event message
            # ("occured" -> "occurred").
            self._add_event(self.action, self.status,
                            "Failure occurred while waiting.")
def wait_on_children(self):
    """Reap and restart dead child workers until the service stops.

    Loops on os.wait(); replaces any child that exited or was killed
    by a signal. On keyboard interrupt, signals the whole process
    group to terminate. Always shuts the listening socket down on the
    way out.
    """
    while self.running:
        try:
            pid, status = os.wait()
            if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                # Pass pid as a lazy log argument (previously the
                # message was eagerly %-formatted).
                LOG.error(_LE('Removing dead child %s'), pid)
                self.children.remove(pid)
                self.run_child()
        except OSError as err:
            # EINTR/ECHILD are expected while waiting; anything else
            # is a real error.
            if err.errno not in (errno.EINTR, errno.ECHILD):
                raise
        except KeyboardInterrupt:
            LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
            os.killpg(0, signal.SIGTERM)
            break
    eventlet.greenio.shutdown_safe(self.sock)
    self.sock.close()
    LOG.debug('Exited')