def update(self, plugin, context, mea_id, mea_dict, mea, auth_attr):
    """Merge an updated 'config' attribute into the MEA's stored config.

    Verifies the backing Heat stack still exists, deep-merges the YAML
    config supplied in the update request over the currently stored
    config, and writes the merged YAML back into
    ``mea_dict['attributes']['config']``.  A request with no config
    payload is a no-op.
    """
    placement = mea_dict.get('placement_attr', {})
    heatclient = hc.HeatClient(auth_attr, placement.get('region_name', None))
    # Existence check only; raises if the stack is gone.
    heatclient.get(mea_id)

    # update config attribute
    current_yaml = mea_dict.get('attributes', {}).get('config', '')
    requested_yaml = mea['mea'].get('attributes', {}).get('config', '')
    LOG.debug('yaml orig %(orig)s update %(update)s', {
        'orig': current_yaml, 'update': requested_yaml})

    # yaml.safe_load(None) raises AttributeError, so an absent/empty
    # stored config is treated as an empty mapping instead.
    if current_yaml:
        current_dict = yaml.safe_load(current_yaml) or {}
    else:
        current_dict = {}
    requested_dict = yaml.safe_load(requested_yaml)
    if not requested_dict:
        return

    LOG.debug('dict orig %(orig)s update %(update)s', {
        'orig': current_dict, 'update': requested_dict})
    utils.deep_update(current_dict, requested_dict)
    LOG.debug('dict new %(new)s update %(update)s', {
        'new': current_dict, 'update': requested_dict})
    mea_dict.setdefault('attributes', {})['config'] = yaml.safe_dump(
        current_dict)
def _delete_heat_stack(vim_auth):
    """Kick off deletion of the MEA's Heat stack (does not wait)."""
    # NOTE(review): closure — relies on mea_dict, context and
    # _log_monitor_events from an enclosing scope not visible in this
    # chunk; presumably part of a respawn action handler.
    placement_attr = mea_dict.get('placement_attr', {})
    region_name = placement_attr.get('region_name')
    heatclient = hc.HeatClient(auth_attr=vim_auth, region_name=region_name)
    # Fire-and-forget delete request for the stack backing this MEA.
    heatclient.delete(mea_dict['instance_id'])
    LOG.debug("Heat stack %s delete initiated", mea_dict['instance_id'])
    _log_monitor_events(context, mea_dict, "ActionRespawnHeat invoked")
def update_wait(self, plugin, context, mea_id, auth_attr, region_name=None):
    """Check that the stack for *mea_id* exists after an update.

    No polling is performed; a single GET raises if the stack is
    missing.
    """
    client = hc.HeatClient(auth_attr, region_name)
    client.get(mea_id)
def create(self, plugin, context, mea, auth_attr):
    """Translate the MEA's TOSCA template to HOT and create its stack.

    Returns the id of the newly created Heat stack.
    """
    LOG.debug('mea %s', mea)
    region = mea.get('placement_attr', {}).get('region_name', None)
    client = hc.HeatClient(auth_attr, region)

    translator = translate_template.TOSCAToHOT(mea, client)
    translator.generate_hot()

    created = self._create_stack(client, translator.mea, translator.fields)
    return created['stack']['id']
def scale(self, context, plugin, auth_attr, policy, region_name):
    """Signal the Heat scaling-policy resource for *policy*.

    The most recent event id is captured *before* signalling so the
    caller (scale_wait) can detect when a newer event appears.
    Returns that last pre-signal event id.
    """
    client = hc.HeatClient(auth_attr, region_name)
    resource = get_scaling_policy_name(policy_name=policy['name'],
                                       action=policy['action'])
    stack_id = policy['instance_id']
    latest = client.resource_event_list(stack_id, resource, limit=1,
                                        sort_dir='desc',
                                        sort_keys='event_time')
    client.resource_signal(stack_id, resource)
    return latest[0].id
def delete_wait(self, plugin, context, mea_id, auth_attr, region_name=None):
    """Poll Heat until the stack for *mea_id* is fully deleted.

    Retries up to STACK_RETRIES times, sleeping STACK_RETRY_WAIT
    seconds between polls.  Returns silently if the stack disappears
    (HTTPNotFound) while polling.  Raises MEADeleteWaitFailed when the
    wait budget is exhausted or the stack ends in any state other than
    DELETE_COMPLETE.
    """
    heatclient = hc.HeatClient(auth_attr, region_name)
    stack = heatclient.get(mea_id)
    status = stack.stack_status
    error_reason = None
    stack_retries = self.STACK_RETRIES
    while (status == 'DELETE_IN_PROGRESS' and stack_retries > 0):
        time.sleep(self.STACK_RETRY_WAIT)
        try:
            stack = heatclient.get(mea_id)
        except heatException.HTTPNotFound:
            # Stack is gone: deletion finished between two polls.
            return
        except Exception:
            LOG.warning("MEA Instance cleanup may not have "
                        "happened because Heat API request failed "
                        "while waiting for the stack %(stack)s to be "
                        "deleted", {'stack': mea_id})
            # Just like create wait, ignore the exception to
            # avoid temporary connection error.
        # NOTE: after a swallowed (non-404) failure this re-reads the
        # *stale* stack object, deliberately continuing with the last
        # known status until retries run out.
        status = stack.stack_status
        stack_retries = stack_retries - 1
    if stack_retries == 0 and status != 'DELETE_COMPLETE':
        # Timed out while the stack was still DELETE_IN_PROGRESS.
        error_reason = _("Resource cleanup for mea is"
                         " not completed within {wait} seconds as "
                         "deletion of Stack {stack} is "
                         "not completed").format(
            stack=mea_id,
            wait=(self.STACK_RETRIES * self.STACK_RETRY_WAIT))
        LOG.warning(error_reason)
        raise mem.MEADeleteWaitFailed(reason=error_reason)
    if stack_retries != 0 and status != 'DELETE_COMPLETE':
        # Loop exited early on a terminal-but-failed status
        # (e.g. DELETE_FAILED).
        error_reason = _("mea {mea_id} deletion is not completed. "
                         "{stack_status}").format(mea_id=mea_id,
                                                  stack_status=status)
        LOG.warning(error_reason)
        raise mem.MEADeleteWaitFailed(reason=error_reason)
def get_resource_info(self, plugin, context, mea_info, auth_attr,
                      region_name=None):
    """Return per-resource details for the MEA's Heat stack.

    The result maps each resource name to a dict with its physical
    ``id`` and Heat resource ``type``.  Raises InfraDriverUnreachable
    when the Heat API cannot be reached.
    """
    stack_id = mea_info['instance_id']
    client = hc.HeatClient(auth_attr, region_name)
    try:
        # nested_depth=2 is used to get VDU resources
        # in case of nested template
        resources = client.resource_get_list(stack_id, nested_depth=2)
        details = {}
        for res in resources:
            details[res.resource_name] = {
                "id": res.physical_resource_id,
                "type": res.resource_type,
            }
        return details
    # Raise exception when Heat API service is not available
    except Exception:
        raise mem.InfraDriverUnreachable(service="Heat API service")
def scale_wait(self, context, plugin, auth_attr, policy, region_name,
               last_event_id):
    """Wait for a scaling signal on *policy* to complete.

    Polls the Heat event list of the scaling-policy resource until an
    event newer than *last_event_id* reports SIGNAL_COMPLETE.  Raises
    MEAScaleWaitFailed on a Heat API error, or when the wait budget
    (STACK_RETRIES * STACK_RETRY_WAIT seconds) is exhausted — unless
    Heat metadata shows the signal was ignored because it fell inside
    the cool-down window, in which case the wait ends quietly.

    Returns the mgmt IP addresses of the scaled group as a JSON string.
    """
    heatclient = hc.HeatClient(auth_attr, region_name)

    # TODO(kanagaraj-manickam) make wait logic into separate utility method
    # and make use of it here and other actions like create and delete
    # Loop-invariant values hoisted out of the polling loop (also
    # guarantees they are bound when the timeout branch runs).
    stack_id = policy['instance_id']
    policy_name = get_scaling_policy_name(policy_name=policy['name'],
                                          action=policy['action'])
    stack_retries = self.STACK_RETRIES
    while True:
        try:
            time.sleep(self.STACK_RETRY_WAIT)
            events = heatclient.resource_event_list(
                stack_id, policy_name, limit=1,
                sort_dir='desc', sort_keys='event_time')
            # An event newer than the pre-signal one carrying
            # SIGNAL_COMPLETE means scaling has finished.
            if events[0].id != last_event_id:
                if events[0].resource_status == 'SIGNAL_COMPLETE':
                    break
        except Exception as e:
            error_reason = _("MEA scaling failed for stack %(stack)s with "
                             "error %(error)s") % {
                'stack': policy['instance_id'],
                'error': str(e)}
            LOG.warning(error_reason)
            raise mem.MEAScaleWaitFailed(mea_id=policy['mea']['id'],
                                         reason=error_reason)

        if stack_retries == 0:
            # No new event seen: check whether Heat silently ignored
            # the signal because it arrived inside the cool-down
            # window.
            metadata = heatclient.resource_metadata(stack_id, policy_name)
            if not metadata['scaling_in_progress']:
                error_reason = _('when signal occurred within cool down '
                                 'window, no events generated from heat, '
                                 'so ignore it')
                LOG.warning(error_reason)
                break
            # BUG FIX: the original used "%{wait}s" — an invalid
            # %-format spec that raised
            # "ValueError: unsupported format character '{'" instead
            # of the intended timeout exception.
            error_reason = _(
                "MEA scaling failed to complete within %(wait)s seconds "
                "while waiting for the stack %(stack)s to be "
                "scaled.") % {
                'stack': stack_id,
                'wait': self.STACK_RETRIES * self.STACK_RETRY_WAIT}
            LOG.warning(error_reason)
            raise mem.MEAScaleWaitFailed(mea_id=policy['mea']['id'],
                                         reason=error_reason)
        stack_retries -= 1

    def _fill_scaling_group_name():
        # Resolve the scaling group name for this policy from the JSON
        # mapping stored in the MEA attributes.
        mea = policy['mea']
        scaling_group_names = mea['attributes']['scaling_group_names']
        policy['group_name'] = jsonutils.loads(scaling_group_names)[
            policy['name']]

    _fill_scaling_group_name()

    mgmt_ips = self._find_mgmt_ips_from_groups(heatclient,
                                               policy['instance_id'],
                                               [policy['group_name']])
    return jsonutils.dumps(mgmt_ips)
def delete(self, plugin, context, mea_id, auth_attr, region_name=None):
    """Request deletion of the Heat stack backing *mea_id* (no wait)."""
    client = hc.HeatClient(auth_attr, region_name)
    client.delete(mea_id)
def create_wait(self, plugin, context, mea_dict, mea_id, auth_attr):
    """Poll Heat until stack creation finishes, then record mgmt IPs.

    Waits up to STACK_RETRIES * STACK_RETRY_WAIT seconds for the stack
    to leave CREATE_IN_PROGRESS.  Raises MEACreateWaitFailed on timeout
    or when the final status is not CREATE_COMPLETE.  On success,
    stores discovered management IPs as JSON in mea_dict['mgmt_url'].
    """
    region_name = mea_dict.get('placement_attr', {}).get('region_name',
                                                         None)
    heatclient = hc.HeatClient(auth_attr, region_name)
    stack = heatclient.get(mea_id)
    status = stack.stack_status
    stack_retries = self.STACK_RETRIES
    error_reason = None
    while status == 'CREATE_IN_PROGRESS' and stack_retries > 0:
        time.sleep(self.STACK_RETRY_WAIT)
        try:
            stack = heatclient.get(mea_id)
        except Exception:
            LOG.warning("MEA Instance setup may not have "
                        "happened because Heat API request failed "
                        "while waiting for the stack %(stack)s to be "
                        "created", {'stack': mea_id})
            # continue to avoid temporary connection error to target
            # VIM
        # NOTE: after a swallowed failure this re-reads the *stale*
        # stack object, deliberately retrying with the last known
        # status.
        status = stack.stack_status
        LOG.debug('status: %s', status)
        stack_retries = stack_retries - 1
    LOG.debug('stack status: %(stack)s %(status)s', {
        'stack': str(stack), 'status': status})
    if stack_retries == 0 and status != 'CREATE_COMPLETE':
        # Timed out while the stack was still CREATE_IN_PROGRESS.
        error_reason = _("Resource creation is not completed within"
                         " {wait} seconds as creation of stack {stack}"
                         " is not completed").format(
            wait=(self.STACK_RETRIES * self.STACK_RETRY_WAIT),
            stack=mea_id)
        LOG.warning("MEA Creation failed: %(reason)s",
                    {'reason': error_reason})
        raise mem.MEACreateWaitFailed(reason=error_reason)
    elif stack_retries != 0 and status != 'CREATE_COMPLETE':
        # Terminal-but-failed status (e.g. CREATE_FAILED): surface
        # Heat's own reason.
        error_reason = stack.stack_status_reason
        raise mem.MEACreateWaitFailed(reason=error_reason)

    def _find_mgmt_ips(outputs):
        # Collect stack outputs whose key carries the mgmt-IP prefix,
        # keyed by the name with the prefix stripped.
        LOG.debug('outputs %s', outputs)
        mgmt_ips = dict(
            (output['output_key'][len(OUTPUT_PREFIX):],
             output['output_value'])
            for output in outputs
            if output.get('output_key', '').startswith(OUTPUT_PREFIX))
        return mgmt_ips

    # scaling enabled
    if mea_dict['attributes'].get('scaling_group_names'):
        group_names = jsonutils.loads(
            mea_dict['attributes'].get('scaling_group_names')).values()
        mgmt_ips = self._find_mgmt_ips_from_groups(heatclient,
                                                   mea_id,
                                                   group_names)
    else:
        mgmt_ips = _find_mgmt_ips(stack.outputs)

    if mgmt_ips:
        mea_dict['mgmt_url'] = jsonutils.dumps(mgmt_ips)