def _get_validations_from_swift(swift, container, objects, groups, results,
                                skip_existing=False):
    """Append validation summaries parsed from Swift YAML objects.

    :param swift: Swift client used to fetch object contents.
    :param container: name of the Swift container holding the validations.
    :param objects: iterable of Swift object dicts (each with a 'name' key).
    :param groups: optional list of group names; when non-empty, only
        validations that belong to at least one of these groups are kept.
    :param results: list of already-collected validation dicts; new entries
        are appended in place.
    :param skip_existing: when True, objects whose id already appears in
        ``results`` are skipped.
    :returns: the (mutated) ``results`` list.
    """
    # Perf fix: build a set once for O(1) membership tests instead of
    # scanning the results list for every object.
    existing_ids = {validation['id'] for validation in results}
    for obj in objects:
        validation_id, ext = os.path.splitext(obj['name'])
        # Only YAML objects are treated as validations.
        if ext != '.yaml':
            continue
        if skip_existing and validation_id in existing_ids:
            continue
        contents = swift_utils.get_object_string(swift, container,
                                                 obj['name'])
        validation = yaml.safe_load(contents)
        # Empty/missing groups metadata means the validation can never
        # match a group filter, but it is still listed when no filter
        # groups were requested.
        validation_groups = get_validation_metadata(validation,
                                                    'groups') or []
        if not groups or set.intersection(set(groups),
                                          set(validation_groups)):
            results.append({
                'id': validation_id,
                'name': get_validation_metadata(validation, 'name'),
                'groups': get_validation_metadata(validation, 'groups'),
                'description': get_validation_metadata(validation,
                                                       'description'),
                'metadata': get_remaining_metadata(validation)
            })
    return results
def _get_validations_from_swift(swift, container, objects, groups, results,
                                skip_existing=False):
    """Collect validation descriptions from YAML objects in a container.

    Each matching object is parsed and summarized into ``results`` (mutated
    in place and also returned).  When ``groups`` is non-empty, only
    validations sharing at least one group are included; ``skip_existing``
    suppresses ids already present in ``results``.
    """
    existing_ids = [entry['id'] for entry in results]
    wanted_groups = set(groups) if groups else None
    for swift_obj in objects:
        object_name = swift_obj['name']
        validation_id, extension = os.path.splitext(object_name)
        if extension != '.yaml':
            continue
        if skip_existing and validation_id in existing_ids:
            continue
        parsed = yaml.safe_load(
            swift_utils.get_object_string(swift, container, object_name))
        member_groups = set(get_validation_metadata(parsed, 'groups') or [])
        # Skip when a group filter is active and nothing overlaps.
        if wanted_groups is not None and not (wanted_groups & member_groups):
            continue
        results.append({
            'id': validation_id,
            'name': get_validation_metadata(parsed, 'name'),
            'groups': get_validation_metadata(parsed, 'groups'),
            'description': get_validation_metadata(parsed, 'description'),
            'metadata': get_remaining_metadata(parsed)
        })
    return results
def check_neutron_mechanism_drivers(env, stack, plan_client, container):
    """Return an error message when a Neutron ML2 driver switch is unsafe.

    Compares the exclusive mechanism driver currently in use (read from the
    live stack) against the one the update would configure (from
    parameter_defaults, falling back to the template default) and returns a
    message when they differ.  Returns None when the change is allowed.

    :param env: plan environment dict.
    :param stack: existing heat stack object (its _info is searched).
    :param plan_client: Swift client for the plan container.
    :param container: name of the plan container.
    """
    # Robustness fix: 'parameter_defaults' may be absent or explicitly
    # None in the plan environment; the previous chained .get() would
    # raise AttributeError in that case.
    parameter_defaults = env.get('parameter_defaults') or {}
    force_update = parameter_defaults.get('ForceNeutronDriverUpdate', False)
    # Forcing an update and skip checks is need to support migrating from one
    # driver to another
    if force_update:
        return
    driver_key = 'NeutronMechanismDrivers'
    current_drivers = search_stack(stack._info, driver_key)
    # TODO(beagles): We may need to move or copy this check earlier
    # to automagically pull in an openvswitch ML2 compatibility driver.
    current_driver = get_exclusive_neutron_driver(current_drivers)
    configured_drivers = parameter_defaults.get(driver_key)
    new_driver = None
    if configured_drivers:
        new_driver = get_exclusive_neutron_driver(configured_drivers)
    else:
        # TODO(beagles): we need to look for a better way to
        # get the current template default value. This is fragile
        # with respect to changing filenames, etc.
        ml2_tmpl = swiftutils.get_object_string(
            plan_client, container,
            'puppet/services/neutron-plugin-ml2.yaml')
        ml2_def = yaml.safe_load(ml2_tmpl)
        default_drivers = ml2_def.get('parameters', {}).get(
            driver_key, {}).get('default')
        new_driver = get_exclusive_neutron_driver(default_drivers)
    if current_driver and new_driver and current_driver != new_driver:
        msg = ("Unable to switch from {} to {} neutron "
               "mechanism drivers on upgrade. Please consult the "
               "documentation.").format(current_driver, new_driver)
        return msg
def download_validation(swift, plan, validation):
    """Downloads validations from Swift to a temporary location"""
    dst_dir = '/tmp/{}-validations'.format(plan)

    # Mirror the default validations container locally; existing files are
    # only replaced by newer objects.
    swift_utils.download_container(
        swift,
        constants.VALIDATIONS_CONTAINER_NAME,
        dst_dir,
        overwrite_only_newer=True
    )

    filename = '{}.yaml'.format(validation)
    swift_path = os.path.join(constants.CUSTOM_VALIDATIONS_FOLDER, filename)
    dst_path = os.path.join(dst_dir, filename)

    # If a custom validation with that name exists, get it from the plan
    # container and override. Otherwise, the default one will be used.
    try:
        contents = swift_utils.get_object_string(swift, plan, swift_path)
        with open(dst_path, 'w') as f:
            f.write(contents)
    except swiftexceptions.ClientException:
        pass

    return dst_path
def run(self, context):
    """Append the queued UI log data to the Swift-hosted log object.

    Ensures the logging container exists, rotates if needed, then uploads
    the log object with the new data appended.  Only a failed upload
    produces an error Result.
    """
    client = self.get_object_client(context)
    swiftutils.create_container(client, self.logging_container)
    self._rotate(client)

    log_name = constants.TRIPLEO_UI_LOG_FILENAME
    try:
        existing = swiftutils.get_object_string(
            client, self.logging_container, log_name)
    except swiftexceptions.ClientException:
        LOG.debug(
            "There is no existing logging data, starting a new file.")
        payload = self.logging_data
    else:
        payload = "%s\n%s" % (existing, self.logging_data)

    try:
        swiftutils.put_object_string(
            client, self.logging_container, log_name, payload)
    except swiftexceptions.ClientException as err:
        return actions.Result(error="Failed to publish logs: %s" % err)
def check_neutron_mechanism_drivers(env, stack, plan_client, container): force_update = env.get('parameter_defaults').get( 'ForceNeutronDriverUpdate', False) # Forcing an update and skip checks is need to support migrating from one # driver to another if force_update: return driver_key = 'NeutronMechanismDrivers' current_drivers = search_stack(stack._info, driver_key) # TODO(beagles): We may need to move or copy this check earlier # to automagically pull in an openvswitch ML2 compatibility driver. current_driver = get_exclusive_neutron_driver(current_drivers) configured_drivers = env.get('parameter_defaults').get(driver_key) new_driver = None if configured_drivers: new_driver = get_exclusive_neutron_driver(configured_drivers) else: # TODO(beagles): we need to look for a better way to # get the current template default value. This is fragile # with respect to changing filenames, etc. ml2_tmpl = swiftutils.get_object_string( plan_client, container, 'puppet/services/neutron-plugin-ml2.yaml') ml2_def = yaml.safe_load(ml2_tmpl) default_drivers = ml2_def.get('parameters', {}).get(driver_key, {}).get('default') new_driver = get_exclusive_neutron_driver(default_drivers) if current_driver and new_driver and current_driver != new_driver: msg = ("Unable to switch from {} to {} neutron " "mechanism drivers on upgrade. Please consult the " "documentation.").format(current_driver, new_driver) return msg
def get_deployment_status(clients, plan):
    """Return current deployment status.

    :param clients: application client object.
    :type clients: Object

    :param plan: Plan name.
    :type plan: String

    :returns: string
    """
    # No stack means no deployment at all.
    try:
        clients.orchestration.stacks.get(plan)
    except heat_exc.HTTPNotFound:
        return None

    messages_container = '%s-messages' % plan
    try:
        status_body = swift_utils.get_object_string(
            clients.tripleoclient.object_store,
            messages_container,
            'deployment_status.yaml')
    except swiftexceptions.ClientException:
        # Status object not written (yet) -- treat as unknown.
        return None
    return yaml.safe_load(status_body)['deployment_status']
def download_validation(swift, plan, validation):
    """Downloads validations from Swift to a temporary location"""
    # NOTE(review): predictable per-plan path under /tmp -- presumably a
    # single-tenant undercloud host; confirm no concurrent-writer race.
    dst_dir = '/tmp/{}-validations'.format(plan)

    # Download the whole default validations container
    swift_utils.download_container(
        swift,
        constants.VALIDATIONS_CONTAINER_NAME,
        dst_dir,
        overwrite_only_newer=True
    )

    filename = '{}.yaml'.format(validation)
    # Location a per-plan custom validation would occupy in the plan
    # container, and its local destination.
    swift_path = os.path.join(constants.CUSTOM_VALIDATIONS_FOLDER, filename)
    dst_path = os.path.join(dst_dir, filename)

    # If a custom validation with that name exists, get it from the plan
    # container and override. Otherwise, the default one will be used.
    try:
        contents = swift_utils.get_object_string(swift, plan, swift_path)
    except swiftexceptions.ClientException:
        # No custom override present; the default copy stays in place.
        pass
    else:
        with open(dst_path, 'w') as f:
            f.write(contents)

    return dst_path
def run(self, context):
    """Enable/disable environment files in the plan environment.

    ``self.environments`` maps environment path -> truthy (enable) or
    falsy (disable).  With ``self.purge_missing`` set, any environment not
    listed is removed.  With ``self.sort_environments`` set, the list is
    reordered per capabilities-map.yaml.

    :returns: the updated plan environment dict, or an actions.Result
        carrying an error message.
    """
    swift = self.get_object_client(context)
    try:
        env = plan_utils.get_env(swift, self.container)
    except swiftexceptions.ClientException as err:
        err_msg = ("Error retrieving environment for plan %s: %s" % (
            self.container, err))
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)

    for k, v in self.environments.items():
        found = {'path': k} in env['environments']
        if v:
            if not found:
                env['environments'].append({'path': k})
        else:
            if found:
                env['environments'].remove({'path': k})

    if self.purge_missing:
        # Bug fix: iterate over a snapshot.  Calling list.remove() while
        # iterating the same list silently skips the element that follows
        # each removal, leaving stale environments behind.
        for e in list(env['environments']):
            if e.get('path') not in self.environments:
                env['environments'].remove(e)

    self.cache_delete(context, self.container, "tripleo.parameters.get")

    if self.sort_environments:
        # get the capabilities-map content to perform the environment
        # ordering
        try:
            map_file = swiftutils.get_object_string(
                swift, self.container, 'capabilities-map.yaml')
            capabilities = yaml.safe_load(map_file)
        except swiftexceptions.ClientException as err:
            err_msg = ("Error retrieving capabilities-map.yaml for "
                       "plan %s: %s" % (self.container, err))
            LOG.exception(err_msg)
            return actions.Result(error=err_msg)
        ordered_env = plan_utils.apply_environments_order(
            capabilities, env.get('environments', []))
        env['environments'] = ordered_env

    try:
        plan_utils.put_env(swift, env)
    except swiftexceptions.ClientException as err:
        err_msg = "Error uploading to container: %s" % err
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)
    return env
def run(self, context):
    """Enable/disable environment files in the plan environment.

    ``self.environments`` maps environment path -> truthy (enable) or
    falsy (disable).  With ``self.purge_missing`` set, any environment not
    listed is removed.  With ``self.sort_environments`` set, the list is
    reordered per capabilities-map.yaml.

    :returns: the updated plan environment dict, or an actions.Result
        carrying an error message.
    """
    swift = self.get_object_client(context)
    try:
        env = plan_utils.get_env(swift, self.container)
    except swiftexceptions.ClientException as err:
        err_msg = ("Error retrieving environment for plan %s: %s"
                   % (self.container, err))
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)

    for k, v in self.environments.items():
        found = {'path': k} in env['environments']
        if v:
            if not found:
                env['environments'].append({'path': k})
        else:
            if found:
                env['environments'].remove({'path': k})

    if self.purge_missing:
        # Bug fix: removing entries from a list while iterating it skips
        # the next element after every removal, so purge over a copy.
        for e in list(env['environments']):
            if e.get('path') not in self.environments:
                env['environments'].remove(e)

    self.cache_delete(context, self.container, "tripleo.parameters.get")

    if self.sort_environments:
        # get the capabilities-map content to perform the environment
        # ordering
        try:
            map_file = swiftutils.get_object_string(
                swift, self.container, 'capabilities-map.yaml')
            capabilities = yaml.safe_load(map_file)
        except swiftexceptions.ClientException as err:
            err_msg = ("Error retrieving capabilities-map.yaml for "
                       "plan %s: %s" % (self.container, err))
            LOG.exception(err_msg)
            return actions.Result(error=err_msg)
        ordered_env = plan_utils.apply_environments_order(
            capabilities, env.get('environments', []))
        env['environments'] = ordered_env

    try:
        plan_utils.put_env(swift, env)
    except swiftexceptions.ClientException as err:
        err_msg = "Error uploading to container: %s" % err
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)
    return env
def _get_role_data(self, swift):
    """Load the plan's roles data file from its Swift container.

    :param swift: Swift client for the plan container (self.container).
    :returns: parsed roles data, or None when the roles file is absent
        (callers then skip filtering container images by role).
    """
    try:
        j2_role_file = swiftutils.get_object_string(
            swift, self.container, constants.OVERCLOUD_J2_ROLES_NAME)
        role_data = yaml.safe_load(j2_role_file)
    except swiftexceptions.ClientException:
        # A missing roles file is tolerated; None signals "no filtering".
        LOG.info(
            "No %s file found, not filtering container images by role" %
            constants.OVERCLOUD_J2_ROLES_NAME)
        role_data = None
    return role_data
def get_source(self, environment, template):
    """Jinja2 loader hook: fetch template source from Swift.

    Tries each configured searchpath prefix inside the container and
    returns the loader-protocol triple (source, filename, uptodate).
    Raises TemplateNotFound when no prefix yields the object.
    """
    pieces = jinja2.loaders.split_template_path(template)
    for prefix in self.searchpath:
        candidate = os.path.join(prefix, *pieces)
        try:
            body = swiftutils.get_object_string(self.swift, self.container,
                                                candidate)
        except swiftexceptions.ClientException:
            # Not under this prefix; try the next one.
            continue
        # filename=None, uptodate=False: never treat the result as a
        # cacheable on-disk file.
        return body, None, False
    raise jinja2.exceptions.TemplateNotFound(template)
def _get_role_data(self, swift):
    """Load the plan's roles data file from its Swift container.

    :param swift: Swift client for the plan container (self.container).
    :returns: parsed roles data, or None when the roles file is absent
        (callers then skip filtering container images by role).
    """
    try:
        j2_role_file = swiftutils.get_object_string(
            swift, self.container, constants.OVERCLOUD_J2_ROLES_NAME)
        role_data = yaml.safe_load(j2_role_file)
    except swiftexceptions.ClientException:
        # A missing roles file is tolerated; None signals "no filtering".
        LOG.info("No %s file found, not filtering container images by role"
                 % constants.OVERCLOUD_J2_ROLES_NAME)
        role_data = None
    return role_data
def get_env(swift, name):
    """Get plan environment from Swift and convert it to a dictionary."""
    raw = swiftutils.get_object_string(swift, name,
                                       constants.PLAN_ENVIRONMENT)
    env = yaml.safe_load(raw)

    # Ensure the name is correct, as it will be used to update the
    # container later
    if env.get('name') != name:
        env['name'] = name

    return env
def get_source(self, environment, template):
    """Jinja2 loader hook: fetch template source from Swift.

    Searches each configured searchpath prefix within self.container and
    returns the jinja2 loader-protocol triple (source, filename, uptodate);
    raises TemplateNotFound when no prefix yields the object.
    """
    pieces = jinja2.loaders.split_template_path(template)
    for searchpath in self.searchpath:
        template_path = os.path.join(searchpath, *pieces)
        try:
            source = swiftutils.get_object_string(self.swift,
                                                  self.container,
                                                  template_path)
            # filename=None, uptodate=False: never treated as a cached
            # on-disk file by jinja2.
            return source, None, False
        except swiftexceptions.ClientException:
            # Not found under this prefix; try the next searchpath.
            pass
    raise jinja2.exceptions.TemplateNotFound(template)
def _wait_for_data(self, container_name, object_name, context):
    """Poll Swift until the object has non-empty contents or timeout.

    Polls every 3 seconds; count_check approximates elapsed seconds and is
    compared against self.timeout (the fetch time itself is not counted).

    :param container_name: Swift container to poll.
    :param object_name: object whose contents are awaited.
    :returns: the object contents, or the last (falsy) fetch result when
        the timeout elapsed first.
    """
    body = None
    count_check = 0
    swift_client = self.get_object_client(context)
    # NOTE(review): assumes get_object_string returns an empty/falsy value
    # (rather than raising) while the object is missing -- TODO confirm
    # against swiftutils.
    while not body:
        body = swiftutils.get_object_string(swift_client, container_name,
                                            object_name)
        count_check += 3
        if body or count_check > self.timeout:
            break
        time.sleep(3)
    return body
def _wait_for_data(self, container_name, object_name, context):
    """Poll Swift every 3s for the object's contents, up to self.timeout.

    Returns the contents as soon as they are non-empty, or the last fetch
    result once the (approximate) timeout has elapsed.
    """
    swift_client = self.get_object_client(context)
    elapsed = 0
    while True:
        body = swiftutils.get_object_string(swift_client, container_name,
                                            object_name)
        elapsed += 3
        if body or elapsed > self.timeout:
            return body
        time.sleep(3)
def run(self, context):
    """Fetch the plan's roles data and return names or full detail.

    Returns the parsed roles data when self.detail is truthy, otherwise
    just the list of role names; any retrieval/parse failure is returned
    as an error Result.
    """
    try:
        swift = self.get_object_client(context)
        raw_roles = swiftutils.get_object_string(
            swift, self.container, self.role_file_name)
        roles_data = yaml.safe_load(raw_roles)
    except Exception as err:
        err_msg = ("Error retrieving roles data from deployment plan: %s"
                   % err)
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)

    if self.detail:
        return roles_data
    return [role['name'] for role in roles_data]
def run(self, context):
    """Fetch the plan's roles data and return names or full detail.

    Returns the parsed roles data when self.detail is truthy, otherwise
    just the list of role names; any retrieval/parse failure is returned
    as an error Result.
    """
    try:
        swift = self.get_object_client(context)
        roles_data = yaml.safe_load(swiftutils.get_object_string(
            swift, self.container, self.role_file_name))
    except Exception as err:
        # Broad catch: any Swift or YAML failure becomes an error Result.
        err_msg = ("Error retrieving roles data from deployment plan: %s"
                   % err)
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)
    if self.detail:
        return roles_data
    else:
        return [role['name'] for role in roles_data]
def _get_j2_excludes_file(self, context):
    """Return the parsed j2 excludes data, defaulting to {"name": []}.

    The excludes file lists template paths that must not be j2-rendered;
    a missing, empty, or name-less file yields the empty default.
    """
    swift = self.get_object_client(context)
    try:
        raw = swiftutils.get_object_string(
            swift, self.container, constants.OVERCLOUD_J2_EXCLUDES)
    except swiftexceptions.ClientException:
        fallback = {"name": []}
        LOG.info("No J2 exclude file found, defaulting "
                 "the J2 excludes list to: %s" % fallback)
        return fallback

    data = yaml.safe_load(raw)
    if data is None or data.get('name') is None:
        data = {"name": []}
        LOG.info("j2_excludes.yaml is either empty or there are "
                 "no templates to exclude, defaulting the J2 "
                 "excludes list to: %s" % data)
    return data
def _get_j2_excludes_file(self, context):
    """Return the parsed j2 excludes data, defaulting to {"name": []}.

    The excludes file lists template paths that must not be j2-rendered;
    a missing, empty, or name-less file yields the empty default.
    """
    swift = self.get_object_client(context)
    try:
        j2_excl_file = swiftutils.get_object_string(
            swift, self.container, constants.OVERCLOUD_J2_EXCLUDES)
        j2_excl_data = yaml.safe_load(j2_excl_file)
        if (j2_excl_data is None or j2_excl_data.get('name') is None):
            j2_excl_data = {"name": []}
            LOG.info("j2_excludes.yaml is either empty or there are "
                     "no templates to exclude, defaulting the J2 "
                     "excludes list to: %s" % j2_excl_data)
    except swiftexceptions.ClientException:
        # No excludes file at all is fine; use the empty default.
        j2_excl_data = {"name": []}
        LOG.info("No J2 exclude file found, defaulting "
                 "the J2 excludes list to: %s" % j2_excl_data)
    return j2_excl_data
def run(self, context):
    """Sync self.templates_dir into the plan container via a tmp container.

    Tarballs the local templates directory, extracts it into a scratch
    '<plan>-tmp' container, then copies into the real plan container every
    object that is new or whose contents changed -- except user-owned plan
    files (plan env, roles/networks/excludes), which are never overwritten.
    Returns an error Result on failure, None on success.
    """
    try:
        swift = self.get_object_client(context)
        # Upload template dir to tmp container
        container_tmp = '%s-tmp' % self.container
        with tempfile.NamedTemporaryFile() as tmp_tarball:
            tarball.create_tarball(self.templates_dir, tmp_tarball.name)
            tarball.tarball_extract_to_swift_container(
                swift, tmp_tarball.name, container_tmp)
        # Get all new templates:
        # NOTE(review): fetching object name '' appears to rely on Swift
        # returning the container listing as newline-separated text --
        # TODO confirm this behaviour of swiftutils.get_object_string.
        new_templates = swiftutils.get_object_string(
            swift, container_tmp, '').splitlines()
        old_templates = swiftutils.get_object_string(
            swift, self.container, '').splitlines()
        # User-editable plan files that must never be clobbered.
        exclude_user_data = [constants.PLAN_ENVIRONMENT,
                             constants.OVERCLOUD_J2_ROLES_NAME,
                             constants.OVERCLOUD_J2_NETWORKS_NAME,
                             constants.OVERCLOUD_J2_EXCLUDES]
        # Update the old container
        for new in new_templates:
            # if doesn't exist, push it:
            if new not in old_templates:
                swiftutils.put_object_string(
                    swift,
                    self.container,
                    new,
                    swiftutils.get_object_string(swift, container_tmp, new)
                )
            else:
                content_new = swiftutils.get_object_string(
                    swift, container_tmp, new)
                content_old = swiftutils.get_object_string(
                    swift, self.container, new)
                # Overwrite only changed, non-user-owned objects.
                if (not content_new == content_old and
                        new not in exclude_user_data):
                    swiftutils.put_object_string(
                        swift,
                        self.container,
                        new,
                        swiftutils.get_object_string(
                            swift, container_tmp, new)
                    )
    except swiftexceptions.ClientException as err:
        msg = "Error attempting an operation on container: %s" % err
        LOG.exception(msg)
        return actions.Result(error=msg)
    except Exception as err:
        msg = "Error while updating plan: %s" % err
        LOG.exception(msg)
        return actions.Result(error=msg)
def run(self, context):
    """Append self.logging_data to the shared UI log object in Swift.

    Ensures the logging container exists, rotates the current log if
    needed, then re-uploads the log object with the new data appended.
    Only a failed upload produces an error Result.
    """
    swift = self.get_object_client(context)
    swiftutils.create_container(swift, self.logging_container)
    self._rotate(swift)
    try:
        old_contents = swiftutils.get_object_string(
            swift, self.logging_container,
            constants.TRIPLEO_UI_LOG_FILENAME)
        new_contents = "%s\n%s" % (old_contents, self.logging_data)
    except swiftexceptions.ClientException:
        # First write: there is no log object yet.
        LOG.debug(
            "There is no existing logging data, starting a new file.")
        new_contents = self.logging_data
    try:
        swiftutils.put_object_string(swift, self.logging_container,
                                     constants.TRIPLEO_UI_LOG_FILENAME,
                                     new_contents)
    except swiftexceptions.ClientException as err:
        msg = "Failed to publish logs: %s" % err
        return actions.Result(error=msg)
def run(self, context):
    """Build a UI-friendly capabilities map annotated with enabled flags.

    Loads capabilities-map.yaml and the plan environment, marks every
    known environment as enabled/disabled according to the plan, and
    appends any environment files found in the container but absent from
    the map under a synthetic "Other" topic.  Returns a dict keyed by
    topic title, or an error Result on failure.
    """
    try:
        swift = self.get_object_client(context)
        map_file = swiftutils.get_object_string(swift, self.container,
                                                'capabilities-map.yaml')
        capabilities = yaml.safe_load(map_file)
    except Exception:
        err_msg = (
            "Error parsing capabilities-map.yaml.")
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)

    try:
        container_files = swift.get_container(self.container)
        container_file_list = [entry['name'] for entry in
                               container_files[1]]
    except Exception as swift_err:
        err_msg = ("Error retrieving plan files: %s" % swift_err)
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)

    try:
        env = plan_utils.get_env(swift, self.container)
    except swiftexceptions.ClientException as err:
        err_msg = ("Error retrieving environment for plan %s: %s" % (
            self.container, err))
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)

    # Environment paths currently enabled in the plan.
    selected_envs = [item['path'] for item in env['environments']
                     if 'path' in item]

    # extract environment files
    plan_environments = []
    for env_group in capabilities['topics']:
        for envs in env_group['environment_groups']:
            for files in envs['environments']:
                file = files.get('file')
                if file:
                    plan_environments.append(file)

    # parse plan for environment files
    env_files = fnmatch.filter(
        container_file_list, '*environments/*.yaml')
    env_user_files = fnmatch.filter(
        container_file_list, '*user-environment.yaml')
    # Environment files present in the plan but unknown to the map.
    outstanding_envs = list(set(env_files).union(
        env_user_files) - set(plan_environments))

    # change capabilities format
    data_to_return = {}
    for topic in capabilities['topics']:
        title = topic.get('title', '_title_holder')
        data_to_return[title] = topic
        for eg in topic['environment_groups']:
            # NOTE(review): 'env' is rebound here, shadowing the plan
            # environment dict loaded above (no longer needed past this
            # point).
            for env in eg['environments']:
                if selected_envs and env.get('file') in selected_envs:
                    env['enabled'] = True
                else:
                    env['enabled'] = False

    # add custom environment files
    other_environments = []
    for env in outstanding_envs:
        flag = selected_envs and env in selected_envs
        new_env = {
            "description": "Enable %s environment" % env,
            "enabled": flag,
            "file": env,
            "title": env,
        }
        other_environments.append(new_env)
    other_environments.sort(key=lambda x: x['file'])

    # Each custom environment becomes its own one-element group.
    other_environment_groups = []
    for group in other_environments:
        new_group = {
            "description": None,
            "environments": [group],
            "title": group['file'],
        }
        other_environment_groups.append(new_group)

    other_environments_topic_dict = {
        "description": None,
        "title": "Other",
        "environment_groups": other_environment_groups
    }

    other_environments_topic = {
        "Other": other_environments_topic_dict
    }
    data_to_return.update(other_environments_topic)

    return data_to_return
def _process_custom_roles(self, context):
    """Jinja2-render the plan's j2 templates using roles/networks data.

    Performs three rendering passes over the plan container:
    *.role.j2.yaml (one output per role), *.network.j2.yaml (one output
    per enabled network), and plain *.j2.yaml (one output, all data).
    Outputs listed in the j2 excludes file are skipped.  Returns early
    when no roles data file exists.
    """
    swift = self.get_object_client(context)

    try:
        j2_role_file = swiftutils.get_object_string(
            swift, self.container, constants.OVERCLOUD_J2_ROLES_NAME)
        role_data = yaml.safe_load(j2_role_file)
    except swiftexceptions.ClientException:
        LOG.info("No %s file found, skipping jinja templating"
                 % constants.OVERCLOUD_J2_ROLES_NAME)
        return

    try:
        j2_network_file = swiftutils.get_object_string(
            swift, self.container, constants.OVERCLOUD_J2_NETWORKS_NAME)
        network_data = yaml.safe_load(j2_network_file)
        # Allow no networks defined in network_data
        if network_data is None:
            network_data = []
    except swiftexceptions.ClientException:
        # Until t-h-t contains network_data.yaml we tolerate a missing file
        # Bug fix: this message previously named the *roles* file constant;
        # it is the networks file that is missing here.
        LOG.warning("No %s file found, ignoring"
                    % constants.OVERCLOUD_J2_NETWORKS_NAME)
        network_data = []

    j2_excl_data = self._get_j2_excludes_file(context)

    try:
        # Iterate over all files in the plan container
        # we j2 render any with the .j2.yaml suffix
        container_files = swift.get_container(self.container)
    except swiftexceptions.ClientException as ex:
        error_msg = ("Error listing contents of container %s : %s"
                     % (self.container, six.text_type(ex)))
        LOG.error(error_msg)
        raise Exception(error_msg)

    role_names = [r.get('name') for r in role_data]
    r_map = {}
    for r in role_data:
        r_map[r.get('name')] = r
    excl_templates = j2_excl_data.get('name')

    heatclient = self.get_orchestration_client(context)
    stack = None
    try:
        stack = heatclient.stacks.get(self.container,
                                      resolve_outputs=False)
    except heat_exc.HTTPNotFound:
        LOG.debug("Stack does not exist")

    n_map = {}
    for n in network_data:
        if n.get('enabled') is not False:
            n_map[n.get('name')] = n
            if not n.get('name_lower'):
                n_map[n.get('name')]['name_lower'] = n.get('name').lower()
            if n.get('name') == constants.API_NETWORK and 'compat_name' \
                    not in n.keys():
                # Check to see if legacy named API network exists
                # and if so we need to set compat_name
                api_net = "{}Network".format(constants.LEGACY_API_NETWORK)
                if self._heat_resource_exists(heatclient, stack,
                                              'Networks', api_net,
                                              context):
                    n['compat_name'] = 'Internal'
                    LOG.info("Upgrade compatibility enabled for legacy "
                             "network resource Internal.")
        else:
            LOG.info("skipping %s network: network is disabled." %
                     n.get('name'))

    self.cache_delete(context, self.container, "tripleo.parameters.get")

    for f in [f.get('name') for f in container_files[1]]:
        # We do three templating passes here:
        # 1. *.role.j2.yaml - we template just the role name
        #    and create multiple files (one per role)
        # 2  *.network.j2.yaml - we template the network name and
        #    data and create multiple files for networks and
        #    network ports (one per network)
        # 3. *.j2.yaml - we template with all roles_data,
        #    and create one file common to all roles
        if f.endswith('.role.j2.yaml'):
            LOG.info("jinja2 rendering role template %s" % f)
            j2_template = swiftutils.get_object_string(swift,
                                                       self.container, f)
            LOG.info("jinja2 rendering roles %s" % ","
                     .join(role_names))
            for role in role_names:
                LOG.info("jinja2 rendering role %s" % role)
                out_f = "-".join(
                    [role.lower(),
                     os.path.basename(f).replace('.role.j2.yaml',
                                                 '.yaml')])
                out_f_path = os.path.join(os.path.dirname(f), out_f)
                if ('network/config' in os.path.dirname(f) and
                        r_map[role].get('deprecated_nic_config_name')):
                    d_name = r_map[role].get('deprecated_nic_config_name')
                    out_f_path = os.path.join(os.path.dirname(f), d_name)
                elif ('network/config' in os.path.dirname(f)):
                    d_name = "%s.yaml" % role.lower()
                    out_f_path = os.path.join(os.path.dirname(f), d_name)
                if not (out_f_path in excl_templates):
                    if '{{role.name}}' in j2_template:
                        j2_data = {'role': r_map[role],
                                   'networks': network_data}
                        self._j2_render_and_put(j2_template,
                                                j2_data,
                                                out_f_path,
                                                context=context)
                    else:
                        # Backwards compatibility with templates
                        # that specify {{role}} vs {{role.name}}
                        j2_data = {'role': role,
                                   'networks': network_data}
                        LOG.debug("role legacy path for role %s" % role)
                        self._j2_render_and_put(j2_template,
                                                j2_data,
                                                out_f_path,
                                                context=context)
                else:
                    LOG.info("Skipping rendering of %s, defined in %s" %
                             (out_f_path, j2_excl_data))
        elif (f.endswith('.network.j2.yaml')):
            LOG.info("jinja2 rendering network template %s" % f)
            j2_template = swiftutils.get_object_string(swift,
                                                       self.container, f)
            LOG.info("jinja2 rendering networks %s" % ",".join(n_map))
            for network in n_map:
                j2_data = {'network': n_map[network]}
                # Output file names in "<name>.yaml" format
                out_f = os.path.basename(f).replace('.network.j2.yaml',
                                                    '.yaml')
                if os.path.dirname(f).endswith('ports'):
                    out_f = out_f.replace('port',
                                          n_map[network]['name_lower'])
                else:
                    out_f = out_f.replace('network',
                                          n_map[network]['name_lower'])
                out_f_path = os.path.join(os.path.dirname(f), out_f)
                if not (out_f_path in excl_templates):
                    self._j2_render_and_put(j2_template,
                                            j2_data,
                                            out_f_path,
                                            context=context)
                else:
                    LOG.info("Skipping rendering of %s, defined in %s" %
                             (out_f_path, j2_excl_data))
        elif f.endswith('.j2.yaml'):
            LOG.info("jinja2 rendering %s" % f)
            j2_template = swiftutils.get_object_string(swift,
                                                       self.container, f)
            j2_data = {'roles': role_data, 'networks': network_data}
            out_f = f.replace('.j2.yaml', '.yaml')
            self._j2_render_and_put(j2_template,
                                    j2_data,
                                    out_f,
                                    context=context)
def _process_custom_roles(self, context):
    """Jinja2-render the plan's j2 templates using roles/networks data.

    Performs three rendering passes over the plan container:
    *.role.j2.yaml (one output per role), *.network.j2.yaml (one output
    per enabled network), and plain *.j2.yaml (one output, all data).
    Outputs listed in the j2 excludes file are skipped.  Returns early
    when no roles data file exists.
    """
    swift = self.get_object_client(context)

    try:
        j2_role_file = swiftutils.get_object_string(
            swift, self.container, constants.OVERCLOUD_J2_ROLES_NAME)
        role_data = yaml.safe_load(j2_role_file)
    except swiftexceptions.ClientException:
        LOG.info("No %s file found, skipping jinja templating"
                 % constants.OVERCLOUD_J2_ROLES_NAME)
        return

    try:
        j2_network_file = swiftutils.get_object_string(
            swift, self.container, constants.OVERCLOUD_J2_NETWORKS_NAME)
        network_data = yaml.safe_load(j2_network_file)
        # Allow no networks defined in network_data
        if network_data is None:
            network_data = []
    except swiftexceptions.ClientException:
        # Until t-h-t contains network_data.yaml we tolerate a missing file
        # Bug fix: this message previously named the *roles* file constant;
        # it is the networks file that is missing here.
        LOG.warning("No %s file found, ignoring"
                    % constants.OVERCLOUD_J2_NETWORKS_NAME)
        network_data = []

    j2_excl_data = self._get_j2_excludes_file(context)

    try:
        # Iterate over all files in the plan container
        # we j2 render any with the .j2.yaml suffix
        container_files = swift.get_container(self.container)
    except swiftexceptions.ClientException as ex:
        error_msg = ("Error listing contents of container %s : %s"
                     % (self.container, six.text_type(ex)))
        LOG.error(error_msg)
        raise Exception(error_msg)

    role_names = [r.get('name') for r in role_data]
    r_map = {}
    for r in role_data:
        r_map[r.get('name')] = r
    excl_templates = j2_excl_data.get('name')

    heatclient = self.get_orchestration_client(context)
    stack = None
    try:
        stack = heatclient.stacks.get(self.container,
                                      resolve_outputs=False)
    except heat_exc.HTTPNotFound:
        LOG.debug("Stack does not exist")

    n_map = {}
    for n in network_data:
        if n.get('enabled') is not False:
            n_map[n.get('name')] = n
            if not n.get('name_lower'):
                n_map[n.get('name')]['name_lower'] = n.get('name').lower()
            if n.get('name') == constants.API_NETWORK and 'compat_name' \
                    not in n.keys():
                # Check to see if legacy named API network exists
                # and if so we need to set compat_name
                api_net = "{}Network".format(constants.LEGACY_API_NETWORK)
                if self._heat_resource_exists(heatclient, stack,
                                              'Networks', api_net,
                                              context):
                    n['compat_name'] = 'Internal'
                    LOG.info("Upgrade compatibility enabled for legacy "
                             "network resource Internal.")
        else:
            LOG.info("skipping %s network: network is disabled." %
                     n.get('name'))

    self.cache_delete(context, self.container, "tripleo.parameters.get")

    for f in [f.get('name') for f in container_files[1]]:
        # We do three templating passes here:
        # 1. *.role.j2.yaml - we template just the role name
        #    and create multiple files (one per role)
        # 2  *.network.j2.yaml - we template the network name and
        #    data and create multiple files for networks and
        #    network ports (one per network)
        # 3. *.j2.yaml - we template with all roles_data,
        #    and create one file common to all roles
        if f.endswith('.role.j2.yaml'):
            LOG.info("jinja2 rendering role template %s" % f)
            j2_template = swiftutils.get_object_string(swift,
                                                       self.container, f)
            LOG.info("jinja2 rendering roles %s" % ","
                     .join(role_names))
            for role in role_names:
                LOG.info("jinja2 rendering role %s" % role)
                out_f = "-".join(
                    [role.lower(),
                     os.path.basename(f).replace('.role.j2.yaml',
                                                 '.yaml')])
                out_f_path = os.path.join(os.path.dirname(f), out_f)
                if ('network/config' in os.path.dirname(f) and
                        r_map[role].get('deprecated_nic_config_name')):
                    d_name = r_map[role].get('deprecated_nic_config_name')
                    out_f_path = os.path.join(os.path.dirname(f), d_name)
                elif ('network/config' in os.path.dirname(f)):
                    d_name = "%s.yaml" % role.lower()
                    out_f_path = os.path.join(os.path.dirname(f), d_name)
                if not (out_f_path in excl_templates):
                    if '{{role.name}}' in j2_template:
                        j2_data = {'role': r_map[role],
                                   'networks': network_data}
                        self._j2_render_and_put(j2_template,
                                                j2_data,
                                                out_f_path,
                                                context=context)
                    else:
                        # Backwards compatibility with templates
                        # that specify {{role}} vs {{role.name}}
                        j2_data = {'role': role,
                                   'networks': network_data}
                        LOG.debug("role legacy path for role %s" % role)
                        if r_map[role].get('disable_constraints', False):
                            j2_data['disable_constraints'] = True
                        self._j2_render_and_put(j2_template,
                                                j2_data,
                                                out_f_path,
                                                context=context)
                else:
                    LOG.info("Skipping rendering of %s, defined in %s" %
                             (out_f_path, j2_excl_data))
        elif (f.endswith('.network.j2.yaml')):
            LOG.info("jinja2 rendering network template %s" % f)
            j2_template = swiftutils.get_object_string(swift,
                                                       self.container, f)
            LOG.info("jinja2 rendering networks %s" % ",".join(n_map))
            for network in n_map:
                j2_data = {'network': n_map[network]}
                # Output file names in "<name>.yaml" format
                out_f = os.path.basename(f).replace('.network.j2.yaml',
                                                    '.yaml')
                if os.path.dirname(f).endswith('ports'):
                    out_f = out_f.replace('port',
                                          n_map[network]['name_lower'])
                else:
                    out_f = out_f.replace('network',
                                          n_map[network]['name_lower'])
                out_f_path = os.path.join(os.path.dirname(f), out_f)
                if not (out_f_path in excl_templates):
                    self._j2_render_and_put(j2_template,
                                            j2_data,
                                            out_f_path,
                                            context=context)
                else:
                    LOG.info("Skipping rendering of %s, defined in %s" %
                             (out_f_path, j2_excl_data))
        elif f.endswith('.j2.yaml'):
            LOG.info("jinja2 rendering %s" % f)
            j2_template = swiftutils.get_object_string(swift,
                                                       self.container, f)
            j2_data = {'roles': role_data, 'networks': network_data}
            out_f = f.replace('.j2.yaml', '.yaml')
            self._j2_render_and_put(j2_template,
                                    j2_data,
                                    out_f,
                                    context=context)
def run(self, context):
    """Report the current overcloud deployment status.

    Combines three signals -- the Heat stack status, the state of the
    latest config-download workflow execution, and the ansible status
    recorded by that execution -- and computes a corrected deployment
    status when the one stored in Swift is stale.
    """
    heat = self.get_orchestration_client(context)
    mistral = self.get_workflow_client(context)
    swift = self.get_object_client(context)

    try:
        stack = heat.stacks.get(self.plan)
    except heat_exc.HTTPNotFound:
        # No stack: nothing has been deployed for this plan.
        return dict(status_update=None, deployment_status=None)

    try:
        raw = swiftutils.get_object_string(swift,
                                           '%s-messages' % self.plan,
                                           'deployment_status.yaml')
        deployment_status = yaml.safe_load(raw)['deployment_status']
    except swiftexceptions.ClientException:
        # Status object missing from Swift: no stored status yet.
        deployment_status = None

    stack_status = stack.stack_status
    cd_status = None
    ansible_status = None
    # Set below only when the stored status needs correcting.
    status_update = None

    executions = mistral.executions.find(
        workflow_name='tripleo.deployment.v1.config_download_deploy')
    executions.sort(key=lambda e: e.updated_at)
    if executions:
        latest = mistral.executions.get(executions[-1].id)
        cd_status = latest.state
        ansible_status = json.loads(
            latest.output).get('deployment_status')

    def correction(new_status):
        # Propose new_status only when the stored status disagrees.
        if deployment_status != new_status:
            return new_status
        return None

    # tripleoclient does not yet use a single API for overcloud
    # deployment, and no long-running process keeps the stored status
    # fresh, so it is reconciled here on read:
    #   * stack or config-download in progress      -> DEPLOYING
    #   * stack, config-download or ansible failed  -> DEPLOY_FAILED
    #   * config-download and ansible both succeeded -> DEPLOY_SUCCESS
    #   * otherwise the stored status stands.
    if cd_status == 'RUNNING' or stack_status.endswith('IN_PROGRESS'):
        status_update = correction('DEPLOYING')
    elif (stack_status.endswith('FAILED') or cd_status == 'FAILED'
            or ansible_status == 'DEPLOY_FAILED'):
        status_update = correction('DEPLOY_FAILED')
    elif cd_status == 'SUCCESS' and ansible_status == 'DEPLOY_SUCCESS':
        status_update = correction('DEPLOY_SUCCESS')

    return dict(cd_status=cd_status,
                stack_status=stack_status,
                deployment_status=deployment_status,
                ansible_status=ansible_status,
                status_update=status_update)
def get_user_env(swift, container_name): """Get user environment from Swift convert it to a dictionary.""" return yaml.safe_load( swiftutils.get_object_string(swift, container_name, constants.USER_ENVIRONMENT))
def run(self, context):
    """Compute and report the plan's overall deployment status.

    Merges the Heat stack state, the config-download workflow state
    and the ansible status that workflow recorded, proposing a
    corrected status when the one stored in Swift no longer matches.
    """
    orch = self.get_orchestration_client(context)
    wf_client = self.get_workflow_client(context)
    obj_client = self.get_object_client(context)

    try:
        stack = orch.stacks.get(self.plan)
    except heat_exc.HTTPNotFound:
        # Without a stack there is no deployment to report on.
        return dict(status_update=None, deployment_status=None)

    try:
        contents = swiftutils.get_object_string(obj_client,
                                                '%s-messages' % self.plan,
                                                'deployment_status.yaml')
        deployment_status = yaml.safe_load(contents)['deployment_status']
    except swiftexceptions.ClientException:
        # No recorded status in the messages container yet.
        deployment_status = None

    stack_status = stack.stack_status
    cd_status = None
    ansible_status = None
    # Will get set to a new status if an update is required.
    status_update = None

    found = wf_client.executions.find(
        workflow_name='tripleo.deployment.v1.config_download_deploy')
    ordered = sorted(found, key=lambda execution: execution.updated_at)
    if ordered:
        newest = wf_client.executions.get(ordered[-1].id)
        cd_status = newest.state
        ansible_status = json.loads(
            newest.output).get('deployment_status')

    def maybe_update(candidate):
        # Return candidate only if it differs from the stored status.
        if deployment_status != candidate:
            return candidate

    # tripleoclient does not yet use a single deployment API and no
    # long-running process keeps the status current, so reconcile it
    # here, on read:
    #   * stack or config-download still running   -> DEPLOYING
    #   * stack, config-download or ansible failed -> DEPLOY_FAILED
    #   * config-download and ansible succeeded    -> DEPLOY_SUCCESS
    #   * otherwise keep the stored status as-is.
    if stack_status.endswith('IN_PROGRESS') or cd_status == 'RUNNING':
        status_update = maybe_update('DEPLOYING')
    elif (stack_status.endswith('FAILED') or cd_status == 'FAILED'
            or ansible_status == 'DEPLOY_FAILED'):
        status_update = maybe_update('DEPLOY_FAILED')
    elif cd_status == 'SUCCESS' and ansible_status == 'DEPLOY_SUCCESS':
        status_update = maybe_update('DEPLOY_SUCCESS')

    return dict(cd_status=cd_status,
                stack_status=stack_status,
                deployment_status=deployment_status,
                ansible_status=ansible_status,
                status_update=status_update)
def test_get_object_string_from_bytes(self): self.swiftclient.get_object.return_value = (1, b'foo') val = swift_utils.get_object_string(self.swiftclient, 'foo', 'bar') self.assertEqual(str('foo'), val)
def run(self, context):
    """Return the capabilities map annotated with plan environment state.

    Reads capabilities-map.yaml from the plan container, marks every
    mapped environment as enabled/disabled according to the plan's
    selected environments, and appends any environment files present in
    the plan but unknown to the map under a synthetic "Other" topic.

    :returns: dict keyed by topic title, or an actions.Result carrying
        an error message when the plan data cannot be read.
    """
    try:
        swift = self.get_object_client(context)
        map_file = swiftutils.get_object_string(swift, self.container,
                                                'capabilities-map.yaml')
        capabilities = yaml.safe_load(map_file)
    except Exception:
        err_msg = ("Error parsing capabilities-map.yaml.")
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)

    try:
        container_files = swift.get_container(self.container)
        container_file_list = [entry['name']
                               for entry in container_files[1]]
    except Exception as swift_err:
        err_msg = ("Error retrieving plan files: %s" % swift_err)
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)

    try:
        env = plan_utils.get_env(swift, self.container)
    except swiftexceptions.ClientException as err:
        err_msg = ("Error retrieving environment for plan %s: %s"
                   % (self.container, err))
        LOG.exception(err_msg)
        return actions.Result(error=err_msg)

    # Environment files currently selected in the plan.
    selected_envs = [item['path'] for item in env['environments']
                     if 'path' in item]

    # Every environment file the capabilities map already knows about.
    # (Renamed loop variables: the original shadowed the builtin
    # ``file`` and reused ``env``.)
    plan_environments = [
        entry['file']
        for topic in capabilities['topics']
        for group in topic['environment_groups']
        for entry in group['environments']
        if entry.get('file')
    ]

    # Environment files present in the plan but absent from the map.
    env_files = fnmatch.filter(container_file_list,
                               '*environments/*.yaml')
    env_user_files = fnmatch.filter(container_file_list,
                                    '*user-environment.yaml')
    outstanding_envs = list(
        set(env_files).union(env_user_files) - set(plan_environments))

    # Re-key the map by topic title and flag selected environments.
    data_to_return = {}
    for topic in capabilities['topics']:
        title = topic.get('title', '_title_holder')
        data_to_return[title] = topic
        for group in topic['environment_groups']:
            for env_entry in group['environments']:
                # Membership test yields the same True/False as the
                # original if/else, in one expression.
                env_entry['enabled'] = (
                    env_entry.get('file') in selected_envs)

    # Synthesize entries for the unknown environment files.
    other_environments = [
        {
            "description": "Enable %s environment" % env_file,
            # Bug fix: the original ``selected_envs and env in
            # selected_envs`` stored the raw empty list [] in
            # 'enabled' when nothing was selected; a plain membership
            # test always yields a bool.
            "enabled": env_file in selected_envs,
            "file": env_file,
            "title": env_file,
        }
        for env_file in outstanding_envs
    ]
    other_environments.sort(key=lambda item: item['file'])

    other_environment_groups = [
        {
            "description": None,
            "environments": [other_env],
            "title": other_env['file'],
        }
        for other_env in other_environments
    ]

    data_to_return.update({
        "Other": {
            "description": None,
            "title": "Other",
            "environment_groups": other_environment_groups,
        }
    })

    return data_to_return