def run(self):
    """Render Ansible vars files for the cluster.

    Writes the cleaned cluster model to roles/common/vars/main.yml,
    populates group_vars, then dumps one vars/main.yml per enabled role
    that has a matching 'configuration/<feature>' document.
    """
    enabled_roles = self.inventory_creator.get_enabled_roles()
    ansible_dir = get_ansible_path(self.cluster_model.specification.name)

    cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common', 'vars', 'main.yml')
    clean_cluster_model = self.get_clean_cluster_model()

    self.populate_group_vars(ansible_dir)

    with open(cluster_config_file_path, 'w') as stream:
        dump(clean_cluster_model, stream)

    for role in enabled_roles:
        # Hoist the kind string so the predicate does not build it on every probe
        # and does not close over the raw loop variable (linter B023 smell).
        kind = 'configuration/' + to_feature_name(role)
        document = select_first(self.config_docs, lambda x: x.kind == kind)
        if document is None:
            # Best-effort: a role without a config document is skipped, not fatal.
            self.logger.warn('No config document for enabled role: ' + role)
            continue
        document = self.add_provider_info(document)

        vars_dir = os.path.join(ansible_dir, 'roles', to_role_name(role), 'vars')
        # exist_ok=True is race-free, unlike the previous exists()+makedirs() pair.
        os.makedirs(vars_dir, exist_ok=True)

        vars_file_path = os.path.join(vars_dir, 'main.yml')
        with open(vars_file_path, 'w') as stream:
            dump(document, stream)
def populate_group_vars(self, ansible_dir):
    """Assemble and append group_vars/all.yml under *ansible_dir*.

    Collects runtime flags (admin user, cert validation, offline
    requirements, pod waiting, upgrade flag, generated-vars roles) and
    merges in the shared-config document's specification. On upgrade runs
    the shared config comes from the manifest; otherwise from the user
    config docs, falling back to the packaged defaults.
    """
    config = Config()  # hoisted: previously acquired three separate times
    main_vars = ObjDict()
    main_vars['admin_user'] = self.cluster_model.specification.admin_user
    main_vars['validate_certs'] = config.validate_certs
    main_vars['offline_requirements'] = config.offline_requirements
    main_vars['wait_for_pods'] = config.wait_for_pods
    main_vars['is_upgrade_run'] = self.is_upgrade_run
    # Sorted for deterministic output across runs.
    main_vars['roles_with_generated_vars'] = sorted(self.roles_with_generated_vars)

    if self.is_upgrade_run:
        shared_config_doc = self.get_shared_config_from_manifest()
    else:
        shared_config_doc = select_first(self.config_docs,
                                         lambda x: x.kind == 'configuration/shared-config')

    # Fall back to packaged defaults when no shared-config was supplied.
    if shared_config_doc is None:
        shared_config_doc = load_yaml_obj(types.DEFAULT, 'common', 'configuration/shared-config')

    self.set_vault_path(shared_config_doc)
    main_vars.update(shared_config_doc.specification)

    vars_dir = os.path.join(ansible_dir, 'group_vars')
    # exist_ok=True is race-free, unlike the previous exists()+makedirs() pair.
    os.makedirs(vars_dir, exist_ok=True)

    vars_file_path = os.path.join(vars_dir, 'all.yml')
    # Append ('a') on purpose: earlier steps may already have written vars here.
    with open(vars_file_path, 'a') as stream:
        dump(main_vars, stream)
def generate(self):
    """Generate Ansible vars files for a regular run or an upgrade run.

    Picks the ansible directory from the inventory creator (regular run)
    or the upgrade build dir, writes group vars and the cleaned cluster
    model, then dumps per-role vars for each enabled role that has a
    matching 'configuration/<feature>' document.
    """
    self.logger.info('Generate Ansible vars')
    # Fixed: identity comparison per PEP 8 (was "!= None" / "== None").
    if self.inventory_creator is not None:
        ansible_dir = get_ansible_path(self.cluster_model.specification.name)
    else:
        ansible_dir = get_ansible_path_for_build(self.inventory_upgrade.build_dir)

    self.populate_group_vars(ansible_dir)
    cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common', 'vars', 'main.yml')
    clean_cluster_model = self.get_clean_cluster_model()
    with open(cluster_config_file_path, 'w') as stream:
        dump(clean_cluster_model, stream)

    if self.inventory_creator is None:
        # For upgrade at this point we don't need any roles other than
        # common, upgrade, repository and image_registry.
        # - common is already provisioned from the cluster model constructed from the inventory.
        # - upgrade should not require any additional config
        # Roles in the list below are provisioned for upgrade from defaults.
        enabled_roles = ['repository', 'image_registry']
    else:
        enabled_roles = self.inventory_creator.get_enabled_roles()

    for role in enabled_roles:
        # Hoisted so the predicate does not close over the raw loop variable.
        kind = 'configuration/' + to_feature_name(role)
        document = select_first(self.config_docs, lambda x: x.kind == kind)
        if document is None:
            self.logger.warn('No config document for enabled role: ' + role)
            continue
        document.specification['provider'] = self.cluster_model.provider
        self.write_role_vars(ansible_dir, role, document)
def write_role_vars(self, ansible_dir, role, document):
    """Dump *document* to roles/<role>/vars/main.yml under *ansible_dir*,
    creating the vars directory if needed."""
    vars_dir = os.path.join(ansible_dir, 'roles', to_role_name(role), 'vars')
    # exist_ok=True is race-free, unlike the previous exists()+makedirs() pair.
    os.makedirs(vars_dir, exist_ok=True)

    vars_file_path = os.path.join(vars_dir, 'main.yml')
    with open(vars_file_path, 'w') as stream:
        dump(document, stream)
def populate_group_vars(self, ansible_dir):
    """Write group_vars/all.yml holding the admin user name and the
    certificate-validation flag for this cluster."""
    group_vars = ObjDict()
    group_vars = self.add_admin_user_name(group_vars)
    group_vars = self.add_validate_certs(group_vars)

    group_vars_dir = os.path.join(ansible_dir, 'group_vars')
    if not os.path.exists(group_vars_dir):
        os.makedirs(group_vars_dir)

    target_path = os.path.join(group_vars_dir, 'all.yml')
    with open(target_path, 'w') as out:
        dump(group_vars, out)
def _update_role_files_and_vars(self, action, document):
    """Render mandatory vars files for backup/recovery ansible roles inside the existing build directory."""
    self.logger.info(f'Updating {action} role files...')

    # Refresh the role's files in the build dir from the shipped playbooks.
    destination = os.path.join(self.build_directory, 'ansible/roles', action)
    source = os.path.join(AnsibleRunner.ANSIBLE_PLAYBOOKS_PATH, 'roles', action)
    copy_files_recursively(source, destination)

    # Render the role's vars document.
    target_vars_dir = os.path.join(destination, 'vars')
    os.makedirs(target_vars_dir, exist_ok=True)
    with open(os.path.join(target_vars_dir, 'main.yml'), 'w') as out:
        dump(document, out)
def generate(self):
    """Generate Ansible vars files for an apply run or an upgrade run.

    Writes the cleaned cluster model to roles/common/vars/main.yml, dumps
    per-role vars (from user/default config docs, or — on upgrade — from
    the manifest for selected roles), then populates group_vars.
    """
    self.logger.info('Generate Ansible vars')
    # Fixed: identity comparison per PEP 8 (was "== None").
    self.is_upgrade_run = self.inventory_creator is None
    if self.is_upgrade_run:
        ansible_dir = get_ansible_path_for_build(self.inventory_upgrade.build_dir)
    else:
        ansible_dir = get_ansible_path(self.cluster_model.specification.name)

    cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common', 'vars', 'main.yml')
    clean_cluster_model = self.get_clean_cluster_model()
    with open(cluster_config_file_path, 'w') as stream:
        dump(clean_cluster_model, stream)

    if self.is_upgrade_run:
        # For upgrade at this point we don't need any roles other than common, repository,
        # image_registry and node_exporter.
        # - common is already provisioned from the cluster model constructed from the inventory
        # - (if possible) upgrade should not require any additional config
        # Roles in the list below are provisioned for upgrade from defaults.
        roles_with_defaults = ['repository', 'image_registry', 'node_exporter']
        # In special cases (like haproxy), where the user specifies the majority of the config,
        # it's easier (and less awkward) to re-render config templates instead of modifying
        # (for example with regular expressions) no-longer-compatible config files.
        roles_with_manifest = ['haproxy']
    else:
        roles_with_defaults = self.inventory_creator.get_enabled_roles()
        roles_with_manifest = []  # applies only to upgrades

    for role in roles_with_defaults:
        kind = 'configuration/' + to_feature_name(role)
        document = select_first(self.config_docs, lambda x: x.kind == kind)
        if document is None:
            self.logger.warn('No config document for enabled role: ' + role)
            continue
        document.specification['provider'] = self.cluster_model.provider
        self.write_role_vars(ansible_dir, role, document)

    for role in roles_with_manifest:
        kind = 'configuration/' + to_feature_name(role)
        self.write_role_manifest_vars(ansible_dir, role, kind)

    self.populate_group_vars(ansible_dir)
def populate_group_vars(self, ansible_dir):
    """Assemble and append group_vars/all.yml under *ansible_dir*.

    Collects runtime flags (admin user, cert validation, offline
    requirements, pod waiting) and merges in the shared-config document's
    specification, falling back to the packaged defaults when the user
    supplied no shared-config document.
    """
    config = Config()  # hoisted: previously acquired three separate times
    main_vars = ObjDict()
    main_vars['admin_user'] = self.cluster_model.specification.admin_user
    main_vars['validate_certs'] = config.validate_certs
    main_vars['offline_requirements'] = config.offline_requirements
    main_vars['wait_for_pods'] = config.wait_for_pods

    shared_config_doc = select_first(self.config_docs,
                                     lambda x: x.kind == 'configuration/shared-config')
    # Fixed: identity comparison per PEP 8 (was "== None").
    if shared_config_doc is None:
        shared_config_doc = load_yaml_obj(types.DEFAULT, 'common', 'configuration/shared-config')
    main_vars.update(shared_config_doc.specification)

    vars_dir = os.path.join(ansible_dir, 'group_vars')
    # exist_ok=True is race-free, unlike the previous exists()+makedirs() pair.
    os.makedirs(vars_dir, exist_ok=True)

    vars_file_path = os.path.join(vars_dir, 'all.yml')
    # Append ('a') on purpose: earlier steps may already have written vars here.
    with open(vars_file_path, 'a') as stream:
        dump(main_vars, stream)
def save_sp(service_principle, cluster_name):
    """Persist the service principle document under the cluster's
    Terraform directory and return the path it was written to."""
    sp_path = os.path.join(get_terraform_path(cluster_name), SP_FILE_NAME)
    with open(sp_path, 'w') as out:
        dump(service_principle, out)
    return sp_path