def run(self):
    """Write per-role Ansible vars files for every enabled role.

    Dumps the cleaned cluster model to the common role's vars/main.yml,
    then, for each enabled role with a matching 'configuration/<feature>'
    document, writes that document (with provider info attached) to
    roles/<role>/vars/main.yml under the Ansible directory.
    """
    enabled_roles = self.inventory_creator.get_enabled_roles()
    ansible_dir = get_ansible_path(self.cluster_model.specification.name)

    cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common', 'vars', 'main.yml')
    clean_cluster_model = self.get_clean_cluster_model()

    self.populate_group_vars(ansible_dir)

    with open(cluster_config_file_path, 'w') as stream:
        dump(clean_cluster_model, stream)

    for role in enabled_roles:
        # Bind the expected kind once instead of rebuilding the string
        # inside the predicate for every candidate document.
        kind = 'configuration/' + to_feature_name(role)
        document = select_first(self.config_docs, lambda x: x.kind == kind)
        if document is None:
            # Logger.warn is a deprecated alias of Logger.warning.
            self.logger.warning('No config document for enabled role: ' + role)
            continue
        document = self.add_provider_info(document)

        vars_dir = os.path.join(ansible_dir, 'roles', to_role_name(role), 'vars')
        # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
        os.makedirs(vars_dir, exist_ok=True)

        vars_file_path = os.path.join(vars_dir, 'main.yml')
        with open(vars_file_path, 'w') as stream:
            dump(document, stream)
def generate(self):
    """Generate Ansible vars files for a full apply or an upgrade run.

    Writes the cleaned cluster model to the common role's vars/main.yml,
    then renders vars for the selected roles:
    - normal run: all roles enabled by the inventory creator;
    - upgrade run (no inventory creator): a fixed set of roles rendered
      from defaults, plus manifest-based roles (haproxy).
    Finally populates the shared group vars.
    """
    self.logger.info('Generate Ansible vars')
    # No inventory creator means we are upgrading an existing build.
    self.is_upgrade_run = self.inventory_creator is None
    if self.is_upgrade_run:
        ansible_dir = get_ansible_path_for_build(self.inventory_upgrade.build_dir)
    else:
        ansible_dir = get_ansible_path(self.cluster_model.specification.name)

    cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common', 'vars', 'main.yml')
    clean_cluster_model = self.get_clean_cluster_model()
    with open(cluster_config_file_path, 'w') as stream:
        dump(clean_cluster_model, stream)

    if self.is_upgrade_run:
        # For upgrade at this point we don't need any roles other than common,
        # repository, image_registry and node_exporter.
        # - common is already provisioned from the cluster model constructed from the inventory
        # - (if possible) upgrade should not require any additional config
        # Roles in the list below are provisioned for upgrade from defaults.
        roles_with_defaults = ['repository', 'image_registry', 'node_exporter']
        # In special cases (like haproxy), where the user specifies the majority of
        # the config, it's easier (and less awkward) to re-render config templates
        # instead of modifying (for example with regular expressions)
        # no-longer-compatible config files.
        roles_with_manifest = ['haproxy']
    else:
        roles_with_defaults = self.inventory_creator.get_enabled_roles()
        roles_with_manifest = []  # applies only to upgrades

    for role in roles_with_defaults:
        kind = 'configuration/' + to_feature_name(role)
        document = select_first(self.config_docs, lambda x: x.kind == kind)
        if document is None:
            # Logger.warn is a deprecated alias of Logger.warning.
            self.logger.warning('No config document for enabled role: ' + role)
            continue
        document.specification['provider'] = self.cluster_model.provider
        self.write_role_vars(ansible_dir, role, document)

    for role in roles_with_manifest:
        kind = 'configuration/' + to_feature_name(role)
        self.write_role_manifest_vars(ansible_dir, role, kind)

    self.populate_group_vars(ansible_dir)
def generate(self):
    """Generate Ansible vars files for a full apply or an upgrade run.

    Writes the cleaned cluster model to the common role's vars/main.yml,
    then, for each enabled role with a matching 'configuration/<feature>'
    document, tags the document with the provider and writes both the role
    vars and the role manifest vars. On upgrade (no inventory creator) a
    fixed set of roles is provisioned from defaults instead.
    """
    self.logger.info('Generate Ansible vars')
    # No inventory creator means we are upgrading an existing build.
    self.is_upgrade_run = self.inventory_creator is None
    if self.is_upgrade_run:
        ansible_dir = get_ansible_path_for_build(self.inventory_upgrade.build_dir)
    else:
        ansible_dir = get_ansible_path(self.cluster_model.specification.name)

    cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common', 'vars', 'main.yml')
    clean_cluster_model = self.get_clean_cluster_model()
    with open(cluster_config_file_path, 'w') as stream:
        dump(clean_cluster_model, stream)

    if self.is_upgrade_run:
        # For upgrade at this point we don't need any roles other than common,
        # upgrade, repository, image_registry, haproxy and node_exporter.
        # - common is already provisioned from the cluster model constructed from
        #   the inventory.
        # - upgrade should not require any additional config
        # Roles in the list below are provisioned for upgrade from defaults.
        enabled_roles = [
            'repository', 'image_registry', 'haproxy', 'node_exporter'
        ]
    else:
        enabled_roles = self.inventory_creator.get_enabled_roles()

    for role in enabled_roles:
        kind = 'configuration/' + to_feature_name(role)
        document = select_first(self.config_docs, lambda x: x.kind == kind)
        if document is None:
            # Logger.warn is a deprecated alias of Logger.warning.
            self.logger.warning('No config document for enabled role: ' + role)
            continue
        document.specification['provider'] = self.cluster_model.provider
        self.write_role_vars(ansible_dir, role, document)
        self.write_role_manifest_vars(ansible_dir, role, kind)

    self.populate_group_vars(ansible_dir)
def test_to_feature_name():
    """Underscores in a role name are converted to hyphens."""
    assert to_feature_name("route_table_association") == "route-table-association"