def run(self):
    """Render Ansible vars files for the cluster.

    Writes the cleaned cluster model to roles/common/vars/main.yml, populates
    the group vars, then writes a vars/main.yml for every enabled role whose
    matching 'configuration/<feature>' document is present in self.config_docs.
    Roles without a config document are skipped with a warning.
    """
    enabled_roles = self.inventory_creator.get_enabled_roles()
    ansible_dir = get_ansible_path(self.cluster_model.specification.name)

    cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common', 'vars', 'main.yml')
    clean_cluster_model = self.get_clean_cluster_model()

    self.populate_group_vars(ansible_dir)

    with open(cluster_config_file_path, 'w') as stream:
        dump(clean_cluster_model, stream)

    for role in enabled_roles:
        # Hoist the kind string so the lambda does not rebuild it per document.
        kind = 'configuration/' + to_feature_name(role)
        document = select_first(self.config_docs, lambda x: x.kind == kind)
        if document is None:
            # logger.warn() is a deprecated alias of logger.warning().
            self.logger.warning('No config document for enabled role: ' + role)
            continue

        document = self.add_provider_info(document)

        vars_dir = os.path.join(ansible_dir, 'roles', to_role_name(role), 'vars')
        # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
        os.makedirs(vars_dir, exist_ok=True)

        vars_file_path = os.path.join(vars_dir, 'main.yml')
        with open(vars_file_path, 'w') as stream:
            dump(document, stream)
def generate(self):
    """Generate Ansible vars for either a normal run or an upgrade.

    When an inventory creator is available this is a regular run and vars are
    written for every enabled role; otherwise (upgrade) only the roles listed
    in the upgrade whitelist below are provisioned from defaults.
    """
    self.logger.info('Generate Ansible vars')
    # Use identity comparison with None (PEP 8), not ==/!=.
    if self.inventory_creator is not None:
        ansible_dir = get_ansible_path(self.cluster_model.specification.name)
    else:
        ansible_dir = get_ansible_path_for_build(self.inventory_upgrade.build_dir)

    self.populate_group_vars(ansible_dir)
    cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common', 'vars', 'main.yml')
    clean_cluster_model = self.get_clean_cluster_model()

    with open(cluster_config_file_path, 'w') as stream:
        dump(clean_cluster_model, stream)

    if self.inventory_creator is None:
        # For upgrade at this point we don't need any roles other than
        # common, upgrade, repository and image_registry:
        # - common is already provisioned from the cluster model constructed from the inventory
        # - upgrade should not require any additional config
        # Roles in the list below are provisioned for upgrade from defaults.
        enabled_roles = ['repository', 'image_registry']
    else:
        enabled_roles = self.inventory_creator.get_enabled_roles()

    for role in enabled_roles:
        # Hoist the kind string so the lambda does not rebuild it per document.
        kind = 'configuration/' + to_feature_name(role)
        document = select_first(self.config_docs, lambda x: x.kind == kind)
        if document is None:
            # logger.warn() is a deprecated alias of logger.warning().
            self.logger.warning('No config document for enabled role: ' + role)
            continue

        document.specification['provider'] = self.cluster_model.provider
        self.write_role_vars(ansible_dir, role, document)
def playbook_path(self, name):
    """Return the absolute path of the playbook ``<name>.yml``.

    Resolves against the cluster's Ansible directory when a cluster model is
    present, otherwise against the build directory (upgrade mode).
    """
    # Use identity comparison with None (PEP 8), not !=.
    if self.cluster_model is not None:
        base_dir = get_ansible_path(self.cluster_model.specification.name)
    else:
        base_dir = get_ansible_path_for_build(self.build_dir)
    return os.path.join(base_dir, f'{name}.yml')
def run(self):
    """Create the inventory, prepare hosts, then apply all enabled playbooks.

    Runs common.yml first and aborts on its failure; afterwards runs one
    playbook per enabled role, stopping at the first failing role.
    """
    inventory_path = get_inventory_path(self.cluster_model.specification.name)
    # Hoisted: this path was recomputed three times in the original.
    ansible_dir = get_ansible_path(self.cluster_model.specification.name)

    # create inventory on every run
    self.inventory_creator.create()
    # NOTE(review): fixed delay after inventory creation — presumably waits for
    # hosts/inventory to settle; confirm whether a poll would be more robust.
    time.sleep(10)

    copy_files_recursively(AnsibleRunner.ANSIBLE_PLAYBOOKS_PATH, ansible_dir)

    # todo: install packages to run ansible on Red Hat hosts
    self.ansible_command.run_task_with_retries(
        hosts="all",
        inventory=inventory_path,
        module="raw",
        args="cat /etc/lsb-release | grep -i DISTRIB_ID | grep -i ubuntu && "
             "sudo apt-get install -y python-simplejson "
             "|| echo 'Cannot find information about Ubuntu distribution'",
        retries=5)

    self.ansible_vars_generator.run()

    common_play_result = self.ansible_command.run_playbook_with_retries(
        inventory=inventory_path,
        playbook_path=os.path.join(ansible_dir, "common.yml"),
        retries=5)
    if common_play_result != 0:
        # Without a working common play the role playbooks cannot succeed.
        return

    enabled_roles = self.inventory_creator.get_enabled_roles()
    for role in enabled_roles:
        play_result = self.ansible_command.run_playbook_with_retries(
            inventory=inventory_path,
            playbook_path=os.path.join(ansible_dir, to_role_name(role) + ".yml"),
            retries=1)
        if play_result != 0:
            break
def copy_resources(self):
    """Recreate the Ansible directory from the bundled playbooks.

    The target directory is wiped first, then the packaged playbooks are
    copied in. Unless offline requirements are configured, the bundled
    skopeo binary is also staged in /tmp for Ansible to pick up.
    """
    self.logger.info('Copying Ansible resources')
    # Use identity comparison with None (PEP 8), not !=.
    if self.cluster_model is not None:
        ansible_dir = get_ansible_path(self.cluster_model.specification.name)
    else:
        ansible_dir = get_ansible_path_for_build(self.build_dir)

    shutil.rmtree(ansible_dir, ignore_errors=True)
    copy_files_recursively(AnsibleRunner.ANSIBLE_PLAYBOOKS_PATH, ansible_dir)

    # copy skopeo so Ansible can move it to the repository machine
    if not Config().offline_requirements:
        # NOTE(review): locating 'skopeo_linux' relative to the stdlib `os`
        # module's file looks fragile — presumably it resolves to the package
        # root in the shipped layout; confirm against the distribution.
        shutil.copy(
            os.path.join(dirname(dirname(inspect.getfile(os))), 'skopeo_linux'),
            '/tmp')
def generate(self):
    """Generate Ansible vars for either a normal run or an upgrade run.

    Normal runs write vars for every enabled role. Upgrade runs provision a
    fixed whitelist of roles from defaults and re-render manifest-driven
    configs (e.g. haproxy) instead of patching existing files.
    """
    self.logger.info('Generate Ansible vars')
    # Use identity comparison with None (PEP 8), not ==.
    self.is_upgrade_run = self.inventory_creator is None
    if self.is_upgrade_run:
        ansible_dir = get_ansible_path_for_build(self.inventory_upgrade.build_dir)
    else:
        ansible_dir = get_ansible_path(self.cluster_model.specification.name)

    cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common', 'vars', 'main.yml')
    clean_cluster_model = self.get_clean_cluster_model()

    with open(cluster_config_file_path, 'w') as stream:
        dump(clean_cluster_model, stream)

    if self.is_upgrade_run:
        # For upgrade at this point we don't need any roles other than common,
        # repository, image_registry and node_exporter:
        # - common is already provisioned from the cluster model constructed from the inventory
        # - (if possible) upgrade should not require any additional config
        # Roles in the list below are provisioned for upgrade from defaults.
        roles_with_defaults = ['repository', 'image_registry', 'node_exporter']
        # In special cases (like haproxy), where the user specifies the majority of the config,
        # it's easier (and less awkward) to re-render config templates instead of modifying
        # (for example with regular expressions) no-longer-compatible config files.
        roles_with_manifest = ['haproxy']
    else:
        roles_with_defaults = self.inventory_creator.get_enabled_roles()
        roles_with_manifest = []  # applies only to upgrades

    for role in roles_with_defaults:
        kind = 'configuration/' + to_feature_name(role)
        document = select_first(self.config_docs, lambda x: x.kind == kind)
        if document is None:
            # logger.warn() is a deprecated alias of logger.warning().
            self.logger.warning('No config document for enabled role: ' + role)
            continue

        document.specification['provider'] = self.cluster_model.provider
        self.write_role_vars(ansible_dir, role, document)

    for role in roles_with_manifest:
        kind = 'configuration/' + to_feature_name(role)
        self.write_role_manifest_vars(ansible_dir, role, kind)

    self.populate_group_vars(ansible_dir)