def take_action(self, parsed_args):
    """Introspect baremetal nodes, then optionally make them available.

    Either introspects every manageable node or only the explicitly
    given UUIDs, then (with --provide) moves the nodes to "available".
    """
    self.log.debug("take_action(%s)" % parsed_args)
    if parsed_args.all_manageable:
        baremetal.introspect_manageable_nodes(
            self.app.client_manager,
            run_validations=parsed_args.run_validations,
            concurrency=parsed_args.concurrency,
            verbosity=oooutils.playbook_verbosity(self=self)
        )
    else:
        baremetal.introspect(
            self.app.client_manager,
            node_uuids=parsed_args.node_uuids,
            run_validations=parsed_args.run_validations,
            concurrency=parsed_args.concurrency,
            verbosity=oooutils.playbook_verbosity(self=self)
        )

    # NOTE(cloudnull): This is using the old provide function, in a future
    # release this may be ported to a standalone playbook
    if parsed_args.provide:
        if parsed_args.node_uuids:
            # NOTE(review): unlike the other baremetal.* calls here, no
            # verbosity kwarg is passed — presumably the default applies;
            # confirm this is intentional.
            baremetal.provide(
                self.app.client_manager,
                node_uuids=parsed_args.node_uuids,
            )
        else:
            baremetal.provide_manageable_nodes(
                clients=self.app.client_manager,
                verbosity=oooutils.playbook_verbosity(self=self)
            )
def take_action(self, parsed_args):
    """Create a deployment plan from templates, a source URL, or defaults."""
    self.log.debug("take_action(%s)" % parsed_args)

    clients = self.app.client_manager

    name = parsed_args.name
    use_default_templates = False
    generate_passwords = not parsed_args.disable_password_generation
    source_url = parsed_args.source_url
    # if the templates and source_url params are not used, then
    # use the default templates
    if not parsed_args.templates and not parsed_args.source_url:
        use_default_templates = True

    if parsed_args.templates:
        plan_management.create_plan_from_templates(
            clients, name,
            parsed_args.templates,
            generate_passwords=generate_passwords,
            plan_env_file=parsed_args.plan_environment_file,
            validate_stack=False,
            verbosity_level=utils.playbook_verbosity(self=self))
    else:
        # NOTE(review): this branch does not pass `clients`, unlike the
        # templates branch above — verify create_deployment_plan's
        # signature really does not need it.
        plan_management.create_deployment_plan(
            container=name,
            generate_passwords=generate_passwords,
            source_url=source_url,
            use_default_templates=use_default_templates,
            validate_stack=False,
            verbosity_level=utils.playbook_verbosity(self=self))
def take_action(self, parsed_args):
    """Delete (scale down) overcloud nodes, optionally unprovisioning them.

    With --baremetal-deployment, the nodes to remove are computed from the
    roles file via a prompt-mode run of the unprovision playbook; otherwise
    the explicit node list is used. Prompts for confirmation unless --yes.
    """
    self.log.debug("take_action(%s)" % parsed_args)

    clients = self.app.client_manager

    if parsed_args.baremetal_deployment:
        with open(parsed_args.baremetal_deployment, 'r') as fp:
            roles = yaml.safe_load(fp)

        nodes_text, nodes = self._nodes_to_delete(parsed_args, roles)
        if nodes_text:
            print(nodes_text)
        else:
            # Nothing matched for deletion; bail out early.
            return
    else:
        nodes = parsed_args.nodes
        nodes_text = '\n'.join('- %s' % node for node in nodes)

    if not parsed_args.yes:
        confirm = oooutils.prompt_user_for_confirmation(message=_(
            "Are you sure you want to delete these overcloud "
            "nodes [y/N]? "), logger=self.log)
        if not confirm:
            raise oscexc.CommandError("Action not confirmed, exiting.")

    orchestration_client = clients.orchestration

    stack = oooutils.get_stack(orchestration_client, parsed_args.stack)
    if not stack:
        raise InvalidConfiguration("stack {} not found".format(
            parsed_args.stack))

    print(
        "Deleting the following nodes from stack {stack}:\n{nodes}".format(
            stack=stack.stack_name, nodes=nodes_text))

    scale.scale_down(
        log=self.log,
        clients=clients,
        stack=stack,
        nodes=nodes,
        connection_timeout=parsed_args.overcloud_ssh_port_timeout,
        timeout=parsed_args.timeout,
        verbosity=oooutils.playbook_verbosity(self=self))

    if parsed_args.baremetal_deployment:
        # Actually unprovision the baremetal nodes (prompt=False).
        with oooutils.TempDirs() as tmp:
            oooutils.run_ansible_playbook(
                playbook='cli-overcloud-node-unprovision.yaml',
                inventory='localhost,',
                workdir=tmp,
                playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
                verbosity=oooutils.playbook_verbosity(self=self),
                extra_vars={
                    "stack_name": parsed_args.stack,
                    "baremetal_deployment": roles,
                    "prompt": False,
                })
def take_action(self, parsed_args):
    """Reset BIOS configuration on the given nodes, or on all manageable ones."""
    self.log.debug("take_action({args})".format(args=parsed_args))
    clients = self.app.client_manager

    if not parsed_args.node_uuids:
        # No explicit node list given: operate on every manageable node.
        baremetal.reset_bios_configuration_on_manageable_nodes(
            clients=clients,
            verbosity=utils.playbook_verbosity(self=self))
        return

    baremetal.reset_bios_configuration(
        node_uuids=parsed_args.node_uuids,
        verbosity=utils.playbook_verbosity(self=self))
def take_action(self, parsed_args):
    """Unprovision overcloud nodes described by the input roles file.

    Without --yes, first runs the playbook in prompt mode to produce the
    list of nodes that would be removed, shows it, and asks for
    confirmation before the real unprovision pass.
    """
    self.log.debug("take_action(%s)" % parsed_args)

    with open(parsed_args.input, 'r') as fp:
        roles = yaml.safe_load(fp)

    with oooutils.TempDirs() as tmp:
        unprovision_confirm = os.path.join(tmp, 'unprovision_confirm.json')

        if not parsed_args.yes:
            # Dry-run pass: prompt=True makes the playbook only write the
            # candidate node list to unprovision_confirm.
            oooutils.run_ansible_playbook(
                playbook='cli-overcloud-node-unprovision.yaml',
                inventory='localhost,',
                workdir=tmp,
                playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
                verbosity=oooutils.playbook_verbosity(self=self),
                extra_vars={
                    "stack_name": parsed_args.stack,
                    "baremetal_deployment": roles,
                    "all": parsed_args.all,
                    "prompt": True,
                    "unprovision_confirm": unprovision_confirm
                }
            )
            with open(unprovision_confirm) as f:
                to_unprovision = json.load(f)
            if not to_unprovision:
                print('Nothing to unprovision, exiting')
                return
            self._print_nodes(to_unprovision)

            confirm = oooutils.prompt_user_for_confirmation(
                message=_("Are you sure you want to unprovision these %s "
                          "nodes [y/N]? ") % parsed_args.stack,
                logger=self.log)
            if not confirm:
                raise oscexc.CommandError("Action not confirmed, exiting.")

        # Real pass: prompt=False actually unprovisions the nodes.
        oooutils.run_ansible_playbook(
            playbook='cli-overcloud-node-unprovision.yaml',
            inventory='localhost,',
            workdir=tmp,
            playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
            verbosity=oooutils.playbook_verbosity(self=self),
            extra_vars={
                "stack_name": parsed_args.stack,
                "baremetal_deployment": roles,
                "all": parsed_args.all,
                "prompt": False,
            }
        )

    print('Unprovision complete')
def take_action(self, parsed_args):
    """Provision overcloud baremetal nodes from a roles file.

    Reads the roles definition and the public SSH key, then runs the
    provision playbook which writes the deployed-nodes environment file
    to parsed_args.output.
    """
    self.log.debug("take_action(%s)" % parsed_args)

    with open(parsed_args.input, 'r') as fp:
        roles = yaml.safe_load(fp)

    key = self.get_key_pair(parsed_args)
    with open('{}.pub'.format(key), 'rt') as fp:
        ssh_key = fp.read()

    output_path = os.path.abspath(parsed_args.output)

    extra_vars = {
        "stack_name": parsed_args.stack,
        "baremetal_deployment": roles,
        "baremetal_deployed_path": output_path,
        "ssh_public_keys": ssh_key,
        "ssh_user_name": parsed_args.overcloud_ssh_user,
        "node_timeout": parsed_args.timeout,
        "concurrency": parsed_args.concurrency
    }

    with oooutils.TempDirs() as tmp:
        oooutils.run_ansible_playbook(
            playbook='cli-overcloud-node-provision.yaml',
            inventory='localhost,',
            workdir=tmp,
            playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
            verbosity=oooutils.playbook_verbosity(self=self),
            extra_vars=extra_vars,
        )

    print('Nodes deployed successfully, add %s to your deployment '
          'environment' % parsed_args.output)
def take_action(self, parsed_args):
    """Delete the overcloud stack after confirmation, cleaning up FreeIPA first."""
    self.log.debug("take_action({args})".format(args=parsed_args))

    self._validate_args(parsed_args)

    if not parsed_args.yes and not utils.prompt_user_for_confirmation(
            message=_(
                "Are you sure you want to delete this overcloud "
                "[y/N]? "), logger=self.log):
        raise oscexc.CommandError("Action not confirmed, exiting.")

    playbooks = ["cli-overcloud-delete.yaml"]
    if not parsed_args.skip_ipa_cleanup:
        # Order is important, let's make sure we cleanup FreeIPA before we
        # start removing infrastructure.
        playbooks.insert(0, "cli-cleanup-ipa.yml")

    with utils.TempDirs() as tmp:
        utils.run_ansible_playbook(
            playbooks,
            constants.ANSIBLE_INVENTORY,
            workdir=tmp,
            playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
            verbosity=utils.playbook_verbosity(self=self),
            extra_vars={"stack_name": parsed_args.stack})

    print("Success.")
def take_action(self, parsed_args):
    """Run the external-update playbooks through config-download."""
    self.log.debug("take_action(%s)" % parsed_args)
    _, ansible_dir = self.get_ansible_key_and_dir(
        no_workflow=True,
        stack=parsed_args.stack,
        orchestration=self.app.client_manager.orchestration)
    deployment.config_download(
        log=self.log,
        clients=self.app.client_manager,
        stack=oooutils.get_stack(self.app.client_manager.orchestration,
                                 parsed_args.stack),
        output_dir=ansible_dir,
        verbosity=oooutils.playbook_verbosity(self=self),
        ansible_playbook_name=constants.EXTERNAL_UPDATE_PLAYBOOKS,
        extra_vars=oooutils.parse_extra_vars(
            extra_var_strings=parsed_args.extra_vars),
        inventory_path=oooutils.get_tripleo_ansible_inventory(
            parsed_args.static_inventory,
            parsed_args.ssh_user,
            parsed_args.stack,
            return_inventory_file_path=True),
        tags=parsed_args.tags,
        skip_tags=parsed_args.skip_tags,
        limit_hosts=oooutils.playbook_limit_parse(
            limit_nodes=parsed_args.limit))
    self.log.info("Completed Overcloud External Update Run.")
def take_action(self, parsed_args):
    """Run the fast-forward-upgrade playbook through config-download."""
    self.log.debug("take_action(%s)" % parsed_args)
    _, ansible_dir = self.get_ansible_key_and_dir(
        no_workflow=True,
        stack=parsed_args.stack,
        orchestration=self.app.client_manager.orchestration
    )
    deployment.config_download(
        log=self.log,
        clients=self.app.client_manager,
        stack=oooutils.get_stack(
            self.app.client_manager.orchestration,
            parsed_args.stack
        ),
        output_dir=ansible_dir,
        verbosity=oooutils.playbook_verbosity(self=self),
        ansible_playbook_name=constants.FFWD_UPGRADE_PLAYBOOK,
        inventory_path=oooutils.get_tripleo_ansible_inventory(
            parsed_args.static_inventory,
            parsed_args.ssh_user,
            parsed_args.stack,
            return_inventory_file_path=True
        )
    )
    self.log.info("Completed Overcloud FFWD Upgrade Run.")
def take_action(self, parsed_args): self.log.debug("take_action(%s)" % parsed_args) # NOTE(cloudnull): The string option "all" was a special default # that is no longer relevant. To retain compatibility # this condition has been put in place. if not parsed_args.playbook or parsed_args.playbook == ['all']: playbook = constants.MAJOR_UPGRADE_PLAYBOOKS else: playbook = parsed_args.playbook _, ansible_dir = self.get_ansible_key_and_dir( no_workflow=parsed_args.no_workflow, stack=parsed_args.stack, orchestration=self.app.client_manager.orchestration) deployment.config_download( log=self.log, clients=self.app.client_manager, stack=oooutils.get_stack(self.app.client_manager.orchestration, parsed_args.stack), output_dir=ansible_dir, verbosity=oooutils.playbook_verbosity(self=self), ansible_playbook_name=playbook, inventory_path=oooutils.get_tripleo_ansible_inventory( parsed_args.static_inventory, parsed_args.ssh_user, parsed_args.stack, return_inventory_file_path=True), tags=parsed_args.tags, skip_tags=parsed_args.skip_tags, limit_list=[i.strip() for i in parsed_args.limit.split(',') if i]) self.log.info("Completed Overcloud Major Upgrade Run.")
def take_action(self, parsed_args):
    """Register baremetal nodes from an env file; optionally introspect/provide.

    With --validate-only, only validates the node definitions and returns.
    """
    self.log.debug("take_action(%s)" % parsed_args)

    nodes_config = oooutils.parse_env_file(parsed_args.env_file)
    parsed_args.env_file.close()

    if parsed_args.validate_only:
        return baremetal.validate_nodes(self.app.client_manager,
                                        nodes_json=nodes_config)

    # Look for *specific* deploy images and update the node data if
    # one is found.
    if not parsed_args.no_deploy_image:
        oooutils.update_nodes_deploy_data(nodes_config,
                                          http_boot=parsed_args.http_boot)

    nodes = baremetal.register_or_update(
        self.app.client_manager,
        nodes_json=nodes_config,
        instance_boot_option=parsed_args.instance_boot_option
    )

    nodes_uuids = [node.uuid for node in nodes]

    if parsed_args.introspect:
        extra_vars = {
            "node_uuids": nodes_uuids,
            "run_validations": parsed_args.run_validations,
            "concurrency": parsed_args.concurrency,
        }
        with oooutils.TempDirs() as tmp:
            oooutils.run_ansible_playbook(
                playbook='cli-baremetal-introspect.yaml',
                inventory='localhost,',
                workdir=tmp,
                playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
                verbosity=oooutils.playbook_verbosity(self=self),
                extra_vars=extra_vars
            )

    if parsed_args.provide:
        baremetal.provide(
            verbosity=oooutils.playbook_verbosity(self=self),
            node_uuids=nodes_uuids
        )
def take_action(self, parsed_args):
    """Apply a BIOS configuration to given nodes, or all manageable ones.

    The configuration argument may be a path to a YAML file or an inline
    YAML string; only a shallow shape check is done here, the full
    validation is deferred to Ironic.
    """
    self.log.debug("take_action({args})".format(args=parsed_args))

    if os.path.exists(parsed_args.configuration):
        with open(parsed_args.configuration, 'r') as fp:
            configuration = yaml.safe_load(fp.read())
    else:
        try:
            configuration = yaml.safe_load(parsed_args.configuration)
        except yaml.YAMLError as exc:
            raise RuntimeError(
                _('Configuration is not an existing file and cannot be '
                  'parsed as YAML: %s') % exc)

    # Basic sanity check, we defer the full check to Ironic
    try:
        settings = configuration['settings']
    except KeyError:
        raise ValueError(_('Configuration must contain key "settings"'))
    except TypeError:
        raise TypeError(
            _('Configuration must be an object, got %r instead')
            % configuration)

    if (not isinstance(settings, list) or
            not all(isinstance(item, dict) for item in settings)):
        raise TypeError(
            _('BIOS settings list is expected to be a list of '
              'objects, got %r instead') % settings)

    clients = self.app.client_manager
    if parsed_args.node_uuids:
        baremetal.apply_bios_configuration(
            node_uuids=parsed_args.node_uuids,
            configuration=configuration,
            verbosity=utils.playbook_verbosity(self=self))
    else:
        baremetal.apply_bios_configuration_on_manageable_nodes(
            clients,
            configuration=configuration,
            verbosity=utils.playbook_verbosity(self=self))
def take_action(self, parsed_args):
    """Enable SSH admin access on the overcloud hosts of the given stack."""
    self.log.debug("take_action({})".format(parsed_args))

    orchestration = self.app.client_manager.orchestration
    target_stack = oooutils.get_stack(orchestration, parsed_args.stack)
    key_pair = self.get_key_pair(parsed_args)

    deployment.get_hosts_and_enable_ssh_admin(
        target_stack,
        parsed_args.overcloud_ssh_network,
        parsed_args.overcloud_ssh_user,
        key_pair,
        parsed_args.overcloud_ssh_port_timeout,
        verbosity=oooutils.playbook_verbosity(self=self))
def take_action(self, parsed_args):
    """Clean given nodes (or all manageable ones), optionally provide them."""
    self.log.debug("take_action(%s)" % parsed_args)

    nodes = parsed_args.node_uuids

    if nodes:
        baremetal.clean_nodes(
            node_uuids=parsed_args.node_uuids,
            verbosity=oooutils.playbook_verbosity(self=self))
    else:
        baremetal.clean_manageable_nodes(
            clients=self.app.client_manager,
            verbosity=oooutils.playbook_verbosity(self=self))

    if parsed_args.provide:
        if nodes:
            # NOTE(review): no verbosity kwarg here, unlike the clean
            # calls above — presumably the default applies; confirm.
            baremetal.provide(
                self.app.client_manager,
                node_uuids=nodes,
            )
        else:
            baremetal.provide_manageable_nodes(self.app.client_manager)
def take_action(self, parsed_args): self.log.debug("take_action(%s)" % parsed_args) # Throw deprecation warning if service is enabled and # ask user if upgrade should still be continued. if parsed_args.environment_files: oooutils.check_deprecated_service_is_enabled( parsed_args.environment_files) clients = self.app.client_manager stack = oooutils.get_stack(clients.orchestration, parsed_args.stack) stack_name = stack.stack_name # In case of update and upgrade we need to force the # update_plan_only. The heat stack update is done by the # packag_update mistral action parsed_args.update_plan_only = True # Add the upgrade-prepare.yaml environment to set noops etc templates_dir = (parsed_args.templates or constants.TRIPLEO_HEAT_TEMPLATES) parsed_args.environment_files = oooutils.prepend_environment( parsed_args.environment_files, templates_dir, constants.UPGRADE_PREPARE_ENV) super(UpgradePrepare, self).take_action(parsed_args) package_update.update(clients, container=stack_name) oooutils.get_config(clients, container=stack_name, container_config='{}-config'.format( stack.stack_name)) overcloudrcs = deployment.create_overcloudrc(clients, container=stack_name) oooutils.write_overcloudrc(stack_name, overcloudrcs) # refresh stack info and enable ssh admin for Ansible-via-Mistral stack = oooutils.get_stack(clients.orchestration, parsed_args.stack) deployment.get_hosts_and_enable_ssh_admin( stack, parsed_args.overcloud_ssh_network, parsed_args.overcloud_ssh_user, self.get_key_pair(parsed_args), parsed_args.overcloud_ssh_port_timeout, verbosity=oooutils.playbook_verbosity(self=self)) self.log.info("Completed Overcloud Upgrade Prepare for stack " "{0}".format(stack_name))
def take_action(self, parsed_args):
    """Prepare the overcloud for a fast-forward upgrade.

    Requires explicit operator confirmation (--yes or interactive).
    """
    self.log.debug("take_action(%s)" % parsed_args)

    oooutils.ffwd_upgrade_operator_confirm(parsed_args.yes, self.log)

    clients = self.app.client_manager

    stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)

    stack_name = stack.stack_name

    # In case of update and upgrade we need to force the
    # update_plan_only. The heat stack update is done by the
    # package_update mistral action
    parsed_args.update_plan_only = True

    # Add the prepare environment into the args to unset noop etc
    templates_dir = (parsed_args.templates or
                     constants.TRIPLEO_HEAT_TEMPLATES)
    if not parsed_args.environment_files:
        parsed_args.environment_files = []
    parsed_args.environment_files = oooutils.prepend_environment(
        parsed_args.environment_files, templates_dir,
        constants.FFWD_UPGRADE_PREPARE_ENV)

    super(FFWDUpgradePrepare, self).take_action(parsed_args)
    package_update.update(clients, container=stack_name)

    oooutils.get_config(
        clients, container=stack_name,
        container_config='{}-config'.format(stack.stack_name))
    overcloudrcs = deployment.create_overcloudrc(
        clients, container=stack_name)
    oooutils.write_overcloudrc(stack_name, overcloudrcs)

    # refresh stack info and enable ssh admin for Ansible-via-Mistral
    stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)
    deployment.get_hosts_and_enable_ssh_admin(
        stack,
        parsed_args.overcloud_ssh_network,
        parsed_args.overcloud_ssh_user,
        self.get_key_pair(parsed_args),
        parsed_args.overcloud_ssh_port_timeout,
        verbosity=oooutils.playbook_verbosity(self=self)
    )

    self.log.info("FFWD Upgrade Prepare on stack {0} complete.".format(
        parsed_args.stack))
def take_action(self, parsed_args):
    """Collect support logs (sosreports) from the requested server(s)."""
    self.log.debug('take_action({})'.format(parsed_args))

    with utils.TempDirs() as tmp:
        utils.run_ansible_playbook(
            playbook='cli-support-collect-logs.yaml',
            inventory=constants.ANSIBLE_INVENTORY,
            workdir=tmp,
            playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
            verbosity=utils.playbook_verbosity(self=self),
            extra_vars={
                'server_name': parsed_args.server_name,
                'sos_destination': parsed_args.destination,
            })
def take_action(self, parsed_args):
    """Deploy the named deployment plan and wait for it to finish."""
    self.log.debug("take_action(%s)" % parsed_args)

    clients = self.app.client_manager
    plan_name = parsed_args.name
    stack = utils.get_stack(clients.orchestration, plan_name)

    print("Starting to deploy plan: {}".format(plan_name))
    deployment.deploy_and_wait(
        log=self.log,
        clients=clients,
        stack=stack,
        plan_name=plan_name,
        verbose_level=utils.playbook_verbosity(self=self),
        timeout=parsed_args.timeout,
        run_validations=parsed_args.run_validations,
    )
def _provision_baremetal(self, parsed_args, tht_root):
    """Provision baremetal nodes for the deployment, if requested.

    Returns a list of generated environment file paths ([] when
    --baremetal-deployment is not used).
    """
    if not parsed_args.baremetal_deployment:
        return []

    with open(parsed_args.baremetal_deployment, 'r') as fp:
        roles = yaml.safe_load(fp)

    key = self.get_key_pair(parsed_args)
    with open('{}.pub'.format(key), 'rt') as fp:
        ssh_key = fp.read()

    output_path = self._user_env_path(
        'baremetal-deployed.yaml',
        tht_root
    )
    extra_vars = {
        "stack_name": parsed_args.stack,
        "baremetal_deployment": roles,
        "baremetal_deployed_path": output_path,
        "ssh_public_keys": ssh_key,
        "ssh_user_name": parsed_args.overcloud_ssh_user,
    }

    with utils.TempDirs() as tmp:
        utils.run_ansible_playbook(
            playbook='cli-overcloud-node-provision.yaml',
            inventory='localhost,',
            workdir=tmp,
            playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
            verbosity=utils.playbook_verbosity(self=self),
            extra_vars=extra_vars,
        )

    with open(output_path, 'r') as fp:
        parameter_defaults = yaml.safe_load(fp)

    # TODO(sbaker) Remove this call when it is no longer necessary
    # to write to a swift object
    self._write_user_environment(
        parameter_defaults,
        'baremetal-deployed.yaml',
        tht_root,
        parsed_args.stack)

    return [output_path]
def _unprovision_baremetal(self, parsed_args):
    """Unprovision the baremetal nodes described by --baremetal-deployment.

    No-op when the option was not given.
    """
    if not parsed_args.baremetal_deployment:
        return

    with open(parsed_args.baremetal_deployment, 'r') as fp:
        roles = yaml.safe_load(fp)

    with utils.TempDirs() as tmp:
        utils.run_ansible_playbook(
            playbook='cli-overcloud-node-unprovision.yaml',
            inventory='localhost,',
            workdir=tmp,
            playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
            verbosity=utils.playbook_verbosity(self=self),
            extra_vars={
                "stack_name": parsed_args.stack,
                "baremetal_deployment": roles,
                "prompt": False,
            })
def take_action(self, parsed_args):
    """Delete the overcloud stack (run from the undercloud) after confirmation."""
    self.log.debug("take_action({args})".format(args=parsed_args))

    self._validate_args(parsed_args)

    if not parsed_args.yes and not utils.prompt_user_for_confirmation(
            message=_(
                "Are you sure you want to delete this overcloud "
                "[y/N]? "), logger=self.log):
        raise oscexc.CommandError("Action not confirmed, exiting.")

    with utils.TempDirs() as tmp:
        utils.run_ansible_playbook(
            "cli-overcloud-delete.yaml",
            'undercloud,',
            workdir=tmp,
            playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
            verbosity=utils.playbook_verbosity(self=self),
            extra_vars={"stack_name": parsed_args.stack})

    print("Success.")
def _nodes_to_delete(self, parsed_args, roles):
    """Compute which nodes the roles file would unprovision.

    Runs the unprovision playbook in prompt mode so it only writes the
    candidate node list to a temp JSON file. Returns a tuple of
    (formatted table text, list of hostnames), or (None, None) when
    there is nothing to unprovision.
    """
    with oooutils.TempDirs() as tmp:
        unprovision_confirm = os.path.join(tmp, 'unprovision_confirm.json')
        oooutils.run_ansible_playbook(
            playbook='cli-overcloud-node-unprovision.yaml',
            inventory='localhost,',
            workdir=tmp,
            playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
            verbosity=oooutils.playbook_verbosity(self=self),
            extra_vars={
                "stack_name": parsed_args.stack,
                "baremetal_deployment": roles,
                "prompt": True,
                "unprovision_confirm": unprovision_confirm,
            })
        with open(unprovision_confirm) as f:
            nodes = json.load(f)

    if not nodes:
        print('No nodes to unprovision')
        return None, None

    # Minimal stand-in for the argparse namespace TableFormatter expects.
    TableArgs = collections.namedtuple(
        'TableArgs', 'print_empty max_width fit_width')
    args = TableArgs(print_empty=True, max_width=-1, fit_width=True)
    nodes_data = [(i.get('hostname', ''),
                   i.get('name', ''),
                   i.get('id', '')) for i in nodes]

    node_hostnames = [i['hostname'] for i in nodes if 'hostname' in i]

    formatter = table.TableFormatter()
    output = six.StringIO()
    formatter.emit_list(
        column_names=['hostname', 'name', 'id'],
        data=nodes_data,
        stdout=output,
        parsed_args=args)
    return output.getvalue(), node_hostnames
def take_action(self, parsed_args):
    """Update plan parameters from a .json or .yaml file.

    A top-level 'parameter_defaults' key, when present, is unwrapped so
    only the parameters themselves are sent to the playbook.
    """
    self.log.debug("take_action(%s)" % parsed_args)

    if parsed_args.file_in.name.endswith('.json'):
        params = simplejson.load(parsed_args.file_in)
    elif parsed_args.file_in.name.endswith('.yaml'):
        params = yaml.safe_load(parsed_args.file_in)
    else:
        raise exceptions.InvalidConfiguration(
            _("Invalid file extension for %s, must be json or yaml")
            % parsed_args.file_in.name)

    if 'parameter_defaults' in params:
        params = params['parameter_defaults']

    with utils.TempDirs() as tmp:
        utils.run_ansible_playbook(
            playbook='cli-update-params.yaml',
            inventory='localhost,',
            workdir=tmp,
            playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
            verbosity=utils.playbook_verbosity(self=self),
            extra_vars={"container": parsed_args.name},
            extra_vars_file={"parameters": params})
def take_action(self, parsed_args):
    """Create a RAID configuration on the given baremetal nodes.

    The configuration argument may be a path to a YAML file or an inline
    YAML string; only a shallow shape check is done here, the full
    validation is deferred to Ironic.
    """
    self.log.debug("take_action({args})".format(args=parsed_args))

    if os.path.exists(parsed_args.configuration):
        with open(parsed_args.configuration, 'r') as fp:
            configuration = yaml.safe_load(fp.read())
    else:
        try:
            configuration = yaml.safe_load(parsed_args.configuration)
        except yaml.YAMLError as exc:
            raise RuntimeError(
                _('Configuration is not an existing file and cannot be '
                  'parsed as YAML: %s') % exc)

    # Basic sanity check, we defer the full check to Ironic
    try:
        disks = configuration['logical_disks']
    except KeyError:
        raise ValueError(
            _('Configuration must contain key "logical_disks"'))
    except TypeError:
        raise TypeError(
            _('Configuration must be an object, got %r instead')
            % configuration)

    if (not isinstance(disks, list) or
            not all(isinstance(item, dict) for item in disks)):
        raise TypeError(
            _('Logical disks list is expected to be a list of objects, '
              'got %r instead') % disks)

    baremetal.create_raid_configuration(
        clients=self.app.client_manager,
        node_uuids=parsed_args.node,
        configuration=configuration,
        verbosity=utils.playbook_verbosity(self=self))
def take_action(self, parsed_args):
    """Deploy (create or update) the overcloud.

    Updates the plan / heat stack unless --config-download-only, then runs
    config-download to apply the software configuration, and finally runs
    post-deploy configuration and prints the overcloud access details.
    """
    self.log.debug("take_action(%s)" % parsed_args)
    self._setup_clients(parsed_args)

    # Swiftclient logs things like 404s at error level, which is a problem
    # because we use EAFP to check for the existence of files. Turn off
    # most swiftclient logging to avoid cluttering up our output with
    # pointless tracebacks.
    sc_logger = logging.getLogger("swiftclient")
    sc_logger.setLevel(logging.CRITICAL)

    self._validate_args(parsed_args)

    # Throw warning if deprecated service is enabled and
    # ask user if deployment should still be continued.
    if parsed_args.environment_files:
        utils.check_deprecated_service_is_enabled(
            parsed_args.environment_files)

    stack = utils.get_stack(self.orchestration_client, parsed_args.stack)

    self._update_parameters(parsed_args, stack)

    stack_create = stack is None
    if stack_create:
        self.log.info("No stack found, will be doing a stack create")
    else:
        self.log.info("Stack found, will be doing a stack update")

    if parsed_args.dry_run:
        print("Validation Finished")
        return

    start = time.time()

    if not parsed_args.config_download_only:
        self._deploy_tripleo_heat_templates_tmpdir(stack, parsed_args)

    # Get a new copy of the stack after stack update/create. If it was
    # a create then the previous stack object would be None.
    stack = utils.get_stack(self.orchestration_client, parsed_args.stack)
    if parsed_args.update_plan_only:
        # If we are only updating the plan, then we either wont have a
        # stack yet or there wont be any changes and the following code
        # wont do anything.
        return

    if parsed_args.config_download:
        print("Deploying overcloud configuration")
        deployment.set_deployment_status(
            clients=self.clients,
            plan=stack.stack_name,
            status='DEPLOYING'
        )

        try:
            if not parsed_args.config_download_only:
                deployment.get_hosts_and_enable_ssh_admin(
                    stack,
                    parsed_args.overcloud_ssh_network,
                    parsed_args.overcloud_ssh_user,
                    self.get_key_pair(parsed_args),
                    parsed_args.overcloud_ssh_port_timeout,
                    verbosity=utils.playbook_verbosity(self=self)
                )

            if parsed_args.config_download_timeout:
                timeout = parsed_args.config_download_timeout
            else:
                # Budget config-download with whatever time is left of the
                # overall --timeout after the stack operation.
                used = int((time.time() - start) // 60)
                timeout = parsed_args.timeout - used
                if timeout <= 0:
                    raise exceptions.DeploymentError(
                        'Deployment timed out after %sm' % used)

            deployment_options = {}
            if parsed_args.deployment_python_interpreter:
                deployment_options['ansible_python_interpreter'] = \
                    parsed_args.deployment_python_interpreter

            deployment.config_download(
                self.log,
                self.clients,
                stack,
                parsed_args.overcloud_ssh_network,
                parsed_args.output_dir,
                parsed_args.override_ansible_cfg,
                timeout=parsed_args.overcloud_ssh_port_timeout,
                verbosity=utils.playbook_verbosity(self=self),
                deployment_options=deployment_options,
                in_flight_validations=parsed_args.inflight,
                deployment_timeout=timeout,
                tags=parsed_args.tags,
                skip_tags=parsed_args.skip_tags,
                limit_hosts=utils.playbook_limit_parse(
                    limit_nodes=parsed_args.limit
                )
            )
            deployment.set_deployment_status(
                clients=self.clients,
                plan=stack.stack_name,
                status='DEPLOY_SUCCESS')
        except Exception:
            deployment.set_deployment_status(
                clients=self.clients,
                plan=stack.stack_name,
                status='DEPLOY_FAILED'
            )
            raise

    # Force fetching of attributes
    stack.get()

    rcpath = deployment.create_overcloudrc(container=stack.stack_name,
                                           no_proxy=parsed_args.no_proxy)

    # Copy clouds.yaml to the cloud user directory
    user = getpwuid(os.stat(constants.CLOUD_HOME_DIR).st_uid).pw_name
    utils.copy_clouds_yaml(user)
    utils.create_tempest_deployer_input()

    # Run postconfig on create or force. Use force to make sure endpoints
    # are created with deploy reruns and upgrades.
    # Parenthesized so --skip-postconfig is honoured on stack create too:
    # previously `and` bound tighter than `or`, so skip_postconfig was
    # silently ignored whenever stack_create was True.
    if ((stack_create or parsed_args.force_postconfig)
            and not parsed_args.skip_postconfig):
        self._deploy_postconfig(stack, parsed_args)

    overcloud_endpoint = utils.get_overcloud_endpoint(stack)
    horizon_url = deployment.get_horizon_url(stack=stack.stack_name)
    print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
    print("Overcloud Horizon Dashboard URL: {0}".format(horizon_url))
    print("Overcloud rc file: {0}".format(rcpath))
    print("Overcloud Deployed")
def _deploy_tripleo_heat_templates(self, stack, parsed_args,
                                   tht_root, user_tht_root):
    """Deploy the fixed templates in TripleO Heat Templates"""
    plans = plan_management.list_deployment_plans(self.clients)
    generate_passwords = not parsed_args.disable_password_generation

    # TODO(d0ugal): We need to put a more robust strategy in place here to
    # handle updating plans.
    if parsed_args.stack in plans:
        # Upload the new plan templates to swift to replace the existing
        # templates.
        plan_management.update_plan_from_templates(
            self.clients, parsed_args.stack, tht_root,
            parsed_args.roles_file, generate_passwords,
            parsed_args.plan_environment_file,
            parsed_args.networks_file,
            type(self)._keep_env_on_update,
            validate_stack=False,
            verbosity_level=utils.playbook_verbosity(self=self)
        )
    else:
        plan_management.create_plan_from_templates(
            self.clients, parsed_args.stack, tht_root,
            parsed_args.roles_file, generate_passwords,
            parsed_args.plan_environment_file,
            parsed_args.networks_file,
            validate_stack=False,
            verbosity_level=utils.playbook_verbosity(self=self)
        )

    # Get any missing (e.g j2 rendered) files from the plan to tht_root
    self._download_missing_files_from_plan(
        tht_root, parsed_args.stack)

    print("Processing templates in the directory {0}".format(
        os.path.abspath(tht_root)))

    self.log.debug("Creating Environment files")
    env = {}
    created_env_files = []

    # Baremetal provisioning may contribute an extra environment file.
    created_env_files.extend(
        self._provision_baremetal(parsed_args, tht_root))

    if parsed_args.environment_directories:
        created_env_files.extend(utils.load_environment_directories(
            parsed_args.environment_directories))

    parameters = {}
    if stack:
        try:
            # If user environment already exist then keep it
            user_env = yaml.safe_load(self.object_client.get_object(
                parsed_args.stack, constants.USER_ENVIRONMENT)[1])
            template_utils.deep_update(env, user_env)
        except ClientException:
            # No stored user environment yet; start fresh.
            pass
    parameters.update(self._update_parameters(parsed_args, stack))
    template_utils.deep_update(env, self._create_parameters_env(
        parameters, tht_root, parsed_args.stack))

    if parsed_args.environment_files:
        created_env_files.extend(parsed_args.environment_files)

    deployment_options = {}
    if parsed_args.deployment_python_interpreter:
        deployment_options['ansible_python_interpreter'] = \
            parsed_args.deployment_python_interpreter

    self.log.debug("Processing environment files %s" % created_env_files)
    env_files, localenv = utils.process_multiple_environments(
        created_env_files, tht_root, user_tht_root,
        cleanup=(not parsed_args.no_cleanup))
    template_utils.deep_update(env, localenv)

    if stack:
        if not parsed_args.disable_validations:
            # note(aschultz): network validation goes here before we deploy
            utils.check_stack_network_matches_env_files(stack, env)
        bp_cleanup = self._create_breakpoint_cleanup_env(
            tht_root, parsed_args.stack)
        template_utils.deep_update(env, bp_cleanup)

    # FIXME(shardy) It'd be better to validate this via mistral
    # e.g part of the plan create/update workflow
    number_controllers = int(parameters.get('ControllerCount', 0))
    if number_controllers > 1:
        if not env.get('parameter_defaults').get('NtpServer'):
            raise exceptions.InvalidConfiguration(
                'Specify --ntp-server as parameter or NtpServer in '
                'environments when using multiple controllers '
                '(with HA).')

    self._try_overcloud_deploy_with_compat_yaml(
        tht_root, stack, parsed_args.stack, parameters,
        env_files, parsed_args.timeout, env,
        parsed_args.update_plan_only,
        parsed_args.run_validations,
        parsed_args.skip_deploy_identifier,
        parsed_args.plan_environment_file,
        deployment_options=deployment_options)

    self._unprovision_baremetal(parsed_args)
def _process_and_upload_environment(self, container_name,
                                    env, moved_files, tht_root):
    """Process the environment and upload to Swift

    The environment at this point should be the result of the merged
    custom user environments. We need to look at the paths in the
    environment and update any that changed when they were uploaded to
    swift.
    """
    file_prefix = "file://"

    if env.get('resource_registry'):
        for name, path in env['resource_registry'].items():
            if not isinstance(path, six.string_types):
                continue
            if path in moved_files:
                new_path = moved_files[path]
                env['resource_registry'][name] = new_path
            elif path.startswith(file_prefix):
                path = path[len(file_prefix):]
                if path.startswith(tht_root):
                    path = path[len(tht_root):]
                # We want to make sure all the paths are relative.
                if path.startswith("/"):
                    path = path[1:]
                env['resource_registry'][name] = path

    # Parameters are removed from the environment
    params = env.pop('parameter_defaults', None)

    contents = yaml.safe_dump(env, default_flow_style=False)

    # Until we have a well defined plan update workflow in tripleo-common
    # we need to manually add an environment in swift and for users
    # custom environments passed to the deploy command.
    # See bug: https://bugs.launchpad.net/tripleo/+bug/1623431
    # Update plan env.
    swift_path = "user-environment.yaml"
    self.object_client.put_object(container_name, swift_path, contents)

    env = yaml.safe_load(self.object_client.get_object(
        container_name, constants.PLAN_ENVIRONMENT)[1])

    user_env = {'path': swift_path}
    if user_env not in env['environments']:
        env['environments'].append(user_env)
        yaml_string = yaml.safe_dump(env, default_flow_style=False)
        self.object_client.put_object(
            container_name, constants.PLAN_ENVIRONMENT, yaml_string)

    # Parameters are sent to the update parameters action, this stores
    # them in the plan environment and means the UI can find them.
    if params:
        with utils.TempDirs() as tmp:
            utils.run_ansible_playbook(
                playbook='cli-update-params.yaml',
                inventory='localhost,',
                workdir=tmp,
                playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
                verbosity=utils.playbook_verbosity(self=self),
                extra_vars={
                    "container": container_name
                },
                extra_vars_file={
                    "parameters": params
                }
            )
def _heat_deploy(self, stack, stack_name, template_path, parameters,
                 env_files, timeout, tht_root, env, update_plan_only,
                 run_validations, skip_deploy_identifier, plan_env_file,
                 deployment_options=None):
    """Verify the Baremetal nodes are available and do a stack update"""
    if stack:
        self.log.debug(
            "Checking compatibilities of neutron drivers for {0}".format(
                stack_name))
        msg = update.check_neutron_mechanism_drivers(
            env, stack, self.object_client, stack_name)
        if msg:
            raise oscexc.CommandError(msg)

    self.log.debug("Getting template contents from plan %s" % stack_name)
    # We need to reference the plan here, not the local
    # tht root, as we need template_object to refer to
    # the rendered overcloud.yaml, not the tht_root overcloud.j2.yaml
    # FIXME(shardy) we need to move more of this into mistral actions
    plan_yaml_path = os.path.relpath(template_path, tht_root)

    # heatclient template_utils needs a function that can
    # retrieve objects from a container by name/path
    def do_object_request(method='GET', object_path=None):
        # Fetch an object from the plan container; returns its body.
        obj = self.object_client.get_object(stack_name, object_path)
        return obj and obj[1]

    template_files, template = template_utils.get_template_contents(
        template_object=plan_yaml_path,
        object_request=do_object_request)

    files = dict(list(template_files.items()) + list(env_files.items()))

    moved_files = self._upload_missing_files(
        stack_name, files, tht_root)
    self._process_and_upload_environment(
        stack_name, env, moved_files, tht_root)

    # Invokes the workflows specified in plan environment file
    if plan_env_file:
        workflow_params.invoke_plan_env_workflows(
            self.clients,
            stack_name,
            plan_env_file,
            verbosity=utils.playbook_verbosity(self=self)
        )

    workflow_params.check_deprecated_parameters(self.clients, stack_name)

    if not update_plan_only:
        print("Deploying templates in the directory {0}".format(
            os.path.abspath(tht_root)))
        deployment.deploy_and_wait(
            log=self.log,
            clients=self.clients,
            stack=stack,
            plan_name=stack_name,
            verbose_level=utils.playbook_verbosity(self=self),
            timeout=timeout,
            run_validations=run_validations,
            skip_deploy_identifier=skip_deploy_identifier,
            deployment_options=deployment_options
        )