def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        oooutils.ffwd_upgrade_operator_confirm(parsed_args.yes, self.log)

        clients = self.app.client_manager

        stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)

        stack_name = stack.stack_name

        # ffwd-upgrade "init" run command on overcloud nodes
        package_update.run_on_nodes(
            clients,
            server_name='all',
            config_name='ffwd-upgrade-prepare',
            config=constants.FFWD_UPGRADE_PREPARE_SCRIPT,
            group='script')

        # In case of update and upgrade we need to force the
        # update_plan_only. The heat stack update is done by the
        # package_update mistral action
        parsed_args.update_plan_only = True

        # Add the prepare environment into the args to unset noop etc
        templates_dir = (parsed_args.templates
                         or constants.TRIPLEO_HEAT_TEMPLATES)
        if not parsed_args.environment_files:
            parsed_args.environment_files = []
        parsed_args.environment_files = oooutils.prepend_environment(
            parsed_args.environment_files, templates_dir,
            constants.FFWD_UPGRADE_PREPARE_ENV)

        super(FFWDUpgradePrepare, self).take_action(parsed_args)
        package_update.update(clients, container=stack_name)
        package_update.get_config(clients, container=stack_name)

        overcloudrcs = deployment.create_overcloudrc(clients,
                                                     container=stack_name)
        oooutils.write_overcloudrc(stack_name, overcloudrcs)

        # refresh stack info and enable ssh admin for Ansible-via-Mistral
        stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)
        deployment.get_hosts_and_enable_ssh_admin(
            self.log, clients, stack, parsed_args.overcloud_ssh_network,
            parsed_args.overcloud_ssh_user, parsed_args.overcloud_ssh_key)

        self.log.info("FFWD Upgrade Prepare on stack {0} complete.".format(
            parsed_args.stack))
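The prepare flow above relies on oooutils.prepend_environment to put the prepare environment in front of any operator-supplied environment files, so the operator's own environments are processed later and keep precedence during the plan update. A rough sketch of the behaviour being relied on, with a simplified signature (not the actual oooutils implementation):

import os

def prepend_environment(environment_files, templates_dir, env_file):
    # Simplified sketch: insert the prepare environment first so any
    # user-supplied environment files that follow can still override it.
    environment_files = list(environment_files or [])
    environment_files.insert(0, os.path.join(templates_dir, env_file))
    return environment_files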
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)

        # Throw a deprecation warning if a deprecated service is enabled
        # and ask the user whether the upgrade should still continue.
        if parsed_args.environment_files:
            oooutils.check_deprecated_service_is_enabled(
                parsed_args.environment_files)

        clients = self.app.client_manager

        stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)

        stack_name = stack.stack_name

        # In case of update and upgrade we need to force the
        # update_plan_only. The heat stack update is done by the
        # package_update mistral action
        parsed_args.update_plan_only = True
        # Add the upgrade-prepare.yaml environment to set noops etc
        templates_dir = (parsed_args.templates
                         or constants.TRIPLEO_HEAT_TEMPLATES)
        parsed_args.environment_files = oooutils.prepend_environment(
            parsed_args.environment_files, templates_dir,
            constants.UPGRADE_PREPARE_ENV)
        super(UpgradePrepare, self).take_action(parsed_args)
        package_update.update(clients, container=stack_name)
        oooutils.get_config(clients,
                            container=stack_name,
                            container_config='{}-config'.format(
                                stack.stack_name))

        overcloudrcs = deployment.create_overcloudrc(clients,
                                                     container=stack_name)
        oooutils.write_overcloudrc(stack_name, overcloudrcs)

        # refresh stack info and enable ssh admin for Ansible-via-Mistral
        stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)
        deployment.get_hosts_and_enable_ssh_admin(
            stack,
            parsed_args.overcloud_ssh_network,
            parsed_args.overcloud_ssh_user,
            self.get_key_pair(parsed_args),
            parsed_args.overcloud_ssh_port_timeout,
            verbosity=oooutils.playbook_verbosity(self=self))

        self.log.info("Completed Overcloud Upgrade Prepare for stack "
                      "{0}".format(stack_name))
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        oooutils.ffwd_upgrade_operator_confirm(parsed_args.yes, self.log)

        clients = self.app.client_manager

        stack = oooutils.get_stack(clients.orchestration,
                                   parsed_args.stack)

        stack_name = stack.stack_name

        # In case of update and upgrade we need to force the
        # update_plan_only. The heat stack update is done by the
        # package_update mistral action
        parsed_args.update_plan_only = True

        # Add the prepare environment into the args to unset noop etc
        templates_dir = (parsed_args.templates or
                         constants.TRIPLEO_HEAT_TEMPLATES)
        if not parsed_args.environment_files:
            parsed_args.environment_files = []
        parsed_args.environment_files = oooutils.prepend_environment(
            parsed_args.environment_files, templates_dir,
            constants.FFWD_UPGRADE_PREPARE_ENV)

        super(FFWDUpgradePrepare, self).take_action(parsed_args)
        package_update.update(clients, container=stack_name)
        oooutils.get_config(
            clients, container=stack_name,
            container_config='{}-config'.format(stack.stack_name))

        overcloudrcs = deployment.create_overcloudrc(
            clients, container=stack_name)
        oooutils.write_overcloudrc(stack_name, overcloudrcs)

        # refresh stack info and enable ssh admin for Ansible-via-Mistral
        stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)
        deployment.get_hosts_and_enable_ssh_admin(
            stack,
            parsed_args.overcloud_ssh_network,
            parsed_args.overcloud_ssh_user,
            self.get_key_pair(parsed_args),
            parsed_args.overcloud_ssh_port_timeout,
            verbosity=oooutils.playbook_verbosity(self=self)
        )

        self.log.info("FFWD Upgrade Prepare on stack {0} complete.".format(
                      parsed_args.stack))
Example #4
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        clients = self.app.client_manager

        stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)

        stack_name = stack.stack_name
        container_registry = parsed_args.container_registry_file

        # Update the container registry:
        if container_registry:
            with open(os.path.abspath(container_registry)) as content:
                registry = yaml.load(content.read())
        else:
            self.log.warning(
                "You have not provided a container registry file. Note "
                "that none of the containers on your environement will be "
                "updated. If you want to update your container you have "
                "to re-run this command and provide the registry file "
                "with: --container-registry-file option.")
            registry = None
        # Run update
        ceph_ansible_playbook = parsed_args.ceph_ansible_playbook
        # Run Overcloud deploy (stack update)
        # In case of update and upgrade we need to force the
        # update_plan_only. The heat stack update is done by the
        # package_update mistral action
        parsed_args.update_plan_only = True
        super(UpdatePrepare, self).take_action(parsed_args)
        package_update.update(clients,
                              container=stack_name,
                              container_registry=registry,
                              ceph_ansible_playbook=ceph_ansible_playbook)
        package_update.get_config(clients, container=stack_name)
        print("Update init on stack {0} complete.".format(parsed_args.stack))
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        _, ansible_dir = self.get_ansible_key_and_dir(
            no_workflow=True,
            stack=parsed_args.stack,
            orchestration=self.app.client_manager.orchestration
        )
        deployment.config_download(
            log=self.log,
            clients=self.app.client_manager,
            stack=oooutils.get_stack(
                self.app.client_manager.orchestration,
                parsed_args.stack
            ),
            output_dir=ansible_dir,
            verbosity=oooutils.playbook_verbosity(self=self),
            ansible_playbook_name=constants.FFWD_UPGRADE_PLAYBOOK,
            inventory_path=oooutils.get_tripleo_ansible_inventory(
                parsed_args.static_inventory,
                parsed_args.ssh_user,
                parsed_args.stack,
                return_inventory_file_path=True
            )
        )
        self.log.info("Completed Overcloud FFWD Upgrade Run.")
Example #6
    def take_action(self, parsed_args):
        self.log.debug("take_action({})".format(parsed_args))
        clients = self.app.client_manager
        stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)
        deployment.get_hosts_and_enable_ssh_admin(
            self.log, clients, stack, parsed_args.overcloud_ssh_network,
            parsed_args.overcloud_ssh_user, parsed_args.overcloud_ssh_key)
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        clients = self.app.client_manager

        stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)

        stack_name = stack.stack_name
        registry = oooutils.load_container_registry(
            self.log, parsed_args.container_registry_file)

        # Run update
        ceph_ansible_playbook = parsed_args.ceph_ansible_playbook
        # In case of update and upgrade we need to force the
        # update_plan_only. The heat stack update is done by the
        # package_update mistral action
        parsed_args.update_plan_only = True
        # Add the upgrade-prepare.yaml environment to set noops etc
        templates_dir = (parsed_args.templates
                         or constants.TRIPLEO_HEAT_TEMPLATES)
        parsed_args.environment_files = oooutils.prepend_environment(
            parsed_args.environment_files, templates_dir,
            constants.UPGRADE_PREPARE_ENV)
        super(UpgradePrepare, self).take_action(parsed_args)
        package_update.update(clients,
                              container=stack_name,
                              container_registry=registry,
                              ceph_ansible_playbook=ceph_ansible_playbook)
        package_update.get_config(clients, container=stack_name)

        overcloudrcs = deployment.create_overcloudrc(clients.workflow_engine,
                                                     container=stack_name)
        oooutils.write_overcloudrc(stack_name, overcloudrcs)

        self.log.info("Completed Overcloud Upgrade Prepare for stack "
                      "{0}".format(stack_name))
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        # NOTE(cloudnull): The string option "all" was a special default
        #                  that is no longer relevant. To retain compatibility
        #                  this condition has been put in place.
        if not parsed_args.playbook or parsed_args.playbook == ['all']:
            playbook = constants.MAJOR_UPGRADE_PLAYBOOKS
        else:
            playbook = parsed_args.playbook

        _, ansible_dir = self.get_ansible_key_and_dir(
            no_workflow=parsed_args.no_workflow,
            stack=parsed_args.stack,
            orchestration=self.app.client_manager.orchestration)
        deployment.config_download(
            log=self.log,
            clients=self.app.client_manager,
            stack=oooutils.get_stack(self.app.client_manager.orchestration,
                                     parsed_args.stack),
            output_dir=ansible_dir,
            verbosity=oooutils.playbook_verbosity(self=self),
            ansible_playbook_name=playbook,
            inventory_path=oooutils.get_tripleo_ansible_inventory(
                parsed_args.static_inventory,
                parsed_args.ssh_user,
                parsed_args.stack,
                return_inventory_file_path=True),
            tags=parsed_args.tags,
            skip_tags=parsed_args.skip_tags,
            limit_list=[i.strip() for i in parsed_args.limit.split(',') if i])
        self.log.info("Completed Overcloud Major Upgrade Run.")
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        clients = self.app.client_manager

        if not parsed_args.yes:
            confirm = oooutils.prompt_user_for_confirmation(message=_(
                "Are you sure you want to delete these overcloud "
                "nodes [y/N]? "),
                                                            logger=self.log)
            if not confirm:
                raise oscexc.CommandError("Action not confirmed, exiting.")

        orchestration_client = clients.orchestration

        stack = oooutils.get_stack(orchestration_client, parsed_args.stack)

        if not stack:
            raise InvalidConfiguration("stack {} not found".format(
                parsed_args.stack))

        nodes = '\n'.join('- %s' % node for node in parsed_args.nodes)
        print(
            "Deleting the following nodes from stack {stack}:\n{nodes}".format(
                stack=stack.stack_name, nodes=nodes))

        scale.scale_down(clients, stack.stack_name, parsed_args.nodes,
                         parsed_args.timeout)
Example #10
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        clients = self.app.client_manager

        stack = oooutils.get_stack(clients.orchestration,
                                   parsed_args.stack)

        stack_name = stack.stack_name

        # In case of update and upgrade we need to force the
        # update_plan_only. The heat stack update is done by the
        # package_update mistral action
        parsed_args.update_plan_only = True

        # Add the update-prepare.yaml environment to set noops etc
        templates_dir = (parsed_args.templates or
                         constants.TRIPLEO_HEAT_TEMPLATES)
        parsed_args.environment_files = oooutils.prepend_environment(
            parsed_args.environment_files, templates_dir,
            constants.UPDATE_PREPARE_ENV)

        # Throw a deprecation warning if a deprecated service is enabled
        # and ask the user whether the update should still continue.
        if parsed_args.environment_files:
            oooutils.check_deprecated_service_is_enabled(
                parsed_args.environment_files)

        super(UpdatePrepare, self).take_action(parsed_args)
        package_update.update(clients, container=stack_name)
        oooutils.get_config(
            clients, container=stack_name,
            container_config='{}-config'.format(stack.stack_name))
        self.log.info("Update init on stack {0} complete.".format(
                      parsed_args.stack))
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        _, ansible_dir = self.get_ansible_key_and_dir(
            no_workflow=True,
            stack=parsed_args.stack,
            orchestration=self.app.client_manager.orchestration)
        deployment.config_download(
            log=self.log,
            clients=self.app.client_manager,
            stack=oooutils.get_stack(self.app.client_manager.orchestration,
                                     parsed_args.stack),
            output_dir=ansible_dir,
            verbosity=oooutils.playbook_verbosity(self=self),
            ansible_playbook_name=constants.EXTERNAL_UPDATE_PLAYBOOKS,
            extra_vars=oooutils.parse_extra_vars(
                extra_var_strings=parsed_args.extra_vars),
            inventory_path=oooutils.get_tripleo_ansible_inventory(
                parsed_args.static_inventory,
                parsed_args.ssh_user,
                parsed_args.stack,
                return_inventory_file_path=True),
            tags=parsed_args.tags,
            skip_tags=parsed_args.skip_tags,
            limit_hosts=oooutils.playbook_limit_parse(
                limit_nodes=parsed_args.limit))
        self.log.info("Completed Overcloud External Update Run.")
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        clients = self.app.client_manager

        if parsed_args.baremetal_deployment:
            with open(parsed_args.baremetal_deployment, 'r') as fp:
                roles = yaml.safe_load(fp)

            nodes_text, nodes = self._nodes_to_delete(parsed_args, roles)
            if nodes_text:
                print(nodes_text)
            else:
                return
        else:
            nodes = parsed_args.nodes
            nodes_text = '\n'.join('- %s' % node for node in nodes)
        if not parsed_args.yes:
            confirm = oooutils.prompt_user_for_confirmation(message=_(
                "Are you sure you want to delete these overcloud "
                "nodes [y/N]? "),
                                                            logger=self.log)
            if not confirm:
                raise oscexc.CommandError("Action not confirmed, exiting.")

        orchestration_client = clients.orchestration

        stack = oooutils.get_stack(orchestration_client, parsed_args.stack)

        if not stack:
            raise InvalidConfiguration("stack {} not found".format(
                parsed_args.stack))

        print(
            "Deleting the following nodes from stack {stack}:\n{nodes}".format(
                stack=stack.stack_name, nodes=nodes_text))

        scale.scale_down(
            log=self.log,
            clients=clients,
            stack=stack,
            nodes=nodes,
            connection_timeout=parsed_args.overcloud_ssh_port_timeout,
            timeout=parsed_args.timeout,
            verbosity=oooutils.playbook_verbosity(self=self))

        if parsed_args.baremetal_deployment:
            with oooutils.TempDirs() as tmp:
                oooutils.run_ansible_playbook(
                    playbook='cli-overcloud-node-unprovision.yaml',
                    inventory='localhost,',
                    workdir=tmp,
                    playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
                    verbosity=oooutils.playbook_verbosity(self=self),
                    extra_vars={
                        "stack_name": parsed_args.stack,
                        "baremetal_deployment": roles,
                        "prompt": False,
                    })
Example #13
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        self._setup_clients()
        stack = oooutils.get_stack(self.orchestration_client,
                                   parsed_args.stack)
        host_vars = oooutils.get_stack_output_item(stack,
                                                   'AnsibleHostVarsMap') or {}
        parameters = stack.to_dict().get('parameters', {})

        # list all baremetal nodes and map hostname to node name
        node_details = self.baremetal_client.node.list(detail=True)
        hostname_node_map = {}
        for node in node_details:
            hostname = node.instance_info.get('display_name')
            if hostname and node.name:
                hostname_node_map[hostname] = node.name

        role_data = six.StringIO()
        role_data.write('# Generated with the following on %s\n#\n' %
                        datetime.datetime.now().isoformat())
        role_data.write('#   openstack %s\n#\n\n' %
                        ' '.join(self.app.command_options))
        for role, entries in host_vars.items():
            role_count = len(entries)

            # skip zero count roles
            if not role_count:
                continue

            role_data.write('- name: %s\n' % role)
            role_data.write('  count: %s\n' % role_count)

            hostname_format = parameters.get('%sHostnameFormat' % role)
            if hostname_format:
                role_data.write('  hostname_format: "%s"\n' % hostname_format)

            role_data.write('  instances:\n')

            for entry in sorted(entries):
                role_data.write('  - hostname: %s\n' % entry)
                if entry in hostname_node_map:
                    role_data.write('    name: %s\n' %
                                    hostname_node_map[entry])

        if parsed_args.output:
            if (os.path.exists(parsed_args.output) and not parsed_args.yes
                    and sys.stdin.isatty()):
                prompt_response = six.moves.input(
                    ('Overwrite existing file %s [y/N]?' %
                     parsed_args.output)).lower()
                if not prompt_response.startswith('y'):
                    raise oscexc.CommandError(
                        "Will not overwrite existing file:"
                        " %s" % parsed_args.output)
            with open(parsed_args.output, 'w+') as fp:
                fp.write(role_data.getvalue())
        self.app.stdout.write(role_data.getvalue())
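The loop above produces a small roles/instances listing; with hypothetical role, hostname-format and node names the generated buffer would look roughly like this:

# Generated with the following on 2021-03-01T12:00:00.000000
#
#   openstack <command and options>
#

- name: Controller
  count: 1
  hostname_format: "%stackname%-controller-%index%"
  instances:
  - hostname: overcloud-controller-0
    name: baremetal-node-0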
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        clients = self.app.client_manager

        stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)

        stack_name = stack.stack_name
        container_registry = parsed_args.container_registry_file

        if parsed_args.init_minor_update:
            # Update the container registry:
            if container_registry:
                with open(os.path.abspath(container_registry)) as content:
                    registry = yaml.load(content.read())
            else:
                self.log.warning(
                    "You have not provided a container registry file. Note "
                    "that none of the containers on your environement will be "
                    "updated. If you want to update your container you have "
                    "to re-run this command and provide the registry file "
                    "with: --container-registry-file option.")
                registry = None
            # Execute minor update
            ceph_ansible_playbook = parsed_args.ceph_ansible_playbook
            package_update.update(clients,
                                  container=stack_name,
                                  container_registry=registry,
                                  ceph_ansible_playbook=ceph_ansible_playbook)

            print("Minor update init on stack {0} complete.".format(
                parsed_args.stack))
        else:
            # Run ansible:
            nodes = parsed_args.nodes
            playbook = parsed_args.playbook
            inventory_file = parsed_args.static_inventory
            if inventory_file is None:
                inventory_file = '%s/%s' % (os.path.expanduser('~'),
                                            'tripleo-ansible-inventory')
                try:
                    processutils.execute('/bin/tripleo-ansible-inventory',
                                         '--static-inventory', inventory_file)
                except processutils.ProcessExecutionError as e:
                    message = "Failed to generate inventory: %s" % str(e)
                    raise exceptions.InvalidConfiguration(message)
            if os.path.exists(inventory_file):
                inventory = open(inventory_file, 'r').read()
            else:
                raise exceptions.InvalidConfiguration(
                    "Inventory file %s can not be found." % inventory_file)
            package_update.update_ansible(
                clients,
                nodes=nodes,
                inventory_file=inventory,
                playbook=playbook,
                ansible_queue_name=constants.UPDATE_QUEUE)
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)

        clients = self.app.client_manager
        orchestration_client = clients.orchestration
        stack = utils.get_stack(orchestration_client, parsed_args.name)

        print("Starting to deploy plan: {}".format(parsed_args.name))
        deployment.deploy_and_wait(self.log, clients, stack, parsed_args.name,
                                   self.app_args.verbose_level,
                                   timeout=parsed_args.timeout)
Example #16
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        clients = self.app.client_manager

        stack = oooutils.get_stack(clients.orchestration,
                                   parsed_args.stack)
        stack_name = stack.stack_name

        parsed_args.update_plan_only = True
        super(UpgradeConvergeOvercloud, self).take_action(parsed_args)
        # Run converge steps
        package_update.converge_nodes(clients, container=stack_name)
Example #17
    def take_action(self, parsed_args):

        self.log.debug("take_action({})".format(parsed_args))
        clients = self.app.client_manager
        stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)
        deployment.get_hosts_and_enable_ssh_admin(
            stack,
            parsed_args.overcloud_ssh_network,
            parsed_args.overcloud_ssh_user,
            self.get_key_pair(parsed_args),
            parsed_args.overcloud_ssh_port_timeout,
            verbosity=oooutils.playbook_verbosity(self=self))
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)

        clients = self.app.client_manager
        orchestration_client = clients.orchestration
        stack = utils.get_stack(orchestration_client, parsed_args.name)

        print("Starting to deploy plan: {}".format(parsed_args.name))
        deployment.deploy_and_wait(self.log, clients, stack, parsed_args.name,
                                   self.app_args.verbose_level,
                                   timeout=parsed_args.timeout,
                                   run_validations=parsed_args.run_validations)
    def _stack_delete(self, clients, stack_name):
        orchestration_client = clients.orchestration

        print("Deleting stack {s}...".format(s=stack_name))
        stack = utils.get_stack(orchestration_client, stack_name)
        if stack is None:
            self.log.warning(
                "No stack found ('{s}'), skipping delete".format(s=stack_name))
        else:
            try:
                stack_management.delete_stack(clients, stack=stack.id)
            except Exception as e:
                raise oscexc.CommandError(
                    "Error occurred during stack delete {}".format(e))
Example #20
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        oooutils.ffwd_upgrade_operator_confirm(parsed_args.yes, self.log)

        clients = self.app.client_manager

        stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)

        stack_name = stack.stack_name

        # ffwd-upgrade "init" run command on overcloud nodes
        package_update.run_on_nodes(
            clients,
            server_name='all',
            config_name='ffwd-upgrade-prepare',
            config=constants.FFWD_UPGRADE_PREPARE_SCRIPT,
            group='script',
            queue_name=constants.FFWD_UPGRADE_QUEUE)

        registry = oooutils.load_container_registry(
            self.log, parsed_args.container_registry_file)
        ceph_ansible_playbook = parsed_args.ceph_ansible_playbook
        # In case of update and upgrade we need to force the
        # update_plan_only. The heat stack update is done by the
        # package_update mistral action
        parsed_args.update_plan_only = True

        # Add the prepare environment into the args to unset noop etc
        templates_dir = (parsed_args.templates
                         or constants.TRIPLEO_HEAT_TEMPLATES)
        if not parsed_args.environment_files:
            parsed_args.environment_files = []
        parsed_args.environment_files = oooutils.prepend_environment(
            parsed_args.environment_files, templates_dir,
            constants.FFWD_UPGRADE_PREPARE_ENV)

        super(FFWDUpgradePrepare, self).take_action(parsed_args)
        package_update.update(clients,
                              container=stack_name,
                              container_registry=registry,
                              ceph_ansible_playbook=ceph_ansible_playbook)
        package_update.get_config(clients, container=stack_name)

        overcloudrcs = deployment.create_overcloudrc(clients.workflow_engine,
                                                     container=stack_name)
        oooutils.write_overcloudrc(stack_name, overcloudrcs)

        self.log.info("FFWD Upgrade Prepare on stack {0} complete.".format(
            parsed_args.stack))
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        clients = self.app.client_manager
        stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)
        # Add the converge environment into the args to unset noop etc
        templates_dir = (parsed_args.templates
                         or constants.TRIPLEO_HEAT_TEMPLATES)
        parsed_args.environment_files = oooutils.prepend_environment(
            parsed_args.environment_files, templates_dir,
            constants.UPGRADE_CONVERGE_ENV)

        super(UpgradeConvergeOvercloud, self).take_action(parsed_args)
        self.log.info(
            "Completed Overcloud Upgrade Converge for stack {0}".format(
                stack.stack_name))
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        clients = self.app.client_manager
        orchestration_client = clients.orchestration

        stack = oooutils.get_stack(orchestration_client, parsed_args.stack)

        if not stack:
            raise InvalidConfiguration("stack {} not found".format(
                parsed_args.stack))

        nodes = '\n'.join('- %s' % node for node in parsed_args.nodes)
        print("Deleting the following nodes from stack {stack}:\n{nodes}"
              .format(stack=stack.stack_name, nodes=nodes))

        scale.scale_down(clients, stack.stack_name, parsed_args.nodes)
    def _stack_delete(self, orchestration_client, stack_name):
        print("Deleting stack {s}...".format(s=stack_name))
        stack = utils.get_stack(orchestration_client, stack_name)
        if stack is None:
            self.log.warning("No stack found ('{s}'), skipping delete".
                             format(s=stack_name))
        else:
            try:
                utils.wait_for_stack_ready(
                    orchestration_client=orchestration_client,
                    stack_name=stack_name,
                    action='DELETE')
            except Exception as e:
                self.log.error("Exception while waiting for stack to delete "
                               "{}".format(e))
                raise oscexc.CommandError(
                    "Error occurred while waiting for stack to delete {}".
                    format(e))
Example #24
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        clients = self.app.client_manager
        orchestration_client = clients.orchestration

        stack = oooutils.get_stack(orchestration_client, parsed_args.stack)

        if not stack:
            raise InvalidConfiguration("stack {} not found".format(
                parsed_args.stack))

        nodes = '\n'.join('- %s' % node for node in parsed_args.nodes)
        print(
            "Deleting the following nodes from stack {stack}:\n{nodes}".format(
                stack=stack.stack_name, nodes=nodes))

        scale.scale_down(clients, stack.stack_name, parsed_args.nodes,
                         parsed_args.timeout)
Example #25
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)

        control_plane_stack = parsed_args.control_plane_stack
        cell_stack = parsed_args.cell_stack
        cell_name = parsed_args.name
        output_file = parsed_args.output_file or \
            '%s-cell-input.yaml' % cell_name

        self.log.info('Running at %s with parameters %s', self.now,
                      parsed_args)

        if os.path.exists(output_file) and not parsed_args.force_overwrite:
            raise exceptions.CellExportError(
                "File '%s' already exists, not exporting." % output_file)

        # prepare clients to access the environment
        clients = self.app.client_manager
        swift_client = clients.tripleoclient.object_store

        # data to export
        # parameter: Parameter to be exported
        # file:   If a file is specified it is taken as the source instead
        #         of the heat output. The file is relative to
        #         MISTRAL_VAR/stack_to_export.
        # filter: in case only specific settings should be
        #         exported from parameter data.
        export_data = {
            "EndpointMap": {
                "parameter": "EndpointMapOverride",
            },
            "HostsEntry": {
                "parameter": "ExtraHostFileEntries",
            },
            "GlobalConfig": {
                "parameter": "GlobalConfigExtraMapData",
            },
            "AllNodesConfig": {
                "file":
                "/group_vars/overcloud.json",
                "parameter":
                "GlobalConfigExtraMapData",
                "filter": [
                    "oslo_messaging_notify_short_bootstrap_node_name",
                    "oslo_messaging_notify_node_names",
                    "oslo_messaging_rpc_node_names", "memcached_node_ips",
                    "ovn_dbs_vip", "redis_vip"
                ]
            },
        }

        # export the data from swift and heat
        data_real = {}

        # Export the passwords from swift
        obj = 'plan-environment.yaml'
        container = control_plane_stack
        try:
            resp_headers, content = swift_client.get_object(container, obj)
        except Exception as e:
            self.log.error(
                "An error happened while exporting the password "
                "file from swift: %s", str(e))
            sys.exit(1)

        data_real = {'parameter_defaults': yaml.load(content)["passwords"]}

        stack_to_export = control_plane_stack
        if cell_stack:
            stack_to_export = cell_stack

        stack = oooutils.get_stack(clients.orchestration, stack_to_export)

        for export_key, export_param in export_data.items():
            data = None
            if "file" in export_param:
                # get stack data
                file = MISTRAL_VAR + stack_to_export + export_param["file"]
                with open(file, 'r') as ff:
                    try:
                        data = json.load(ff)
                    except Exception:
                        self.log.error(_('Could not read file %s') % file)
            else:
                # get stack data
                data = oooutils.get_stack_output_item(stack, export_key)

            param = export_param["parameter"]
            if data:
                # do we just want a subset of entries?
                # When we export information from a cell controller stack
                # we don't want to filter.
                if "filter" in export_param and not cell_stack:
                    for x in export_param["filter"]:
                        element = {x: data[x]}
                        if param not in data_real["parameter_defaults"]:
                            data_real["parameter_defaults"][param] = element
                        else:
                            data_real["parameter_defaults"][param].update(
                                element)
                else:
                    if param not in data_real["parameter_defaults"]:
                        data_real["parameter_defaults"][param] = data
                    else:
                        data_real["parameter_defaults"][param].update(data)
            else:
                raise exceptions.CellExportError(
                    "No data returned to export %s from." % param)

        # write the exported data
        with open(output_file, 'w') as f:
            yaml.safe_dump(data_real, f, default_flow_style=False)

        print("Cell input information exported to %s." % output_file)

        msg = """ \n\n
          Next steps:
          ===========\n
          * Create roles file for cell stack, e.g.:
            openstack overcloud roles generate --roles-path \\
            /usr/share/openstack-tripleo-heat-templates/roles \\
            -o cell_roles_data.yaml Compute CellController
          * Create new flavor used to tag the cell controller
          * Tag cell controller nodes into the new flavor
          * Create cell parameter file as explained in the doc link below
          * Deploy the cell and make sure to add the following information
           to the deploy command:
            - additional environment files used for overcloud stack
            - --stack <cellname>
            - cell role file created
            - the exported cell input information file {output_file}
            - other specific parameter files for the cell\n
          For more details check https://docs.openstack.org/tripleo-docs/
          latest/install/advanced_deployment/deploy_cellv2.html#
          deploy-the-cell""".format(output_file=output_file)

        print(msg)
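Put together, the exported cell input file is a single parameter_defaults map: the plan passwords pulled from swift plus the parameters listed in export_data. Roughly (illustrative keys only, values elided):

parameter_defaults:
  AdminPassword: ...
  EndpointMapOverride: {...}
  ExtraHostFileEntries: [...]
  GlobalConfigExtraMapData: {...}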
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)

        self._validate_args(parsed_args)

        errors, warnings = self._predeploy_verify_capabilities(parsed_args)
        if errors > 0:
            self.log.error(
                "Configuration has %d errors, fix them before proceeding. "
                "Ignoring these errors is likely to lead to a failed deploy.",
                errors)
            if parsed_args.validation_warnings_fatal or \
                    parsed_args.validation_errors_fatal:
                return
        if warnings > 0:
            self.log.error(
                "Configuration has %d warnings, fix them before proceeding. ",
                warnings)
            if parsed_args.validation_warnings_fatal:
                return
        else:
            self.log.info("SUCCESS: No warnings or errors in deploy "
                          "configuration, proceeding.")

        clients = self.app.client_manager
        orchestration_client = clients.tripleoclient.orchestration

        stack = utils.get_stack(orchestration_client, parsed_args.stack)
        stack_create = stack is None
        if stack_create:
            self.log.info("No stack found, will be doing a stack create")
        else:
            self.log.info("Stack found, will be doing a stack update")

        try:
            self._pre_heat_deploy()

            if parsed_args.rhel_reg:
                if parsed_args.reg_method == 'satellite':
                    sat_required_args = (parsed_args.reg_org
                                         and parsed_args.reg_sat_url
                                         and parsed_args.reg_activation_key)
                    if not sat_required_args:
                        raise exceptions.DeploymentError(
                            "ERROR: In order to use satellite registration, "
                            "you must specify --reg-org, --reg-sat-url, and "
                            "--reg-activation-key.")
                else:
                    portal_required_args = (parsed_args.reg_org
                                            and parsed_args.reg_activation_key)
                    if not portal_required_args:
                        raise exceptions.DeploymentError(
                            "ERROR: In order to use portal registration, you "
                            "must specify --reg-org, and "
                            "--reg-activation-key.")

            if parsed_args.dry_run:
                print("Validation Finished")
                return True

            self._deploy_tripleo_heat_templates(stack, parsed_args)

            # Get a new copy of the stack after stack update/create. If it was
            # a create then the previous stack object would be None.
            stack = utils.get_stack(orchestration_client, parsed_args.stack)

            utils.create_overcloudrc(stack, parsed_args.no_proxy)
            utils.create_tempest_deployer_input()

            if stack_create:
                self._deploy_postconfig(stack, parsed_args)

            overcloud_endpoint = utils.get_overcloud_endpoint(stack)
            print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
            print("Overcloud Deployed")
            return True
        except exceptions.DeploymentError as err:
            print("Deployment failed: ", err, file=sys.stderr)
            return False
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)

        self._validate_args(parsed_args)

        errors, warnings = self._predeploy_verify_capabilities(parsed_args)
        if errors > 0:
            self.log.error(
                "Configuration has %d errors, fix them before proceeding. "
                "Ignoring these errors is likely to lead to a failed deploy.",
                errors)
            if parsed_args.validation_warnings_fatal or \
                    parsed_args.validation_errors_fatal:
                return
        if warnings > 0:
            self.log.error(
                "Configuration has %d warnings, fix them before proceeding. ",
                warnings)
            if parsed_args.validation_warnings_fatal:
                return
        else:
            self.log.info("SUCCESS: No warnings or errors in deploy "
                          "configuration, proceeding.")

        clients = self.app.client_manager
        orchestration_client = clients.tripleoclient.orchestration()

        stack = utils.get_stack(orchestration_client, parsed_args.stack)
        stack_create = stack is None
        if stack_create:
            self.log.info("No stack found, will be doing a stack create")
        else:
            self.log.info("Stack found, will be doing a stack update")

        try:
            self._pre_heat_deploy()

            if parsed_args.rhel_reg:
                if parsed_args.reg_method == 'satellite':
                    sat_required_args = (parsed_args.reg_org and
                                         parsed_args.reg_sat_url and
                                         parsed_args.reg_activation_key)
                    if not sat_required_args:
                        raise exceptions.DeploymentError(
                            "ERROR: In order to use satellite registration, "
                            "you must specify --reg-org, --reg-sat-url, and "
                            "--reg-activation-key.")
                else:
                    portal_required_args = (parsed_args.reg_org and
                                            parsed_args.reg_activation_key)
                    if not portal_required_args:
                        raise exceptions.DeploymentError(
                            "ERROR: In order to use portal registration, you "
                            "must specify --reg-org, and "
                            "--reg-activation-key.")

            self._deploy_tripleo_heat_templates(stack, parsed_args)

            # Get a new copy of the stack after stack update/create. If it was
            # a create then the previous stack object would be None.
            stack = utils.get_stack(orchestration_client, parsed_args.stack)

            self._create_overcloudrc(stack, parsed_args)
            self._create_tempest_deployer_input()

            if stack_create:
                self._deploy_postconfig(stack, parsed_args)

            overcloud_endpoint = utils.get_overcloud_endpoint(stack)
            print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
            print("Overcloud Deployed")
            return True
        except exceptions.DeploymentError as err:
            print("Deployment failed: ", err, file=sys.stderr)
            return False
Example #28
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        self._setup_clients(parsed_args)

        # Swiftclient logs things like 404s at error level, which is a problem
        # because we use EAFP to check for the existence of files.  Turn off
        # most swiftclient logging to avoid cluttering up our output with
        # pointless tracebacks.
        sc_logger = logging.getLogger("swiftclient")
        sc_logger.setLevel(logging.CRITICAL)

        self._validate_args(parsed_args)

        # Throw a warning if a deprecated service is enabled and ask the
        # user whether the deployment should still continue.
        if parsed_args.environment_files:
            utils.check_deprecated_service_is_enabled(
                parsed_args.environment_files)

        stack = utils.get_stack(self.orchestration_client, parsed_args.stack)

        if stack and stack.stack_status == 'IN_PROGRESS':
            raise exceptions.StackInProgress(
                "Unable to deploy as the stack '{}' status is '{}'".format(
                    stack.stack_name, stack.stack_status))

        self._update_parameters(parsed_args, stack)

        stack_create = stack is None
        if stack_create:
            self.log.info("No stack found, will be doing a stack create")
        else:
            self.log.info("Stack found, will be doing a stack update")

        if parsed_args.dry_run:
            print("Validation Finished")
            return

        start = time.time()

        if not parsed_args.config_download_only:
            self._deploy_tripleo_heat_templates_tmpdir(stack, parsed_args)

        # Get a new copy of the stack after stack update/create. If it was
        # a create then the previous stack object would be None.
        stack = utils.get_stack(self.orchestration_client, parsed_args.stack)

        if parsed_args.update_plan_only:
            # If we are only updating the plan, then we either won't have a
            # stack yet or there won't be any changes and the following code
            # won't do anything.
            return

        if parsed_args.config_download:
            print("Deploying overcloud configuration")
            deployment.set_deployment_status(
                self.clients, 'deploying',
                plan=stack.stack_name)

            try:
                if not parsed_args.config_download_only:
                    deployment.get_hosts_and_enable_ssh_admin(
                        self.log, self.clients, stack,
                        parsed_args.overcloud_ssh_network,
                        parsed_args.overcloud_ssh_user,
                        parsed_args.overcloud_ssh_key)

                if parsed_args.config_download_timeout:
                    timeout = parsed_args.config_download_timeout * 60
                else:
                    used = int(time.time() - start)
                    timeout = (parsed_args.timeout * 60) - used

                deployment_options = {}
                if parsed_args.deployment_python_interpreter:
                    deployment_options['ansible_python_interpreter'] = \
                        parsed_args.deployment_python_interpreter

                deployment.config_download(
                    self.log, self.clients, stack,
                    parsed_args.templates, parsed_args.overcloud_ssh_user,
                    parsed_args.overcloud_ssh_key,
                    parsed_args.overcloud_ssh_network,
                    parsed_args.output_dir,
                    parsed_args.override_ansible_cfg,
                    timeout,
                    verbosity=self.app_args.verbose_level,
                    deployment_options=deployment_options,
                    in_flight_validations=parsed_args.inflight)
            except Exception:
                deployment.set_deployment_status(
                    self.clients, 'failed',
                    plan=stack.stack_name)
                raise

        # Force fetching of attributes
        stack.get()

        overcloudrcs = deployment.create_overcloudrc(
            self.clients, container=stack.stack_name,
            no_proxy=parsed_args.no_proxy)

        # Create overcloud clouds.yaml
        cloud_data = deployment.create_cloudsyaml(
            self.clients, container=stack.stack_name)
        cloud_yaml_dir = os.path.join(constants.CLOUD_HOME_DIR,
                                      constants.CLOUDS_YAML_DIR)
        cloud_user_id = os.stat(constants.CLOUD_HOME_DIR).st_uid
        cloud_group_id = os.stat(constants.CLOUD_HOME_DIR).st_gid
        clouds_yaml.create_clouds_yaml(
            cloud=cloud_data,
            cloud_yaml_dir=cloud_yaml_dir,
            user_id=cloud_user_id,
            group_id=cloud_group_id)
        rcpath = utils.write_overcloudrc(stack.stack_name, overcloudrcs)
        utils.create_tempest_deployer_input()

        # Run postconfig on create or force. Use force to make sure endpoints
        # are created on deploy reruns and upgrades.
        if ((stack_create or parsed_args.force_postconfig)
                and not parsed_args.skip_postconfig):
            self._deploy_postconfig(stack, parsed_args)

        overcloud_endpoint = utils.get_overcloud_endpoint(stack)

        horizon_url = deployment.get_horizon_url(
            self.clients, stack=stack.stack_name)

        print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
        print("Overcloud Horizon Dashboard URL: {0}".format(horizon_url))
        print("Overcloud rc file: {0}".format(rcpath))
        print("Overcloud Deployed")
Example #29
def export_stack(heat,
                 stack,
                 should_filter=False,
                 config_download_dir=constants.DEFAULT_WORK_DIR):

    # data to export
    # parameter: Parameter to be exported
    # file:   If a file is specified it is taken as the source instead
    #         of the heat output. The file is relative to
    #         <config-download-dir>/stack.
    # filter: in case only specific settings should be
    #         exported from parameter data.
    export_data = {
        "EndpointMap": {
            "parameter": "EndpointMapOverride",
        },
        "HostsEntry": {
            "parameter": "ExtraHostFileEntries",
        },
        "GlobalConfig": {
            "parameter": "GlobalConfigExtraMapData",
        },
        "AllNodesConfig": {
            "file":
            "group_vars/overcloud.json",
            "parameter":
            "AllNodesExtraMapData",
            "filter": [
                "oslo_messaging_notify_short_bootstrap_node_name",
                "oslo_messaging_notify_node_names",
                "oslo_messaging_rpc_node_names", "memcached_node_ips",
                "ovn_dbs_vip", "redis_vip"
            ]
        },
    }

    data = {}
    heat_stack = oooutils.get_stack(heat, stack)

    for export_key, export_param in export_data.items():
        param = export_param["parameter"]
        if "file" in export_param:
            # get file data
            file = os.path.join(config_download_dir, stack,
                                export_param["file"])
            with open(file, 'r') as ff:
                try:
                    export_data = json.load(ff)
                except Exception as e:
                    LOG.error(_('Could not read file %s') % file)
                    LOG.error(e)

        else:
            # get stack data
            export_data = oooutils.get_stack_output_item(
                heat_stack, export_key)

        if export_data:
            # When we export information from a cell controller stack
            # we don't want to filter.
            if "filter" in export_param and should_filter:
                for filter_key in export_param["filter"]:
                    if filter_key in export_data:
                        element = {filter_key: export_data[filter_key]}
                        data.setdefault(param, {}).update(element)
            else:
                data[param] = export_data

        else:
            raise Exception("No data returned to export %s from." % param)

    return data
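A minimal usage sketch for export_stack (assumed calling code, not part of the module): the returned map is what an environment file expects under parameter_defaults, so it can be dumped straight to YAML.

import yaml

def write_export_file(heat, stack, output_file,
                      config_download_dir=constants.DEFAULT_WORK_DIR):
    # Hypothetical helper: wrap the exported parameters the way the
    # overcloud/cell export commands do and write an environment file.
    data = {'parameter_defaults': export_stack(
        heat, stack, should_filter=False,
        config_download_dir=config_download_dir)}
    with open(output_file, 'w') as f:
        yaml.safe_dump(data, f, default_flow_style=False)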
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        self._setup_clients(parsed_args)

        # Swiftclient logs things like 404s at error level, which is a problem
        # because we use EAFP to check for the existence of files.  Turn off
        # most swiftclient logging to avoid cluttering up our output with
        # pointless tracebacks.
        sc_logger = logging.getLogger("swiftclient")
        sc_logger.setLevel(logging.CRITICAL)

        self._validate_args(parsed_args)

        stack = utils.get_stack(self.orchestration_client, parsed_args.stack)

        if stack and stack.stack_status == 'IN_PROGRESS':
            raise exceptions.StackInProgress(
                "Unable to deploy as the stack '{}' status is '{}'".format(
                    stack.stack_name, stack.stack_status))

        parameters = self._update_parameters(parsed_args, stack)

        if not parsed_args.disable_validations:
            errors, warnings = self._predeploy_verify_capabilities(
                stack, parameters, parsed_args)
            if errors > 0:
                self.log.error(
                    "Configuration has %d errors, fix them before "
                    "proceeding. Ignoring these errors is likely to lead to "
                    "a failed deploy.", errors)
                if parsed_args.validation_warnings_fatal or \
                        parsed_args.validation_errors_fatal:
                    raise exceptions.InvalidConfiguration()
            if warnings > 0:
                self.log.error(
                    "Configuration has %d warnings, fix them before "
                    "proceeding.", warnings)
                if parsed_args.validation_warnings_fatal:
                    raise exceptions.InvalidConfiguration()
            else:
                self.log.info("SUCCESS: No warnings or errors in deploy "
                              "configuration, proceeding.")

        stack_create = stack is None
        if stack_create:
            self.log.info("No stack found, will be doing a stack create")
        else:
            self.log.info("Stack found, will be doing a stack update")

        if parsed_args.rhel_reg:
            if parsed_args.reg_method == 'satellite':
                sat_required_args = (parsed_args.reg_org
                                     and parsed_args.reg_sat_url
                                     and parsed_args.reg_activation_key)
                if not sat_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use satellite registration, "
                        "you must specify --reg-org, --reg-sat-url, and "
                        "--reg-activation-key.")
            else:
                portal_required_args = (parsed_args.reg_org
                                        and parsed_args.reg_activation_key)
                if not portal_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use portal registration, you "
                        "must specify --reg-org, and "
                        "--reg-activation-key.")

        if parsed_args.dry_run:
            print("Validation Finished")
            return

        self._deploy_tripleo_heat_templates_tmpdir(stack, parsed_args)

        # Get a new copy of the stack after stack update/create. If it was
        # a create then the previous stack object would be None.
        stack = utils.get_stack(self.orchestration_client, parsed_args.stack)

        if parsed_args.update_plan_only:
            # If we are only updating the plan, then we either won't have a
            # stack yet or there won't be any changes and the following code
            # won't do anything.
            return

        if parsed_args.config_download:
            print("Deploying overcloud configuration")

            deployment.config_download(self.log,
                                       self.clients,
                                       stack,
                                       parsed_args.templates,
                                       parsed_args.deployed_server,
                                       parsed_args.overcloud_ssh_user,
                                       parsed_args.overcloud_ssh_key,
                                       parsed_args.output_dir,
                                       verbosity=self.app_args.verbose_level)

        # Force fetching of attributes
        stack.get()

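        # Generate the overcloudrc credentials and write them out so the
        # operator can talk to the newly deployed overcloud.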
        overcloudrcs = deployment.overcloudrc(self.workflow_client,
                                              container=stack.stack_name,
                                              no_proxy=parsed_args.no_proxy)

        rcpath = utils.write_overcloudrc(stack.stack_name, overcloudrcs)
        utils.create_tempest_deployer_input()

        # Run postconfig on create or force. Use force to make sure endpoints
        # are created with deploy reruns and upgrades.
        if ((stack_create or parsed_args.force_postconfig)
                and not parsed_args.skip_postconfig):
            self._deploy_postconfig(stack, parsed_args)

        overcloud_endpoint = utils.get_overcloud_endpoint(stack)

        horizon_url = deployment.get_horizon_url(self.clients,
                                                 stack=stack.stack_name)

        print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
        print("Overcloud Horizon Dashboard URL: {0}".format(horizon_url))
        print("Overcloud rc file: {0}".format(rcpath))
        print("Overcloud Deployed")
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        self._setup_clients(parsed_args)

        # Swiftclient logs things like 404s at error level, which is a problem
        # because we use EAFP to check for the existence of files.  Turn off
        # most swiftclient logging to avoid cluttering up our output with
        # pointless tracebacks.
        sc_logger = logging.getLogger("swiftclient")
        sc_logger.setLevel(logging.CRITICAL)

        self._validate_args(parsed_args)

        # Warn if a deprecated service is enabled and ask the user
        # whether the deployment should still continue.
        if parsed_args.environment_files:
            utils.check_deprecated_service_is_enabled(
                parsed_args.environment_files)

        stack = utils.get_stack(self.orchestration_client, parsed_args.stack)

        self._update_parameters(parsed_args, stack)

        stack_create = stack is None
        if stack_create:
            self.log.info("No stack found, will be doing a stack create")
        else:
            self.log.info("Stack found, will be doing a stack update")

        if parsed_args.dry_run:
            print("Validation Finished")
            return

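        # Record the start time so the remaining deployment timeout budget
        # can be handed to config-download below.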
        start = time.time()

        if not parsed_args.config_download_only:
            self._deploy_tripleo_heat_templates_tmpdir(stack, parsed_args)

        # Get a new copy of the stack after stack update/create. If it was
        # a create then the previous stack object would be None.
        stack = utils.get_stack(self.orchestration_client, parsed_args.stack)

        if parsed_args.update_plan_only:
            # If we are only updating the plan, then we either won't have a
            # stack yet or there won't be any changes and the following code
            # won't do anything.
            return

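        # With config-download enabled, mark the plan as DEPLOYING, apply the
        # overcloud configuration via Ansible, and record DEPLOY_SUCCESS or
        # DEPLOY_FAILED depending on the outcome.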
        if parsed_args.config_download:
            print("Deploying overcloud configuration")
            deployment.set_deployment_status(
                clients=self.clients,
                plan=stack.stack_name,
                status='DEPLOYING'
            )

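            # Unless only re-running config-download, enable ssh admin access
            # on the overcloud nodes before applying the configuration.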
            try:
                if not parsed_args.config_download_only:
                    deployment.get_hosts_and_enable_ssh_admin(
                        stack,
                        parsed_args.overcloud_ssh_network,
                        parsed_args.overcloud_ssh_user,
                        self.get_key_pair(parsed_args),
                        parsed_args.overcloud_ssh_port_timeout,
                        verbosity=utils.playbook_verbosity(self=self)
                    )

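                # Use an explicit config-download timeout if one was given;
                # otherwise spend whatever remains of the overall timeout.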
                if parsed_args.config_download_timeout:
                    timeout = parsed_args.config_download_timeout
                else:
                    used = int((time.time() - start) // 60)
                    timeout = parsed_args.timeout - used
                    if timeout <= 0:
                        raise exceptions.DeploymentError(
                            'Deployment timed out after %sm' % used)

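                # Optional Ansible settings forwarded to config-download,
                # e.g. a non-default Python interpreter on the nodes.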
                deployment_options = {}
                if parsed_args.deployment_python_interpreter:
                    deployment_options['ansible_python_interpreter'] = \
                        parsed_args.deployment_python_interpreter

                deployment.config_download(
                    self.log,
                    self.clients,
                    stack,
                    parsed_args.overcloud_ssh_network,
                    parsed_args.output_dir,
                    parsed_args.override_ansible_cfg,
                    timeout=parsed_args.overcloud_ssh_port_timeout,
                    verbosity=utils.playbook_verbosity(self=self),
                    deployment_options=deployment_options,
                    in_flight_validations=parsed_args.inflight,
                    deployment_timeout=timeout,
                    tags=parsed_args.tags,
                    skip_tags=parsed_args.skip_tags,
                    limit_hosts=utils.playbook_limit_parse(
                        limit_nodes=parsed_args.limit
                    )
                )
                deployment.set_deployment_status(
                    clients=self.clients,
                    plan=stack.stack_name,
                    status='DEPLOY_SUCCESS')
            except Exception:
                deployment.set_deployment_status(
                    clients=self.clients,
                    plan=stack.stack_name,
                    status='DEPLOY_FAILED'
                )
                raise

        # Force fetching of attributes
        stack.get()

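        # Write the overcloudrc credentials file for the deployed stack.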
        rcpath = deployment.create_overcloudrc(container=stack.stack_name,
                                               no_proxy=parsed_args.no_proxy)

        # Copy clouds.yaml to the cloud user directory
        user = getpwuid(os.stat(constants.CLOUD_HOME_DIR).st_uid).pw_name
        utils.copy_clouds_yaml(user)
        utils.create_tempest_deployer_input()

        # Run postconfig on create or force. Use force to make sure endpoints
        # are created with deploy reruns and upgrades.
        if ((stack_create or parsed_args.force_postconfig)
                and not parsed_args.skip_postconfig):
            self._deploy_postconfig(stack, parsed_args)

        overcloud_endpoint = utils.get_overcloud_endpoint(stack)

        horizon_url = deployment.get_horizon_url(stack=stack.stack_name)

        print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
        print("Overcloud Horizon Dashboard URL: {0}".format(horizon_url))
        print("Overcloud rc file: {0}".format(rcpath))
        print("Overcloud Deployed")
    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)

        # Swiftclient logs things like 404s at error level, which is a problem
        # because we use EAFP to check for the existence of files.  Turn off
        # most swiftclient logging to avoid cluttering up our output with
        # pointless tracebacks.
        sc_logger = logging.getLogger("swiftclient")
        sc_logger.setLevel(logging.CRITICAL)

        self._validate_args(parsed_args)

        clients = self.app.client_manager
        orchestration_client = clients.orchestration

        stack = utils.get_stack(orchestration_client, parsed_args.stack)

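        # Refuse to start a new deploy while a previous stack operation is
        # still in progress.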
        if stack and stack.stack_status == 'IN_PROGRESS':
            raise exceptions.StackInProgress(
                "Unable to deploy as the stack '{}' status is '{}'".format(
                    stack.stack_name, stack.stack_status))

        parameters = self._update_parameters(
            parsed_args, clients.network, stack)

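        # Pre-deployment capability checks: log any errors and warnings and
        # bail out early when the corresponding *-fatal options are set.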
        errors, warnings = self._predeploy_verify_capabilities(
            stack, parameters, parsed_args)
        if errors > 0:
            self.log.error(
                "Configuration has %d errors, fix them before proceeding. "
                "Ignoring these errors is likely to lead to a failed deploy.",
                errors)
            if parsed_args.validation_warnings_fatal or \
                    parsed_args.validation_errors_fatal:
                return
        if warnings > 0:
            self.log.error(
                "Configuration has %d warnings, fix them before proceeding.",
                warnings)
            if parsed_args.validation_warnings_fatal:
                return
        elif errors == 0:
            self.log.info("SUCCESS: No warnings or errors in deploy "
                          "configuration, proceeding.")

        stack_create = stack is None
        if stack_create:
            self.log.info("No stack found, will be doing a stack create")
        else:
            self.log.info("Stack found, will be doing a stack update")

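        # Sanity-check the RHEL registration arguments before deploying, as
        # above: satellite needs an org, satellite URL and activation key;
        # portal needs an org and activation key.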
        if parsed_args.rhel_reg:
            if parsed_args.reg_method == 'satellite':
                sat_required_args = (parsed_args.reg_org and
                                     parsed_args.reg_sat_url and
                                     parsed_args.reg_activation_key)
                if not sat_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use satellite registration, "
                        "you must specify --reg-org, --reg-sat-url, and "
                        "--reg-activation-key.")
            else:
                portal_required_args = (parsed_args.reg_org and
                                        parsed_args.reg_activation_key)
                if not portal_required_args:
                    raise exceptions.DeploymentError(
                        "ERROR: In order to use portal registration, you "
                        "must specify --reg-org, and "
                        "--reg-activation-key.")

        if parsed_args.dry_run:
            print("Validation Finished")
            return

        self._deploy_tripleo_heat_templates_tmpdir(stack, parsed_args)

        # Get a new copy of the stack after stack update/create. If it was
        # a create then the previous stack object would be None.
        stack = utils.get_stack(orchestration_client, parsed_args.stack)

        if parsed_args.update_plan_only:
            # If we are only updating the plan, then we either won't have a
            # stack yet or there won't be any changes and the following code
            # won't do anything.
            return

        # Force fetching of attributes
        stack.get()

        utils.create_overcloudrc(clients, stack, parsed_args.no_proxy)
        utils.create_tempest_deployer_input()

        # Run postconfig on create or force. Use force to make sure endpoints
        # are created with deploy reruns and upgrades.
        if ((stack_create or parsed_args.force_postconfig)
                and not parsed_args.skip_postconfig):
            self._deploy_postconfig(stack, parsed_args)

        overcloud_endpoint = utils.get_overcloud_endpoint(stack)
        print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
        print("Overcloud Deployed")