Example #1
    def _update_stack(self, parameters={}, timeout_mins=240):

        tpl_files, template = template_utils.get_template_contents(
            template_file=os.path.join(self.tht_dir, TEMPLATE_NAME))

        env_paths = []
        if self.environment_files:
            env_paths.extend(self.environment_files)
        env_files, env = (
            template_utils.process_multiple_environments_and_files(
                env_paths=env_paths))
        update.add_breakpoints_cleanup_into_env(env)

        fields = {
            'existing': True,
            'stack_id': self.stack_id,
            'template': template,
            'files': dict(list(tpl_files.items()) +
                          list(env_files.items())),
            'environment': env,
            'parameters': parameters,
            'timeout_mins': timeout_mins
        }

        LOG.debug('stack update params: %s', fields)
        self.heatclient.stacks.update(**fields)
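Every example on this page funnels the stack environment through update.add_breakpoints_cleanup_into_env before calling Heat. For orientation, here is a minimal sketch of what that helper amounts to in tripleo-common (an illustrative reconstruction, not the canonical source): it deep-merges a wildcard resource_registry entry that empties the UpdateDeployment hooks, so breakpoints left over from a previous update cannot stall the new one.

    from heatclient.common import template_utils


    def add_breakpoints_cleanup_into_env(env):
        # Sketch: merge (in place) a wildcard hook-clearing entry into the
        # environment's resource_registry; existing keys are preserved.
        template_utils.deep_update(env, {
            'resource_registry': {
                'resources': {'*': {'*': {
                    'UpdateDeployment': {'hooks': []}}}}}})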
Example #2
    def _create_breakpoint_cleanup_env(self, tht_root, container_name):
        bp_env = {}
        update.add_breakpoints_cleanup_into_env(bp_env)
        env_path, swift_path = self._write_user_environment(
            bp_env, 'tripleoclient-breakpoint-cleanup.yaml', tht_root,
            container_name)
        return bp_env
Example #3
    def _update_stack(self,
                      parameters={},
                      timeout_mins=constants.STACK_TIMEOUT_DEFAULT,
                      context=None):
        # TODO(rbrady): migrate _update_stack to its own action and update
        # the workflow for scale down

        # update the plan parameters with the scaled down parameters
        update_params_action = parameters_actions.UpdateParametersAction(
            parameters, self.container)
        updated_plan = update_params_action.run(context)
        if isinstance(updated_plan, actions.Result):
            return updated_plan

        process_templates_action = templates.ProcessTemplatesAction(
            container=self.container)
        processed_data = process_templates_action.run(context)
        if isinstance(processed_data, actions.Result):
            return processed_data

        update.add_breakpoints_cleanup_into_env(processed_data['environment'])

        fields = processed_data.copy()
        fields['timeout_mins'] = timeout_mins
        fields['existing'] = True
        # As we do a PATCH update when deleting nodes, parameters set for a
        # stack before upgrade to newton (ex. ComputeRemovalPolicies),
        # would still take precedence over the ones set in parameter_defaults
        # after upgrade. Clear these parameters for backward compatibility.
        fields['clear_parameters'] = list(parameters.keys())

        LOG.debug('stack update params: %s', fields)
        self.get_orchestration_client(context).stacks.update(
            self.container, **fields)
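The existing/clear_parameters pair in this example drives Heat's PATCH update semantics: with existing=True the stack's current template and parameters are reused, and clear_parameters names the ones to drop back to their defaults. A standalone sketch of the same call (the endpoint, token, stack name, and parameter name are placeholders):

    from heatclient import client as heat_client

    heat = heat_client.Client('1', endpoint=HEAT_URL, token=TOKEN)  # placeholder auth
    heat.stacks.update(
        'overcloud',                # hypothetical stack name
        existing=True,              # PATCH: reuse current template and parameters
        # Drop stale pre-Newton values so parameter_defaults win again.
        clear_parameters=['ComputeRemovalPolicies'],
    )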
Example #4
    def _update_stack(self, parameters={},
                      timeout_mins=constants.STACK_TIMEOUT_DEFAULT,
                      context=None):
        # TODO(rbrady): migrate _update_stack to its own action and update
        # the workflow for scale down

        # update the plan parameters with the scaled down parameters
        update_params_action = parameters_actions.UpdateParametersAction(
            parameters, self.container)
        updated_plan = update_params_action.run(context)
        if isinstance(updated_plan, actions.Result):
            return updated_plan

        processed_data = super(ScaleDownAction, self).run(context)
        if isinstance(processed_data, actions.Result):
            return processed_data

        update.add_breakpoints_cleanup_into_env(processed_data['environment'])

        fields = processed_data.copy()
        fields['timeout_mins'] = timeout_mins
        fields['existing'] = True
        # As we do a PATCH update when deleting nodes, parameters set for a
        # stack before upgrade to newton (ex. ComputeRemovalPolicies),
        # would still take precedence over the ones set in parameter_defaults
        # after upgrade. Clear these parameters for backward compatibility.
        fields['clear_parameters'] = list(parameters.keys())

        LOG.debug('stack update params: %s', fields)
        self.get_orchestration_client(context).stacks.update(self.container,
                                                             **fields)
Example #5
    def _update_stack(self,
                      parameters={},
                      timeout_mins=constants.STACK_TIMEOUT_DEFAULT):
        # TODO(rbrady): migrate _update_stack to its own action and update
        # the workflow for scale down

        # update the plan parameters with the scaled down parameters
        update_params_action = parameters_actions.UpdateParametersAction(
            parameters, self.container)
        updated_plan = update_params_action.run()
        if isinstance(updated_plan, mistral_workflow_utils.Result):
            return updated_plan

        processed_data = super(ScaleDownAction, self).run()
        if isinstance(processed_data, mistral_workflow_utils.Result):
            return processed_data

        update.add_breakpoints_cleanup_into_env(processed_data['environment'])

        fields = processed_data.copy()
        fields['timeout_mins'] = timeout_mins
        fields['existing'] = True

        LOG.debug('stack update params: %s', fields)
        self.get_orchestration_client().stacks.update(self.container, **fields)
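Examples #3 through #5 differ mainly in where Result lives: older releases imported it from mistral_workflow_utils, newer ones from mistral_lib's actions module. The control-flow convention is the same either way: a sub-action that fails returns a Result carrying error data, and the caller propagates it unchanged instead of continuing. A minimal sketch of that pattern (ExampleAction and _run_sub_action are hypothetical):

    from mistral_lib import actions


    class ExampleAction(actions.Action):
        def run(self, context):
            data = self._run_sub_action(context)  # hypothetical helper
            # A Result signals success/failure metadata rather than plain
            # data, so hand it straight back to the workflow engine.
            if isinstance(data, actions.Result):
                return data
            return {'processed': data}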
Example #6
    def _update_stack(self, parameters={},
                      timeout_mins=constants.STACK_TIMEOUT_DEFAULT):

        tpl_files, template = template_utils.get_template_contents(
            template_file=os.path.join(self.tht_dir, constants.TEMPLATE_NAME))

        env_paths = []
        if self.environment_files:
            env_paths.extend(self.environment_files)
        env_files, env = (
            template_utils.process_multiple_environments_and_files(
                env_paths=env_paths))
        update.add_breakpoints_cleanup_into_env(env)

        fields = {
            'existing': True,
            'stack_id': self.stack_id,
            'template': template,
            'files': dict(list(tpl_files.items()) +
                          list(env_files.items())),
            'environment': env,
            'parameters': parameters,
            'timeout_mins': timeout_mins
        }

        LOG.debug('stack update params: %s', fields)
        self.heatclient.stacks.update(**fields)
Example #7
    def _update_stack(self, parameters={},
                      timeout_mins=constants.STACK_TIMEOUT_DEFAULT,
                      context=None):
        # TODO(rbrady): migrate _update_stack to its own action and update
        # the workflow for scale down

        # update the plan parameters with the scaled down parameters
        update_params_action = parameters_actions.UpdateParametersAction(
            parameters, self.container)
        updated_plan = update_params_action.run(context)
        if isinstance(updated_plan, actions.Result):
            return updated_plan

        processed_data = super(ScaleDownAction, self).run(context)
        if isinstance(processed_data, actions.Result):
            return processed_data

        update.add_breakpoints_cleanup_into_env(processed_data['environment'])

        fields = processed_data.copy()
        fields['timeout_mins'] = timeout_mins
        fields['existing'] = True

        LOG.debug('stack update params: %s', fields)
        self.get_orchestration_client(context).stacks.update(self.container,
                                                             **fields)
Example #8
    def _heat_deploy(self, stack, stack_name, template_path, parameters,
                     created_env_files, timeout, tht_root, env):
        """Verify the Baremetal nodes are available and do a stack update"""

        clients = self.app.client_manager
        workflow_client = clients.workflow_engine

        self.log.debug("Processing environment files %s" % created_env_files)
        env_files, localenv = (
            template_utils.process_multiple_environments_and_files(
                created_env_files))
        # Command line has more precedence than env files
        template_utils.deep_update(localenv, env)

        if stack:
            update.add_breakpoints_cleanup_into_env(localenv)

        self.log.debug("Getting template contents from plan %s" % stack_name)
        # We need to reference the plan here, not the local
        # tht root, as we need template_object to refer to
        # the rendered overcloud.yaml, not the tht_root overcloud.j2.yaml
        # FIXME(shardy) we need to move more of this into mistral actions
        plan_yaml_path = os.path.relpath(template_path, tht_root)

        # heatclient template_utils needs a function that can
        # retrieve objects from a container by name/path
        objectclient = clients.tripleoclient.object_store

        def do_object_request(method='GET', object_path=None):
            obj = objectclient.get_object(stack_name, object_path)
            return obj and obj[1]

        template_files, template = template_utils.get_template_contents(
            template_object=plan_yaml_path,
            object_request=do_object_request)

        files = dict(list(template_files.items()) + list(env_files.items()))

        number_controllers = int(parameters.get('ControllerCount', 0))
        if number_controllers > 1:
            if not localenv.get('parameter_defaults').get('NtpServer'):
                raise exceptions.InvalidConfiguration(
                    'Specify --ntp-server as parameter or NtpServer in '
                    'environments when using multiple controllers '
                    '(with HA).')

        clients = self.app.client_manager

        moved_files = self._upload_missing_files(
            stack_name, objectclient, files, tht_root)
        self._process_and_upload_environment(
            stack_name, objectclient, localenv, moved_files, tht_root,
            workflow_client)

        deployment.deploy_and_wait(self.log, clients, stack, stack_name,
                                   self.app_args.verbose_level, timeout)
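The deep_update call near the top of this variant is what gives command-line environment values precedence over environment files: the second mapping wins, recursing into nested dicts rather than replacing them wholesale. A quick illustration (the values are made up):

    from heatclient.common import template_utils

    base = {'parameter_defaults': {'NtpServer': 'pool.ntp.org',
                                   'ComputeCount': 2}}
    override = {'parameter_defaults': {'ComputeCount': 5}}

    # Merges in place and returns base; nested keys are updated, not replaced:
    # {'parameter_defaults': {'NtpServer': 'pool.ntp.org', 'ComputeCount': 5}}
    template_utils.deep_update(base, override)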
Example #9
    def _heat_deploy(self, stack, stack_name, template_path, parameters,
                     environments, timeout):
        """Verify the Baremetal nodes are available and do a stack update"""

        self.log.debug("Processing environment files")
        env_files, env = (
            template_utils.process_multiple_environments_and_files(
                environments))
        if stack:
            update.add_breakpoints_cleanup_into_env(env)

        self.log.debug("Getting template contents")
        template_files, template = template_utils.get_template_contents(
            template_path)

        files = dict(list(template_files.items()) + list(env_files.items()))

        clients = self.app.client_manager
        orchestration_client = clients.tripleoclient.orchestration()

        self.log.debug("Deploying stack: %s", stack_name)
        self.log.debug("Deploying template: %s", template)
        self.log.debug("Deploying parameters: %s", parameters)
        self.log.debug("Deploying environment: %s", env)
        self.log.debug("Deploying files: %s", files)

        stack_args = {
            'stack_name': stack_name,
            'template': template,
            'environment': env,
            'files': files
        }

        if timeout:
            stack_args['timeout_mins'] = timeout

        if stack is None:
            self.log.info("Performing Heat stack create")
            orchestration_client.stacks.create(**stack_args)
        else:
            self.log.info("Performing Heat stack update")
            # Make sure existing parameters for stack are reused
            stack_args['existing'] = 'true'
            orchestration_client.stacks.update(stack.id, **stack_args)

        create_result = utils.wait_for_stack_ready(
            orchestration_client, stack_name)
        if not create_result:
            if stack is None:
                raise Exception("Heat Stack create failed.")
            else:
                raise Exception("Heat Stack update failed.")
Example #10
    def _heat_deploy(self, stack, stack_name, template_path, parameters,
                     env_files, timeout, tht_root, env, update_plan_only):
        """Verify the Baremetal nodes are available and do a stack update"""

        clients = self.app.client_manager
        workflow_client = clients.workflow_engine

        if stack:
            update.add_breakpoints_cleanup_into_env(env)

        self.log.debug("Getting template contents from plan %s" % stack_name)
        # We need to reference the plan here, not the local
        # tht root, as we need template_object to refer to
        # the rendered overcloud.yaml, not the tht_root overcloud.j2.yaml
        # FIXME(shardy) we need to move more of this into mistral actions
        plan_yaml_path = os.path.relpath(template_path, tht_root)

        # heatclient template_utils needs a function that can
        # retrieve objects from a container by name/path
        objectclient = clients.tripleoclient.object_store

        def do_object_request(method='GET', object_path=None):
            obj = objectclient.get_object(stack_name, object_path)
            return obj and obj[1]

        template_files, template = template_utils.get_template_contents(
            template_object=plan_yaml_path,
            object_request=do_object_request)

        files = dict(list(template_files.items()) + list(env_files.items()))

        number_controllers = int(parameters.get('ControllerCount', 0))
        if number_controllers > 1:
            if not env.get('parameter_defaults').get('NtpServer'):
                raise exceptions.InvalidConfiguration(
                    'Specify --ntp-server as parameter or NtpServer in '
                    'environments when using multiple controllers '
                    '(with HA).')

        clients = self.app.client_manager

        moved_files = self._upload_missing_files(
            stack_name, objectclient, files, tht_root)
        self._process_and_upload_environment(
            stack_name, objectclient, env, moved_files, tht_root,
            workflow_client)

        if not update_plan_only:
            deployment.deploy_and_wait(self.log, clients, stack, stack_name,
                                       self.app_args.verbose_level, timeout)
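The _heat_deploy variants above fetch the rendered plan out of Swift rather than from local disk, by handing get_template_contents an object_request callback. A standalone sketch of the same hook, wired to a plain python-swiftclient connection (the auth details and the 'overcloud' container name are assumptions):

    import swiftclient
    from heatclient.common import template_utils

    swift = swiftclient.client.Connection(
        preauthurl=SWIFT_URL, preauthtoken=TOKEN)  # placeholder auth

    def fetch_from_plan(method='GET', object_path=None):
        # get_object returns (headers, body); only the body matters here.
        headers, body = swift.get_object('overcloud', object_path)
        return body

    files, template = template_utils.get_template_contents(
        template_object='overcloud.yaml',
        object_request=fetch_from_plan)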
Example #11
    def _update_stack(self, parameters={}, timeout_mins=240):

        tpl_files, template = template_utils.get_template_contents(
            template_file=os.path.join(self.tht_dir, TEMPLATE_NAME)
        )

        env_paths = []
        if self.environment_files:
            env_paths.extend(self.environment_files)
        env_files, env = template_utils.process_multiple_environments_and_files(env_paths=env_paths)
        update.add_breakpoints_cleanup_into_env(env)

        fields = {
            "existing": True,
            "stack_id": self.stack_id,
            "template": template,
            "files": dict(list(tpl_files.items()) + list(env_files.items())),
            "environment": env,
            "parameters": parameters,
            "timeout_mins": timeout_mins,
        }

        LOG.debug("stack update params: %s", fields)
        self.heatclient.stacks.update(**fields)
Example #12
    def _heat_deploy(self, stack, stack_name, template_path, parameters,
                     environments, timeout):
        """Verify the Baremetal nodes are available and do a stack update"""

        self.log.debug("Processing environment files")
        env_files, env = (
            template_utils.process_multiple_environments_and_files(
                environments))
        if stack:
            update.add_breakpoints_cleanup_into_env(env)

        self.log.debug("Getting template contents")
        template_files, template = template_utils.get_template_contents(
            template_path)

        files = dict(list(template_files.items()) + list(env_files.items()))

        clients = self.app.client_manager
        orchestration_client = clients.tripleoclient.orchestration

        self.log.debug("Deploying stack: %s", stack_name)
        self.log.debug("Deploying template: %s", template)
        self.log.debug("Deploying parameters: %s", parameters)
        self.log.debug("Deploying environment: %s", env)
        self.log.debug("Deploying files: %s", files)

        stack_args = {
            'stack_name': stack_name,
            'template': template,
            'environment': env,
            'files': files,
            'clear_parameters': env['parameter_defaults'].keys(),
        }

        if timeout:
            stack_args['timeout_mins'] = timeout

        if stack is None:
            self.log.info("Performing Heat stack create")
            action = 'CREATE'
            marker = None
            orchestration_client.stacks.create(**stack_args)
        else:
            self.log.info("Performing Heat stack update")
            # Make sure existing parameters for stack are reused
            stack_args['existing'] = 'true'
            # Find the last top-level event to use for the first marker
            events = event_utils.get_events(orchestration_client,
                                            stack_id=stack_name,
                                            event_args={
                                                'sort_dir': 'desc',
                                                'limit': 1
                                            })
            marker = events[0].id if events else None
            action = 'UPDATE'

            orchestration_client.stacks.update(stack.id, **stack_args)

        verbose_events = self.app_args.verbose_level > 0
        create_result = utils.wait_for_stack_ready(orchestration_client,
                                                   stack_name, marker, action,
                                                   verbose_events)
        if not create_result:
            if stack is None:
                raise exceptions.DeploymentError("Heat Stack create failed.")
            else:
                raise exceptions.DeploymentError("Heat Stack update failed.")
Example #13
    def _heat_deploy(self, stack, stack_name, template_path, parameters,
                     environments, timeout):
        """Verify the Baremetal nodes are available and do a stack update"""

        self.log.debug("Processing environment files")
        env_files, env = (
            template_utils.process_multiple_environments_and_files(
                environments))
        if stack:
            update.add_breakpoints_cleanup_into_env(env)

        self.log.debug("Getting template contents")
        template_files, template = template_utils.get_template_contents(
            template_path)

        files = dict(list(template_files.items()) + list(env_files.items()))

        clients = self.app.client_manager
        orchestration_client = clients.tripleoclient.orchestration

        self.log.debug("Deploying stack: %s", stack_name)
        self.log.debug("Deploying template: %s", template)
        self.log.debug("Deploying parameters: %s", parameters)
        self.log.debug("Deploying environment: %s", env)
        self.log.debug("Deploying files: %s", files)

        stack_args = {
            'stack_name': stack_name,
            'template': template,
            'environment': env,
            'files': files,
            'clear_parameters': env['parameter_defaults'].keys(),
        }

        if timeout:
            stack_args['timeout_mins'] = timeout

        if stack is None:
            self.log.info("Performing Heat stack create")
            action = 'CREATE'
            marker = None
            orchestration_client.stacks.create(**stack_args)
        else:
            self.log.info("Performing Heat stack update")
            # Make sure existing parameters for stack are reused
            stack_args['existing'] = 'true'
            # Find the last top-level event to use for the first marker
            events = event_utils.get_events(orchestration_client,
                                            stack_id=stack_name,
                                            event_args={'sort_dir': 'desc',
                                                        'limit': 1})
            marker = events[0].id if events else None
            action = 'UPDATE'

            orchestration_client.stacks.update(stack.id, **stack_args)

        verbose_events = self.app_args.verbose_level > 0
        create_result = utils.wait_for_stack_ready(
            orchestration_client, stack_name, marker, action, verbose_events)
        if not create_result:
            if stack is None:
                raise Exception("Heat Stack create failed.")
            else:
                raise Exception("Heat Stack update failed.")