def test_poll_for_events(self, ge):
    """Two poll batches: all resources then the stack reach CREATE_COMPLETE."""
    first_batch = [
        self._mock_event('1', 'astack', 'CREATE_IN_PROGRESS'),
        self._mock_event('2', 'res_child1', 'CREATE_IN_PROGRESS'),
        self._mock_event('3', 'res_child2', 'CREATE_IN_PROGRESS'),
        self._mock_event('4', 'res_child3', 'CREATE_IN_PROGRESS'),
    ]
    second_batch = [
        self._mock_event('5', 'res_child1', 'CREATE_COMPLETE'),
        self._mock_event('6', 'res_child2', 'CREATE_COMPLETE'),
        self._mock_event('7', 'res_child3', 'CREATE_COMPLETE'),
        self._mock_event('8', 'astack', 'CREATE_COMPLETE'),
    ]
    ge.side_effect = [first_batch, second_batch]

    stack_status, msg = event_utils.poll_for_events(
        None, 'astack', action='CREATE', poll_period=0)

    self.assertEqual('CREATE_COMPLETE', stack_status)
    self.assertEqual('\n Stack astack CREATE_COMPLETE \n', msg)
    # Second fetch must resume from the last seen event id ('4').
    expected_calls = [
        mock.call(None, stack_id='astack', nested_depth=0,
                  event_args={'sort_dir': 'asc', 'marker': None}),
        mock.call(None, stack_id='astack', nested_depth=0,
                  event_args={'sort_dir': 'asc', 'marker': '4'}),
    ]
    ge.assert_has_calls(expected_calls)
def test_poll_for_events_same_name(self, ge):
    """A resource that shares the stack's name must not end polling early."""
    batches = [
        [
            self._mock_stack_event('1', 'mything', 'CREATE_IN_PROGRESS'),
            self._mock_event('2', 'res_child1', 'CREATE_IN_PROGRESS'),
            self._mock_event('3', 'mything', 'CREATE_IN_PROGRESS'),
        ],
        [
            # Resource-level COMPLETE for 'mything' — not the stack itself.
            self._mock_event('4', 'mything', 'CREATE_COMPLETE'),
        ],
        [
            self._mock_event('5', 'res_child1', 'CREATE_COMPLETE'),
            self._mock_stack_event('6', 'mything', 'CREATE_COMPLETE'),
        ],
    ]
    ge.side_effect = batches

    stack_status, msg = event_utils.poll_for_events(
        None, 'mything', action='CREATE', poll_period=0)

    self.assertEqual('CREATE_COMPLETE', stack_status)
    self.assertEqual('\n Stack mything CREATE_COMPLETE \n', msg)
    # Marker advances to the newest event id after each batch.
    expected_markers = [None, '3', '4']
    expected_calls = [
        mock.call(None, stack_id='mything', nested_depth=0,
                  event_args={'sort_dir': 'asc', 'marker': marker})
        for marker in expected_markers
    ]
    ge.assert_has_calls(expected_calls)
def _stack_action(stack, parsed_args, heat_client, action, action_name=None):
    """Invoke *action* on a stack and optionally wait for it to finish.

    :param stack: stack name or id to act on
    :param parsed_args: argparse namespace; only ``wait`` is read here
    :param heat_client: orchestration API client
    :param action: callable taking the stack identifier (performs the API call)
    :param action_name: Heat action name (e.g. 'SUSPEND') used to build the
        terminal status to check; when None any ``*_FAILED`` status is an error
    :returns: the refreshed stack object from ``heat_client.stacks.get``
    :raises exc.CommandError: if the stack is missing or the action failed
    """
    if parsed_args.wait:
        # find the last event to use as the marker
        # NOTE(review): a sibling variant of this helper passes limit=1 as a
        # separate kwarg to get_events instead of inside event_args — confirm
        # which form this heatclient version expects.
        events = event_utils.get_events(heat_client,
                                        stack_id=stack,
                                        event_args={'sort_dir': 'desc',
                                                    'limit': 1})
        marker = events[0].id if events else None
    try:
        action(stack)
    except heat_exc.HTTPNotFound:
        msg = _('Stack not found: %s') % stack
        raise exc.CommandError(msg)
    if parsed_args.wait:
        # Poll events newer than the marker until a terminal status appears.
        s = heat_client.stacks.get(stack)
        stack_status, msg = event_utils.poll_for_events(
            heat_client, s.stack_name, action=action_name, marker=marker)
        if action_name:
            if stack_status == '%s_FAILED' % action_name:
                raise exc.CommandError(msg)
        else:
            if stack_status.endswith('_FAILED'):
                raise exc.CommandError(msg)
    return heat_client.stacks.get(stack)
def _stack_action(stack, parsed_args, heat_client, action, action_name=None):
    """Invoke *action* on a stack and optionally wait for it to finish.

    :param stack: stack name or id to act on
    :param parsed_args: argparse namespace; only ``wait`` is read here
    :param heat_client: orchestration API client
    :param action: callable taking the stack identifier (performs the API call)
    :param action_name: Heat action name used to build the terminal status to
        check; when None any ``*_FAILED`` status is treated as an error
    :returns: the refreshed stack object from ``heat_client.stacks.get``
    :raises exc.CommandError: if the stack is missing or the action failed
    """
    if parsed_args.wait:
        # find the last event to use as the marker
        events = event_utils.get_events(heat_client,
                                        stack_id=stack,
                                        event_args={'sort_dir': 'desc'},
                                        limit=1)
        marker = events[0].id if events else None
    try:
        action(stack)
    except heat_exc.HTTPNotFound:
        msg = _('Stack not found: %s') % stack
        raise exc.CommandError(msg)
    if parsed_args.wait:
        # Poll events newer than the marker until a terminal status appears.
        s = heat_client.stacks.get(stack)
        stack_status, msg = event_utils.poll_for_events(heat_client,
                                                        s.stack_name,
                                                        action=action_name,
                                                        marker=marker)
        if action_name:
            if stack_status == '%s_FAILED' % action_name:
                raise exc.CommandError(msg)
        else:
            if stack_status.endswith('_FAILED'):
                raise exc.CommandError(msg)
    return heat_client.stacks.get(stack)
def take_action(self, parsed_args):
    """Adopt a stack from previously-abandoned stack data."""
    self.log.debug('take_action(%s)', parsed_args)
    client = self.app.client_manager.orchestration

    env_files, env = (
        template_utils.process_multiple_environments_and_files(
            env_paths=parsed_args.environment))

    # Read the adopt data from the (possibly local) file URL.
    adopt_url = heat_utils.normalise_file_path_to_url(
        parsed_args.adopt_file)
    adopt_data = request.urlopen(adopt_url).read().decode('utf-8')

    create_args = {
        'stack_name': parsed_args.name,
        'disable_rollback': not parsed_args.enable_rollback,
        'adopt_stack_data': adopt_data,
        'parameters': heat_utils.format_parameters(parsed_args.parameter),
        'files': dict(env_files),
        'environment': env,
        'timeout': parsed_args.timeout,
    }

    stack = client.stacks.create(**create_args)['stack']

    if parsed_args.wait:
        stack_status, msg = event_utils.poll_for_events(
            client, parsed_args.name, action='ADOPT')
        if stack_status == 'ADOPT_FAILED':
            raise exc.CommandError(msg)

    return _show_stack(client, stack['id'], format='table', short=True)
def take_action(self, parsed_args):
    """Adopt a stack from previously-abandoned stack data.

    Merges any resource files embedded in the adopt data with the files
    gathered from the environment, then creates the stack and optionally
    waits for the ADOPT action to complete.
    """
    self.log.debug('take_action(%s)', parsed_args)
    client = self.app.client_manager.orchestration

    env_files, env = (
        template_utils.process_multiple_environments_and_files(
            env_paths=parsed_args.environment))

    # Read the adopt data from the (possibly local) file URL.
    adopt_url = heat_utils.normalise_file_path_to_url(
        parsed_args.adopt_file)
    adopt_data = request.urlopen(adopt_url).read().decode('utf-8')

    # The adopt data may itself carry a 'files' mapping; environment files
    # take precedence on key collisions because they are applied last.
    yaml_adopt_data = yaml.safe_load(adopt_data) or {}
    files = yaml_adopt_data.get('files', {})
    files.update(env_files)

    fields = {
        'stack_name': parsed_args.name,
        'disable_rollback': not parsed_args.enable_rollback,
        'adopt_stack_data': adopt_data,
        'parameters': heat_utils.format_parameters(parsed_args.parameter),
        'files': files,
        'environment': env,
        'timeout': parsed_args.timeout
    }

    stack = client.stacks.create(**fields)['stack']

    if parsed_args.wait:
        stack_status, msg = event_utils.poll_for_events(client,
                                                        parsed_args.name,
                                                        action='ADOPT')
        if stack_status == 'ADOPT_FAILED':
            raise exc.CommandError(msg)

    return _show_stack(client, stack['id'], format='table', short=True)
def test_poll_for_events_in_progress_resource(self, ge):
    """Stack COMPLETE ends polling even with a resource still in progress."""
    events = [
        self._mock_event('1', 'astack', 'CREATE_IN_PROGRESS'),
        self._mock_event('2', 'res_child1', 'CREATE_IN_PROGRESS'),
        self._mock_event('3', 'astack', 'CREATE_COMPLETE'),
    ]
    ge.side_effect = [events]

    stack_status, msg = event_utils.poll_for_events(
        None, 'astack', action='CREATE', poll_period=0)

    self.assertEqual('CREATE_COMPLETE', stack_status)
    self.assertEqual('\n Stack astack CREATE_COMPLETE \n', msg)
def test_poll_for_events_in_progress_resource(self, ge):
    """Stack-level COMPLETE ends polling despite an in-progress resource."""
    single_batch = [
        self._mock_stack_event('1', 'astack', 'CREATE_IN_PROGRESS'),
        self._mock_event('2', 'res_child1', 'CREATE_IN_PROGRESS'),
        self._mock_stack_event('3', 'astack', 'CREATE_COMPLETE'),
    ]
    ge.side_effect = [single_batch]

    stack_status, msg = event_utils.poll_for_events(
        None, 'astack', action='CREATE', poll_period=0)

    self.assertEqual('CREATE_COMPLETE', stack_status)
    self.assertEqual('\n Stack astack CREATE_COMPLETE \n', msg)
def test_poll_for_events_stack_get(self, ge):
    """When events dry up, the stack itself is queried for a final status."""
    mock_client = mock.MagicMock()
    mock_client.stacks.get.return_value.stack_status = 'CREATE_FAILED'

    in_progress = [
        self._mock_stack_event('1', 'astack', 'CREATE_IN_PROGRESS'),
        self._mock_event('2', 'res_child1', 'CREATE_IN_PROGRESS'),
        self._mock_event('3', 'res_child2', 'CREATE_IN_PROGRESS'),
        self._mock_event('4', 'res_child3', 'CREATE_IN_PROGRESS'),
    ]
    # Two empty batches force the fallback to stacks.get().
    ge.side_effect = [in_progress, [], []]

    stack_status, msg = event_utils.poll_for_events(
        mock_client, 'astack', action='CREATE', poll_period=0)

    self.assertEqual('CREATE_FAILED', stack_status)
    self.assertEqual('\n Stack astack CREATE_FAILED \n', msg)
def test_poll_for_events_stack_get(self, ge):
    """When events dry up, the stack itself is queried for a final status."""
    mock_client = mock.MagicMock()
    mock_client.stacks.get.return_value.stack_status = 'CREATE_FAILED'

    in_progress = [
        self._mock_event('1', 'astack', 'CREATE_IN_PROGRESS'),
        self._mock_event('2', 'res_child1', 'CREATE_IN_PROGRESS'),
        self._mock_event('3', 'res_child2', 'CREATE_IN_PROGRESS'),
        self._mock_event('4', 'res_child3', 'CREATE_IN_PROGRESS'),
    ]
    # Two empty batches force the fallback to stacks.get().
    ge.side_effect = [in_progress, [], []]

    stack_status, msg = event_utils.poll_for_events(
        mock_client, 'astack', action='CREATE', poll_period=0)

    self.assertEqual('CREATE_FAILED', stack_status)
    self.assertEqual('\n Stack astack CREATE_FAILED \n', msg)
def test_poll_for_events_no_action(self, ge):
    """With no action filter, any terminal stack status ends polling."""
    first_batch = [
        self._mock_event('1', 'astack', 'CREATE_IN_PROGRESS'),
        self._mock_event('2', 'res_child1', 'CREATE_IN_PROGRESS'),
        self._mock_event('3', 'res_child2', 'CREATE_IN_PROGRESS'),
        self._mock_event('4', 'res_child3', 'CREATE_IN_PROGRESS'),
    ]
    second_batch = [
        self._mock_event('5', 'res_child1', 'CREATE_COMPLETE'),
        self._mock_event('6', 'res_child2', 'CREATE_FAILED'),
        self._mock_event('7', 'res_child3', 'CREATE_COMPLETE'),
        self._mock_event('8', 'astack', 'FOO_FAILED'),
    ]
    ge.side_effect = [first_batch, second_batch]

    stack_status, msg = event_utils.poll_for_events(
        None, 'astack', action=None, poll_period=0)

    self.assertEqual('FOO_FAILED', stack_status)
    self.assertEqual('\n Stack astack FOO_FAILED \n', msg)
def test_poll_for_events_no_action(self, ge):
    """With no action filter, any terminal stack-level status ends polling."""
    first_batch = [
        self._mock_stack_event('1', 'astack', 'CREATE_IN_PROGRESS'),
        self._mock_event('2', 'res_child1', 'CREATE_IN_PROGRESS'),
        self._mock_event('3', 'res_child2', 'CREATE_IN_PROGRESS'),
        self._mock_event('4', 'res_child3', 'CREATE_IN_PROGRESS'),
    ]
    second_batch = [
        self._mock_event('5', 'res_child1', 'CREATE_COMPLETE'),
        self._mock_event('6', 'res_child2', 'CREATE_FAILED'),
        self._mock_event('7', 'res_child3', 'CREATE_COMPLETE'),
        self._mock_stack_event('8', 'astack', 'FOO_FAILED'),
    ]
    ge.side_effect = [first_batch, second_batch]

    stack_status, msg = event_utils.poll_for_events(
        None, 'astack', action=None, poll_period=0)

    self.assertEqual('FOO_FAILED', stack_status)
    self.assertEqual('\n Stack astack FOO_FAILED \n', msg)
def test_poll_for_events_with_marker(self, ge):
    """Polling resumes from the supplied marker instead of the beginning."""
    events = [
        self._mock_event('5', 'res_child1', 'CREATE_COMPLETE'),
        self._mock_event('6', 'res_child2', 'CREATE_COMPLETE'),
        self._mock_event('7', 'res_child3', 'CREATE_COMPLETE'),
        self._mock_stack_event('8', 'astack', 'CREATE_COMPLETE'),
    ]
    ge.side_effect = [events]

    stack_status, msg = event_utils.poll_for_events(
        None, 'astack', action='CREATE', poll_period=0, marker='4',
        nested_depth=0)

    self.assertEqual('CREATE_COMPLETE', stack_status)
    self.assertEqual('\n Stack astack CREATE_COMPLETE \n', msg)
    # The marker must be forwarded verbatim to the first event fetch.
    expected_call = mock.call(None, stack_id='astack', nested_depth=0,
                              event_args={'sort_dir': 'asc', 'marker': '4'})
    ge.assert_has_calls([expected_call])
def wait_for_stack_ready(orchestration_client, stack_name, marker=None,
                         action='CREATE', verbose=False):
    """Check the status of an orchestration stack

    Get the status of an orchestration stack and check whether it is
    complete or failed.

    :param orchestration_client: Instance of Orchestration client
    :type orchestration_client: heatclient.v1.client.Client

    :param stack_name: Name or UUID of stack to retrieve
    :type stack_name: string

    :param marker: UUID of the last stack event before the current action
    :type marker: string

    :param action: Current action to check the stack for COMPLETE
    :type action: string

    :param verbose: Whether to print events
    :type verbose: boolean
    """
    stack = get_stack(orchestration_client, stack_name)
    if not stack:
        return False
    # Re-key on "<name>/<id>" so nested-event lookups are unambiguous.
    stack_name = "%s/%s" % (stack.stack_name, stack.id)

    # FIX: the devnull handle used to be opened and never closed, leaking a
    # file descriptor (and emitting ResourceWarning) on every quiet call.
    devnull = None
    if verbose:
        out = sys.stdout
    else:
        devnull = open(os.devnull, "w")
        out = devnull
    try:
        stack_status, msg = event_utils.poll_for_events(
            orchestration_client, stack_name, action=action,
            poll_period=5, marker=marker, out=out, nested_depth=2)
    finally:
        if devnull is not None:
            devnull.close()
    print(msg)
    return stack_status == '%s_COMPLETE' % action
def wait_for_stack_ready(orchestration_client, stack_name, marker=None,
                         action='CREATE', verbose=False):
    """Check the status of an orchestration stack

    Get the status of an orchestration stack and check whether it is
    complete or failed.

    :param orchestration_client: Instance of Orchestration client
    :type orchestration_client: heatclient.v1.client.Client

    :param stack_name: Name or UUID of stack to retrieve
    :type stack_name: string

    :param marker: UUID of the last stack event before the current action
    :type marker: string

    :param action: Current action to check the stack for COMPLETE
    :type action: string

    :param verbose: Whether to print events
    :type verbose: boolean
    """
    stack = get_stack(orchestration_client, stack_name)
    if not stack:
        return False
    stack_name = stack.stack_name

    # FIX: the devnull handle used to be opened and never closed, leaking a
    # file descriptor (and emitting ResourceWarning) on every quiet call.
    devnull = None
    if verbose:
        out = sys.stdout
    else:
        devnull = open(os.devnull, "w")
        out = devnull
    try:
        stack_status, msg = event_utils.poll_for_events(
            orchestration_client, stack_name, action=action,
            poll_period=5, marker=marker, out=out, nested_depth=2)
    finally:
        if devnull is not None:
            devnull.close()
    print(msg)
    return stack_status == '%s_COMPLETE' % action
def take_action(self, parsed_args):
    """Deploy the undercloud via an ephemeral Heat process and Ansible.

    Launches a local Heat, creates the TripleO stack, waits for
    CREATE_COMPLETE, downloads the generated Ansible playbooks, and
    (unless --output-only) exec()s Ansible to apply them.
    """
    self.log.debug("take_action(%s)" % parsed_args)

    print("\nUndercloud deploy is an experimental developer focused "
          "feature that does not yet replace "
          "'openstack undercloud install'.")

    if not parsed_args.local_ip:
        print('Please set --local-ip to the correct ipaddress/cidr '
              'for this machine.')
        return

    if not os.environ.get('HEAT_API_PORT'):
        os.environ['HEAT_API_PORT'] = parsed_args.heat_api_port

    # The main thread runs as root and we drop privs for forked
    # processes below. Only the heat deploy/os-collect-config forked
    # process runs as root.
    if os.geteuid() != 0:
        raise exceptions.DeploymentError("Please run as root.")

    # configure puppet
    self._configure_puppet()

    try:
        # Launch heat.
        orchestration_client = self._launch_heat(parsed_args)
        # Wait for heat to be ready.
        self._wait_local_port_ready(parsed_args.heat_api_port)
        # Deploy TripleO Heat templates.
        stack_id = \
            self._deploy_tripleo_heat_templates(orchestration_client,
                                                parsed_args)
        # Wait for complete..
        status, msg = event_utils.poll_for_events(orchestration_client,
                                                  stack_id,
                                                  nested_depth=6)
        if status != "CREATE_COMPLETE":
            raise Exception("Stack create failed; %s" % msg)
        # download the ansible playbooks and execute them.
        ansible_dir = \
            self._download_ansible_playbooks(orchestration_client,
                                             parsed_args.stack,
                                             parsed_args.output_dir)
        # Kill heat, we're done with it now.
        self._kill_heat(parsed_args)
        if not parsed_args.output_only:
            # Never returns.. We exec() it directly.
            self._launch_ansible(ansible_dir)
    except Exception as e:
        print("Exception: %s" % e)
        print(traceback.format_exception(*sys.exc_info()))
        raise
    finally:
        # NOTE(review): the `return` statements below execute inside
        # `finally`, which silently discards the exception re-raised in the
        # except clause above — callers only ever see 1/0, never the raise.
        # Confirm whether that is intentional before changing it.
        self._kill_heat(parsed_args)
        if not parsed_args.output_only:
            # We only get here on error.
            print('ERROR: Heat log files: %s' % (self.heat_launch.install_tmp))
            return 1
        else:
            return 0
def take_action(self, parsed_args):
    """Delete one or more stacks, optionally waiting for each to finish.

    Prompts for confirmation on a TTY unless --yes was given; collects a
    per-stack event marker before deleting so the wait phase only reports
    events caused by this delete.
    """
    self.log.debug("take_action(%s)", parsed_args)
    heat_client = self.app.client_manager.orchestration

    try:
        if not parsed_args.yes and sys.stdin.isatty():
            prompt_response = input(
                _("Are you sure you want to delete this stack(s) [y/N]? ")
            ).lower()
            if not prompt_response.startswith('y'):
                self.log.info('User did not confirm stack delete so '
                              'taking no action.')
                return
    except KeyboardInterrupt:  # ctrl-c
        self.log.info('User did not confirm stack delete '
                      '(ctrl-c) so taking no action.')
        return
    except EOFError:  # ctrl-d
        self.log.info('User did not confirm stack delete '
                      '(ctrl-d) so taking no action.')
        return

    failure_count = 0
    stacks_waiting = []
    for sid in parsed_args.stack:
        marker = None
        if parsed_args.wait:
            try:
                # find the last event to use as the marker
                events = event_utils.get_events(
                    heat_client,
                    stack_id=sid,
                    event_args={'sort_dir': 'desc'},
                    limit=1)
                if events:
                    marker = events[0].id
            except heat_exc.CommandError as ex:
                failure_count += 1
                print(ex)
                continue
        try:
            heat_client.stacks.delete(sid)
            stacks_waiting.append((sid, marker))
        except heat_exc.HTTPNotFound:
            failure_count += 1
            print(_('Stack not found: %s') % sid)
        except heat_exc.Forbidden:
            failure_count += 1
            print(_('Forbidden: %s') % sid)

    if parsed_args.wait:
        # Wait for each successfully-issued delete to reach a terminal state.
        for sid, marker in stacks_waiting:
            try:
                stack_status, msg = event_utils.poll_for_events(
                    heat_client, sid, action='DELETE', marker=marker)
            except heat_exc.CommandError:
                # Stack disappearing mid-poll means the delete succeeded.
                continue
            if stack_status == 'DELETE_FAILED':
                failure_count += 1
                print(msg)

    if failure_count:
        msg = (_('Unable to delete %(count)d of the %(total)d stacks.') %
               {'count': failure_count, 'total': len(parsed_args.stack)})
        raise exc.CommandError(msg)
def take_action(self, parsed_args):
    """Update an existing stack (or preview the update with --dry-run).

    Builds the update request from the template, environments and
    parameters, then either previews the resource changes or performs the
    update, optionally waiting for UPDATE_COMPLETE.
    """
    self.log.debug('take_action(%s)', parsed_args)
    client = self.app.client_manager.orchestration

    # When a files container is given, template/env children are resolved
    # server-side, so client-side fetching is disabled.
    tpl_files, template = template_utils.process_template_path(
        parsed_args.template,
        object_request=http.authenticated_fetcher(client),
        existing=parsed_args.existing,
        fetch_child=parsed_args.files_container is None)

    env_files_list = []
    env_files, env = (
        template_utils.process_multiple_environments_and_files(
            env_paths=parsed_args.environment,
            env_list_tracker=env_files_list,
            fetch_env_files=parsed_args.files_container is None))

    parameters = heat_utils.format_all_parameters(
        parsed_args.parameter,
        parsed_args.parameter_file,
        parsed_args.template)

    if parsed_args.pre_update:
        template_utils.hooks_to_env(env, parsed_args.pre_update,
                                    'pre-update')

    fields = {
        'stack_id': parsed_args.stack,
        'parameters': parameters,
        'existing': parsed_args.existing,
        'template': template,
        'files': dict(list(tpl_files.items()) + list(env_files.items())),
        'environment': env
    }

    # If one or more environments is found, pass the listing to the server
    if env_files_list:
        fields['environment_files'] = env_files_list
    if parsed_args.files_container:
        fields['files_container'] = parsed_args.files_container

    if parsed_args.tags:
        fields['tags'] = parsed_args.tags
    if parsed_args.timeout:
        fields['timeout_mins'] = parsed_args.timeout
    if parsed_args.clear_parameter:
        fields['clear_parameters'] = list(parsed_args.clear_parameter)

    if parsed_args.rollback:
        rollback = parsed_args.rollback.strip().lower()
        if rollback not in ('enabled', 'disabled', 'keep'):
            msg = _('--rollback invalid value: %s') % parsed_args.rollback
            raise exc.CommandError(msg)
        # 'keep' leaves the stack's current rollback setting untouched.
        if rollback != 'keep':
            fields['disable_rollback'] = rollback == 'disabled'

    if parsed_args.dry_run:
        if parsed_args.show_nested:
            fields['show_nested'] = parsed_args.show_nested

        changes = client.stacks.preview_update(**fields)

        fields = ['state', 'resource_name', 'resource_type',
                  'resource_identity']

        columns = sorted(changes.get("resource_changes", {}).keys())
        data = [heat_utils.json_formatter(changes["resource_changes"][key])
                for key in columns]

        return columns, data

    if parsed_args.wait:
        # find the last event to use as the marker
        events = event_utils.get_events(client,
                                        stack_id=parsed_args.stack,
                                        event_args={'sort_dir': 'desc'},
                                        limit=1)
        marker = events[0].id if events else None

    if parsed_args.converge:
        fields['converge'] = True

    client.stacks.update(**fields)

    if parsed_args.wait:
        stack = client.stacks.get(parsed_args.stack)
        stack_status, msg = event_utils.poll_for_events(client,
                                                        stack.stack_name,
                                                        action='UPDATE',
                                                        marker=marker)
        if stack_status == 'UPDATE_FAILED':
            raise exc.CommandError(msg)

    return _show_stack(client, parsed_args.stack, format='table',
                       short=True)
def take_action(self, parsed_args):
    """Create a stack (or preview it with --dry-run).

    Builds the create request from the template, environments and
    parameters, then either previews the stack or creates it, optionally
    waiting for CREATE_COMPLETE.
    """
    self.log.debug('take_action(%s)', parsed_args)
    client = self.app.client_manager.orchestration

    # When a files container is given, template/env children are resolved
    # server-side, so client-side fetching is disabled.
    tpl_files, template = template_utils.process_template_path(
        parsed_args.template,
        object_request=http.authenticated_fetcher(client),
        fetch_child=parsed_args.files_container is None)

    env_files_list = []
    env_files, env = (
        template_utils.process_multiple_environments_and_files(
            env_paths=parsed_args.environment,
            env_list_tracker=env_files_list,
            fetch_env_files=parsed_args.files_container is None))

    parameters = heat_utils.format_all_parameters(
        parsed_args.parameter,
        parsed_args.parameter_file,
        parsed_args.template)

    if parsed_args.pre_create:
        template_utils.hooks_to_env(env, parsed_args.pre_create,
                                    'pre-create')

    fields = {
        'stack_name': parsed_args.name,
        'disable_rollback': not parsed_args.enable_rollback,
        'parameters': parameters,
        'template': template,
        'files': dict(list(tpl_files.items()) + list(env_files.items())),
        'environment': env
    }

    # If one or more environments is found, pass the listing to the server
    if env_files_list:
        fields['environment_files'] = env_files_list
    if parsed_args.files_container:
        fields['files_container'] = parsed_args.files_container

    if parsed_args.tags:
        fields['tags'] = parsed_args.tags
    if parsed_args.timeout:
        fields['timeout_mins'] = parsed_args.timeout

    if parsed_args.dry_run:
        stack = client.stacks.preview(**fields)

        formatters = {
            'description': heat_utils.text_wrap_formatter,
            'template_description': heat_utils.text_wrap_formatter,
            'stack_status_reason': heat_utils.text_wrap_formatter,
            'parameters': heat_utils.json_formatter,
            'outputs': heat_utils.json_formatter,
            'resources': heat_utils.json_formatter,
            'links': heat_utils.link_formatter,
        }

        columns = []
        for key in stack.to_dict():
            columns.append(key)
        columns.sort()

        return (columns,
                utils.get_item_properties(stack, columns,
                                          formatters=formatters))

    stack = client.stacks.create(**fields)['stack']

    if parsed_args.wait:
        stack_status, msg = event_utils.poll_for_events(
            client, parsed_args.name, action='CREATE',
            poll_period=parsed_args.poll)
        if stack_status == 'CREATE_FAILED':
            raise exc.CommandError(msg)

    return _show_stack(client, stack['id'], format='table', short=True)
def _standalone_deploy(self, parsed_args):
    """Deploy a standalone node via ephemeral Heat plus Ansible.

    Launches a local Heat, creates/updates the TripleO stack, waits for
    CREATE_COMPLETE, downloads the generated playbooks and runs them
    (unless --output-only). Returns the Ansible return code (1 on any
    earlier failure).

    :raises exceptions.DeploymentError: on pre-flight failures, stack
        failure, upgrade failure, or any unexpected exception.
    """
    if not parsed_args.local_ip:
        msg = _('Please set --local-ip to the correct '
                'ipaddress/cidr for this machine.')
        self.log.error(msg)
        raise exceptions.DeploymentError(msg)

    if not os.environ.get('HEAT_API_PORT'):
        os.environ['HEAT_API_PORT'] = parsed_args.heat_api_port

    # The main thread runs as root and we drop privs for forked
    # processes below. Only the heat deploy/os-collect-config forked
    # process runs as root.
    if os.geteuid() != 0:
        msg = _("Please run as root.")
        self.log.error(msg)
        raise exceptions.DeploymentError(msg)

    # prepare working spaces
    self.output_dir = os.path.abspath(parsed_args.output_dir)
    self._create_working_dirs()
    # The state that needs to be persisted between serial deployments
    # and cannot be contained in ephemeral heat stacks or working dirs
    self._create_persistent_dirs()

    # configure puppet
    self._configure_puppet()

    # copy the templates dir in place
    self._populate_templates_dir(parsed_args.templates)

    # configure our roles data
    self._set_roles_file(parsed_args.roles_file, self.tht_render)
    self._get_roles_data()

    rc = 1
    try:
        # NOTE(bogdando): Look for the unique virtual update mark matching
        # the heat stack name we are going to create below. If found the
        # mark, consider the stack action is UPDATE instead of CREATE.
        mark_uuid = '_'.join(['update_mark', parsed_args.stack])
        self.stack_update_mark = os.path.join(
            constants.STANDALONE_EPHEMERAL_STACK_VSTATE, mark_uuid)

        # Prepare the heat stack action we want to start deployment with
        if (os.path.isfile(self.stack_update_mark) or
                parsed_args.force_stack_update):
            self.stack_action = 'UPDATE'
        self.log.warning(
            _('The heat stack {0} action is {1}').format(
                parsed_args.stack, self.stack_action))

        # Launch heat.
        orchestration_client = self._launch_heat(parsed_args)
        # Wait for heat to be ready.
        utils.wait_api_port_ready(parsed_args.heat_api_port)
        # Deploy TripleO Heat templates.
        stack_id = \
            self._deploy_tripleo_heat_templates(orchestration_client,
                                                parsed_args)
        # Wait for complete..
        status, msg = event_utils.poll_for_events(orchestration_client,
                                                  stack_id,
                                                  nested_depth=6)
        if status != "CREATE_COMPLETE":
            message = _("Stack create failed; %s") % msg
            self.log.error(message)
            raise exceptions.DeploymentError(message)

        # download the ansible playbooks and execute them.
        ansible_dir = \
            self._download_ansible_playbooks(orchestration_client,
                                             parsed_args.stack,
                                             parsed_args.standalone_role)
        # Kill heat, we're done with it now.
        self._kill_heat(parsed_args)
        if not parsed_args.output_only:
            # Run Upgrade tasks before the deployment
            if parsed_args.upgrade:
                rc = self._launch_ansible_upgrade(ansible_dir)
                if rc != 0:
                    raise exceptions.DeploymentError('Upgrade failed')
            rc = self._launch_ansible_deploy(ansible_dir)
    except Exception as e:
        self.log.error("Exception: %s" % six.text_type(e))
        raise exceptions.DeploymentError(six.text_type(e))
    finally:
        self._kill_heat(parsed_args)
        tar_filename = self._create_install_artifact()
        self._cleanup_working_dirs(cleanup=parsed_args.cleanup)
        if tar_filename:
            self.log.warning('Install artifact is located at %s' %
                             tar_filename)
        if not parsed_args.output_only and rc != 0:
            # We only get here on error.
            # Alter the stack virtual state for failed deployments
            if (self.stack_update_mark and
                    not parsed_args.force_stack_update and
                    os.path.isfile(self.stack_update_mark)):
                # FIX: removed the doubled "is" in this warning message.
                self.log.warning(
                    _('The heat stack %s virtual state/action is '
                      'reset to CREATE. Use "--force-stack-update" to '
                      ' set it forcefully to UPDATE') % parsed_args.stack)
                self.log.warning(
                    _('Removing the stack virtual update mark file %s') %
                    self.stack_update_mark)
                os.remove(self.stack_update_mark)

            self.log.error(
                DEPLOY_FAILURE_MESSAGE.format(
                    self.heat_launch.install_tmp))
            raise exceptions.DeploymentError('Deployment failed.')
        else:
            # We only get here if no errors
            self.log.warning(
                DEPLOY_COMPLETION_MESSAGE.format(
                    '~/undercloud-passwords.conf',
                    '~/stackrc'))
            if (self.stack_update_mark and
                    (not parsed_args.output_only or
                     parsed_args.force_stack_update)):
                # Persist the unique mark file for this stack
                # Do not update its atime file system attribute to keep its
                # genuine timestamp for the 1st time the stack state had
                # been (virtually) changed to match stack_action UPDATE
                self.log.warning(
                    _('Writing the stack virtual update mark file %s') %
                    self.stack_update_mark)
                # FIX: mode 'wa' is invalid in Python 3 (exactly one of
                # r/w/a/x is allowed) and raises ValueError, so the mark
                # file was never written; 'w' creates/truncates it.
                open(self.stack_update_mark, 'w').close()
            elif parsed_args.output_only:
                self.log.warning(
                    _('Not creating the stack %s virtual update mark file '
                      'in the --output-only mode! Re-run with '
                      '--force-stack-update, if you want to enforce it.') %
                    parsed_args.stack)
            else:
                self.log.warning(
                    _('Not creating the stack %s virtual update mark '
                      'file') % parsed_args.stack)

    return rc
def take_action(self, parsed_args):
    """Create a stack (or preview it with --dry-run).

    Builds the create request from the template, environments and
    parameters, then either previews the stack or creates it, optionally
    waiting for CREATE_COMPLETE.
    """
    self.log.debug('take_action(%s)', parsed_args)
    client = self.app.client_manager.orchestration

    tpl_files, template = template_utils.process_template_path(
        parsed_args.template,
        object_request=http.authenticated_fetcher(client))

    env_files, env = (
        template_utils.process_multiple_environments_and_files(
            env_paths=parsed_args.environment))

    parameters = heat_utils.format_all_parameters(
        parsed_args.parameter,
        parsed_args.parameter_file,
        parsed_args.template)

    if parsed_args.pre_create:
        template_utils.hooks_to_env(env, parsed_args.pre_create,
                                    'pre-create')

    fields = {
        'stack_name': parsed_args.name,
        'disable_rollback': not parsed_args.enable_rollback,
        'parameters': parameters,
        'template': template,
        'files': dict(list(tpl_files.items()) + list(env_files.items())),
        'environment': env
    }

    if parsed_args.tags:
        fields['tags'] = parsed_args.tags
    if parsed_args.timeout:
        fields['timeout_mins'] = parsed_args.timeout

    if parsed_args.dry_run:
        # Preview only: render the would-be stack as a sorted column table.
        stack = client.stacks.preview(**fields)

        formatters = {
            'description': heat_utils.text_wrap_formatter,
            'template_description': heat_utils.text_wrap_formatter,
            'stack_status_reason': heat_utils.text_wrap_formatter,
            'parameters': heat_utils.json_formatter,
            'outputs': heat_utils.json_formatter,
            'resources': heat_utils.json_formatter,
            'links': heat_utils.link_formatter,
        }

        columns = []
        for key in stack.to_dict():
            columns.append(key)
        columns.sort()

        return (
            columns,
            utils.get_item_properties(stack, columns,
                                      formatters=formatters)
        )

    stack = client.stacks.create(**fields)['stack']

    if parsed_args.wait:
        stack_status, msg = event_utils.poll_for_events(
            client, parsed_args.name, action='CREATE')
        if stack_status == 'CREATE_FAILED':
            raise exc.CommandError(msg)

    return _show_stack(client, stack['id'], format='table', short=True)
def take_action(self, parsed_args):
    """Delete one or more stacks, optionally waiting for each to finish.

    Prompts for confirmation on a TTY unless --yes was given; collects a
    per-stack event marker before deleting so the wait phase only reports
    events caused by this delete.
    """
    self.log.debug("take_action(%s)", parsed_args)
    heat_client = self.app.client_manager.orchestration

    try:
        if not parsed_args.yes and sys.stdin.isatty():
            sys.stdout.write(
                _("Are you sure you want to delete this stack(s) [y/N]? "))
            prompt_response = sys.stdin.readline().lower()
            if not prompt_response.startswith('y'):
                self.log.info(_LI('User did not confirm stack delete so '
                                  'taking no action.'))
                return
    except KeyboardInterrupt:  # ctrl-c
        self.log.info(_LI('User did not confirm stack delete '
                          '(ctrl-c) so taking no action.'))
        return
    except EOFError:  # ctrl-d
        self.log.info(_LI('User did not confirm stack delete '
                          '(ctrl-d) so taking no action.'))
        return

    failure_count = 0
    stacks_waiting = []
    for sid in parsed_args.stack:
        marker = None
        if parsed_args.wait:
            try:
                # find the last event to use as the marker
                # NOTE(review): a sibling variant passes limit=1 as a
                # separate kwarg to get_events rather than inside
                # event_args — confirm which form this heatclient
                # version expects.
                events = event_utils.get_events(heat_client,
                                                stack_id=sid,
                                                event_args={
                                                    'sort_dir': 'desc',
                                                    'limit': 1})
                if events:
                    marker = events[0].id
            except heat_exc.CommandError as ex:
                failure_count += 1
                print(ex)
                continue
        try:
            heat_client.stacks.delete(sid)
            stacks_waiting.append((sid, marker))
        except heat_exc.HTTPNotFound:
            failure_count += 1
            print(_('Stack not found: %s') % sid)
        except heat_exc.Forbidden:
            failure_count += 1
            print(_('Forbidden: %s') % sid)

    if parsed_args.wait:
        # Wait for each successfully-issued delete to reach a terminal state.
        for sid, marker in stacks_waiting:
            try:
                stack_status, msg = event_utils.poll_for_events(
                    heat_client, sid, action='DELETE', marker=marker)
            except heat_exc.CommandError:
                # Stack disappearing mid-poll means the delete succeeded.
                continue
            if stack_status == 'DELETE_FAILED':
                failure_count += 1
                print(msg)

    if failure_count:
        msg = (_('Unable to delete %(count)d of the %(total)d stacks.') %
               {'count': failure_count,
                'total': len(parsed_args.stack)})
        raise exc.CommandError(msg)
def take_action(self, parsed_args):
    """Update an existing stack (or preview the update with --dry-run).

    Builds the update request from the template, environments and
    parameters, then either previews the resource changes or performs the
    update, optionally waiting for UPDATE_COMPLETE.
    """
    self.log.debug('take_action(%s)', parsed_args)
    client = self.app.client_manager.orchestration

    tpl_files, template = template_utils.process_template_path(
        parsed_args.template,
        object_request=http.authenticated_fetcher(client),
        existing=parsed_args.existing)

    env_files, env = (
        template_utils.process_multiple_environments_and_files(
            env_paths=parsed_args.environment))

    parameters = heat_utils.format_all_parameters(
        parsed_args.parameter,
        parsed_args.parameter_file,
        parsed_args.template)

    if parsed_args.pre_update:
        template_utils.hooks_to_env(env, parsed_args.pre_update,
                                    'pre-update')

    fields = {
        'stack_id': parsed_args.stack,
        'parameters': parameters,
        'existing': parsed_args.existing,
        'template': template,
        'files': dict(list(tpl_files.items()) + list(env_files.items())),
        'environment': env
    }

    if parsed_args.tags:
        fields['tags'] = parsed_args.tags
    if parsed_args.timeout:
        fields['timeout_mins'] = parsed_args.timeout
    if parsed_args.clear_parameter:
        fields['clear_parameters'] = list(parsed_args.clear_parameter)

    if parsed_args.rollback:
        rollback = parsed_args.rollback.strip().lower()
        if rollback not in ('enabled', 'disabled', 'keep'):
            msg = _('--rollback invalid value: %s') % parsed_args.rollback
            raise exc.CommandError(msg)
        # 'keep' leaves the stack's current rollback setting untouched.
        if rollback != 'keep':
            fields['disable_rollback'] = rollback == 'disabled'

    if parsed_args.dry_run:
        changes = client.stacks.preview_update(**fields)

        fields = ['state', 'resource_name', 'resource_type',
                  'resource_identity']

        columns = sorted(changes.get("resource_changes", {}).keys())
        data = [heat_utils.json_formatter(changes["resource_changes"][key])
                for key in columns]

        return columns, data

    if parsed_args.wait:
        # find the last event to use as the marker
        # NOTE(review): a sibling variant passes limit=1 as a separate
        # kwarg to get_events rather than inside event_args — confirm
        # which form this heatclient version expects.
        events = event_utils.get_events(client,
                                        stack_id=parsed_args.stack,
                                        event_args={'sort_dir': 'desc',
                                                    'limit': 1})
        marker = events[0].id if events else None

    client.stacks.update(**fields)

    if parsed_args.wait:
        stack = client.stacks.get(parsed_args.stack)
        stack_status, msg = event_utils.poll_for_events(
            client, stack.stack_name, action='UPDATE', marker=marker)
        if stack_status == 'UPDATE_FAILED':
            raise exc.CommandError(msg)

    return _show_stack(client, parsed_args.stack, format='table',
                       short=True)