def _user_env_path(self, abs_env_path, tht_root):
    """Map an absolute environment file path into the user-environments
    tree under *tht_root*, creating the destination directory.

    The leading path separator of *abs_env_path*'s directory is dropped
    (``[1:]``) so the absolute path can be re-rooted below
    ``<tht_root>/user-environments`` (assumes *abs_env_path* is absolute
    — confirm with callers).

    :param abs_env_path: absolute path of the environment file
    :param tht_root: root of the saved tripleo-heat-templates tree
    :returns: the mirrored path for the environment file
    """
    source_dir = os.path.dirname(abs_env_path)
    target_dir = os.path.join(
        tht_root, 'user-environments', source_dir[1:])
    # Ensure the mirrored directory exists before handing the path back.
    utils.makedirs(target_dir)
    return os.path.join(target_dir, os.path.basename(abs_env_path))
def create_config_dir(self, config_dir, preserve_config_dir=True):
    """Ensure *config_dir* exists.

    If the directory already exists and ``preserve_config_dir`` is
    explicitly ``False``, the existing tree is removed first so the
    caller starts from a clean directory.

    :param config_dir: path of the configuration directory to create
    :param preserve_config_dir: keep an existing directory when True
        (default). Note the check is ``is False``, so only an explicit
        False triggers removal.
    :raises OSError: if an existing directory cannot be removed
    """
    if os.path.exists(config_dir) and preserve_config_dir is False:
        try:
            # Lazy %-formatting so the message is only built when the
            # INFO level is enabled.
            self.log.info("Directory %s already exists, removing",
                          config_dir)
            shutil.rmtree(config_dir)
        except OSError as e:
            message = 'Failed to remove: %s, error: %s' % (config_dir,
                                                           str(e))
            # Chain the original error explicitly so the underlying
            # errno/traceback is preserved for callers and logs.
            raise OSError(message) from e
    utils.makedirs(config_dir)
def _download_missing_files_from_plan(self, tht_dir, plan_name):
    """Download plan files missing from *tht_dir* out of the swift plan
    container *plan_name*, writing each into its mirrored local path.

    :param tht_dir: local templates directory to populate
    :param plan_name: name of the swift container holding the plan
    """
    # get and download missing files into tmp directory
    plan_list = self.object_client.get_container(plan_name)
    # get_container() returns a (headers, object-listing) pair; the
    # listing at index 1 is a list of dicts with a 'name' key.
    plan_filenames = [f['name'] for f in plan_list[1]]
    for pf in plan_filenames:
        file_path = os.path.join(tht_dir, pf)
        if not os.path.isfile(file_path):
            # NOTE(review): the stray "\ " inside this message looks like
            # a leftover line continuation from the original source —
            # confirm against upstream before changing the literal.
            self.log.debug("Missing in templates directory, downloading \ %s from swift into %s" % (pf, file_path))
            utils.makedirs(os.path.dirname(file_path))
            # open in binary as the swiftclient get/put error under
            # python3 if opened as Text I/O
            with open(file_path, 'wb') as f:
                # get_object() returns (headers, body); write the body.
                f.write(self.object_client.get_object(plan_name, pf)[1])
def prepare_undercloud_deploy(upgrade=False, no_validations=True,
                              verbose_level=1, yes=False,
                              force_stack_update=False, dry_run=False,
                              inflight=False):
    """Prepare Undercloud deploy command based on undercloud.conf

    Builds the full ``openstack tripleo deploy --standalone`` command
    line for the undercloud by translating undercloud.conf options into
    heat parameters (written to a generated parameters file) and ``-e``
    environment-file arguments.

    :param upgrade: add ``--upgrade`` and the upgrade-prepare environment
    :param no_validations: when True, skip enabling/running validations
    :param verbose_level: >1 adds ``--debug`` to the deploy command
    :param yes: pass ``-y`` (assume yes) to the deploy command
    :param force_stack_update: pass ``--force-stack-update``
    :param dry_run: only report the expected stack action, do not mark
    :param inflight: add ``--inflight-validations``
    :returns: the assembled command as a list of strings
    :raises RuntimeError: when an override file cannot be found
    :raises exceptions.DeploymentError: on bad net_config_override data
    """
    if CONF.get('undercloud_hostname'):
        utils.set_hostname(CONF.get('undercloud_hostname'))
    env_data = {}
    registry_overwrites = {}
    deploy_args = []
    # Fetch configuration and use its log file param to add logging to a file
    utils.load_config(CONF, constants.UNDERCLOUD_CONF_PATH)
    utils.configure_logging(LOG, verbose_level, CONF['undercloud_log_file'])
    _load_subnets_config_groups()

    # NOTE(bogdando): the generated env files are stored in another path,
    # then picked up later.
    # NOTE(aschultz): We copy this into the tht root that we save because
    # we move any user provided environment files into this root later.
    tempdir = os.path.join(os.path.abspath(CONF['output_dir']),
                           'tripleo-config-generated-env-files')
    utils.makedirs(tempdir)

    # Set the undercloud home dir parameter so that stackrc is produced in
    # the users home directory.
    env_data['UndercloudHomeDir'] = USER_HOME
    env_data['PythonInterpreter'] = sys.executable
    env_data['ContainerImagePrepareDebug'] = CONF['undercloud_debug']

    # Straight one-to-one conf option -> heat parameter translation.
    for param_key, param_value in PARAMETER_MAPPING.items():
        if param_key in CONF.keys():
            env_data[param_value] = CONF[param_key]

    # Some undercloud config options need to tweak multiple template
    # parameters
    for undercloud_key in MULTI_PARAMETER_MAPPING:
        for env_value in MULTI_PARAMETER_MAPPING[undercloud_key]:
            if undercloud_key in CONF.keys():
                env_data[env_value] = CONF[undercloud_key]

    # Set up parameters for undercloud networking
    _process_network_args(env_data)

    # Setup parameter for Chrony ACL rules
    _process_chrony_acls(env_data)

    # Parse the undercloud.conf options to include necessary args and
    # yaml files for undercloud deploy command
    if CONF.get('undercloud_enable_selinux'):
        env_data['SELinuxMode'] = 'enforcing'
    else:
        env_data['SELinuxMode'] = 'permissive'

    if CONF.get('undercloud_enable_paunch'):
        env_data['EnablePaunch'] = True
    else:
        env_data['EnablePaunch'] = False

    if CONF.get('undercloud_ntp_servers', None):
        env_data['NtpServer'] = CONF['undercloud_ntp_servers']

    if CONF.get('undercloud_timezone', None):
        env_data['TimeZone'] = CONF['undercloud_timezone']
    else:
        # Fall back to the host's local timezone when none is configured.
        env_data['TimeZone'] = utils.get_local_timezone()

    if CONF.get('enable_validations', False):
        env_data['UndercloudConfigFilePath'] = constants.UNDERCLOUD_CONF_PATH
        if not no_validations:
            env_data['EnableValidations'] = CONF['enable_validations']

    if CONF.get('overcloud_domain_name', None):
        env_data['NeutronDnsDomain'] = CONF['overcloud_domain_name']
        deploy_args.append('--local-domain=%s' % CONF['overcloud_domain_name'])

    local_registry_name = '.'.join([
        utils.get_short_hostname(), 'ctlplane', CONF['overcloud_domain_name']
    ])
    # Podman addresses the local registry without a port; other container
    # CLIs use the :8787 registry port on each address.
    if CONF.get('container_cli', 'podman') == 'podman':
        env_data['DockerInsecureRegistryAddress'] = [local_registry_name]
        env_data['DockerInsecureRegistryAddress'].append(
            CONF['local_ip'].split('/')[0])
        env_data['DockerInsecureRegistryAddress'].append(
            CONF['undercloud_admin_host'])
    else:
        env_data['DockerInsecureRegistryAddress'] = [
            '%s:8787' % local_registry_name
        ]
        env_data['DockerInsecureRegistryAddress'].append(
            '%s:8787' % CONF['local_ip'].split('/')[0])
        env_data['DockerInsecureRegistryAddress'].append(
            '%s:8787' % CONF['undercloud_admin_host'])
    env_data['DockerInsecureRegistryAddress'].extend(
        CONF['container_insecure_registries'])

    env_data['ContainerCli'] = CONF['container_cli']

    # NOTE(aschultz): deprecated in Stein
    if CONF.get('docker_bip', None):
        env_data['DockerNetworkOptions'] = CONF['docker_bip']

    if CONF.get('container_registry_mirror', None):
        env_data['DockerRegistryMirror'] = CONF['container_registry_mirror']

    # This parameter is the IP address used to bind the local container
    # registry
    env_data['LocalContainerRegistry'] = local_registry_name

    if CONF['additional_architectures']:
        # In queens (instack-undercloud) we used this to setup additional
        # architectures. For rocky+ we want to pass a list and be smarter in
        # THT. We can remove this in 'T' when we get there.
        for arch in CONF['additional_architectures']:
            env_data['EnableArchitecture%s' % arch.upper()] = True
        env_data['AdditionalArchitectures'] = \
            ','.join(CONF['additional_architectures'])

    if CONF.get('local_ip', None):
        deploy_args.append('--local-ip=%s' % CONF['local_ip'])

    if CONF.get('templates', None):
        tht_templates = CONF['templates']
        deploy_args.append('--templates=%s' % tht_templates)
    else:
        tht_templates = THT_HOME
        deploy_args.append('--templates=%s' % THT_HOME)

    # NOTE(review): with a non-empty default this condition is always
    # truthy, so --roles-file is always appended — confirm that is the
    # intended behavior.
    if CONF.get('roles_file', constants.UNDERCLOUD_ROLES_FILE):
        deploy_args.append('--roles-file=%s' % CONF['roles_file'])

    if CONF.get('networks_file'):
        deploy_args.append('--networks-file=%s' % CONF['networks_file'])
    else:
        deploy_args.append(
            '--networks-file=%s' % constants.UNDERCLOUD_NETWORKS_FILE)

    if yes:
        deploy_args += ['-y']

    if upgrade:
        deploy_args += [
            '--upgrade',
            '-e', os.path.join(
                tht_templates,
                "environments/lifecycle/undercloud-upgrade-prepare.yaml")
        ]

    if not CONF.get('heat_native', False):
        deploy_args.append('--heat-native=False')
    else:
        deploy_args.append('--heat-native')

    if CONF.get('heat_container_image'):
        deploy_args.append('--heat-container-image=%s'
                           % CONF['heat_container_image'])

    # These should be loaded first so we can override all the bits later
    deploy_args += [
        "-e", os.path.join(tht_templates, "environments/undercloud.yaml"),
        '-e', os.path.join(tht_templates, 'environments/use-dns-for-vips.yaml')
    ]

    # we want to load this environment after undercloud.yaml for precedence.
    if CONF.get('container_cli', 'podman') == 'podman':
        deploy_args += [
            '-e', os.path.join(tht_templates, 'environments/podman.yaml')
        ]

    # If a container images file is used, copy it into the tempdir to make it
    # later into other deployment artifacts and user-provided files.
    _container_images_config(CONF, deploy_args, env_data, tempdir)

    if env_data['MasqueradeNetworks']:
        deploy_args += [
            '-e', os.path.join(
                tht_templates,
                "environments/services/masquerade-networks.yaml")
        ]

    if CONF.get('enable_ironic'):
        deploy_args += [
            '-e', os.path.join(
                tht_templates, "environments/services/ironic.yaml")
        ]

    # ironic-inspector can only work if ironic is enabled
    if CONF.get('enable_ironic_inspector'):
        deploy_args += [
            '-e', os.path.join(
                tht_templates, "environments/services/ironic-inspector.yaml")
        ]

    _process_drivers_and_hardware_types(CONF, env_data)
    _process_ipa_args(CONF, env_data)

    if not CONF.get('enable_nova', True):
        deploy_args += [
            '-e', os.path.join(
                tht_templates, 'environments/undercloud-disable-nova.yaml')
        ]

    if CONF.get('enable_mistral'):
        deploy_args += [
            '-e', os.path.join(
                tht_templates, "environments/services/mistral.yaml")
        ]

    if CONF.get('enable_novajoin'):
        deploy_args += [
            '-e', os.path.join(
                tht_templates, "environments/services/novajoin.yaml")
        ]
        env_data['NovajoinIpaOtp'] = CONF['ipa_otp']

    if CONF.get('enable_zaqar'):
        deploy_args += [
            '-e', os.path.join(
                tht_templates, "environments/services/zaqar-swift-backend.yaml")
        ]

    if CONF.get('enable_telemetry'):
        for env_file in TELEMETRY_DOCKER_ENV_YAML:
            deploy_args += ['-e', os.path.join(tht_templates, env_file)]
    else:
        deploy_args += [
            '-e', os.path.join(
                tht_templates, "environments/disable-telemetry.yaml")
        ]

    if CONF.get('enable_cinder'):
        deploy_args += [
            '-e', os.path.join(
                tht_templates, "environments/services/undercloud-cinder.yaml")
        ]

    if CONF.get('enable_tempest'):
        deploy_args += [
            '-e', os.path.join(
                tht_templates, "environments/services/tempest.yaml")
        ]

    if CONF.get('enable_swift_encryption'):
        deploy_args += [
            '-e', os.path.join(tht_templates,
                               "environments/services/barbican.yaml"),
            '-e', os.path.join(
                tht_templates,
                "environments/barbican-backend-simple-crypto.yaml")
        ]
        env_data['BarbicanSimpleCryptoGlobalDefault'] = True
        env_data['SwiftEncryptionEnabled'] = True

    if CONF.get('undercloud_service_certificate'):
        # We assume that the certificate is trusted
        env_data['InternalTLSCAFile'] = ''
        env_data.update(
            _get_public_tls_parameters(
                CONF.get('undercloud_service_certificate')))
    elif CONF.get('generate_service_certificate'):
        deploy_args += [
            '-e', os.path.join(
                tht_templates, "environments/public-tls-undercloud.yaml")
        ]
    else:
        deploy_args += [
            '-e', os.path.join(
                tht_templates,
                "environments/ssl/no-tls-endpoints-public-ip.yaml")
        ]

    # TLS is in play (provided or generated): wire up the endpoint map,
    # virtual IPs, haproxy and keepalived environments.
    if (CONF.get('generate_service_certificate') or
            CONF.get('undercloud_service_certificate')):
        endpoint_environment = _get_tls_endpoint_environment(
            CONF.get('undercloud_public_host'), tht_templates)

        public_host = utils.get_single_ip(CONF.get('undercloud_public_host'))
        public_ip = netaddr.IPAddress(public_host)
        deploy_args += ['--public-virtual-ip', public_host]

        # To make sure the resolved host is set to the right IP in /etc/hosts
        if not utils.is_valid_ip(CONF.get('undercloud_public_host')):
            extra_host = public_host + ' ' + CONF.get('undercloud_public_host')
            env_data['ExtraHostFileEntries'] = extra_host

        admin_host = utils.get_single_ip(CONF.get('undercloud_admin_host'))
        admin_ip = netaddr.IPAddress(admin_host)
        deploy_args += ['--control-virtual-ip', admin_host]

        local_net = netaddr.IPNetwork(CONF.get('local_ip'))
        if CONF.get('net_config_override', None):
            if (admin_ip not in local_net.cidr):
                LOG.warning('You may need to specify a custom '
                            'ControlVirtualInterface in a custom env file to '
                            'correctly assign the ip address to an interface '
                            'for undercloud_admin_host. By default it will be '
                            'set to br-ctlplane.')
            if (public_ip not in local_net.cidr):
                LOG.warning('You may need to specify a custom '
                            'PublicVirtualInterface in a custom env file to '
                            'correctly assign the ip address to an interface '
                            'for undercloud_public_host. By default it will be'
                            ' set to br-ctlplane.')
        else:
            if (admin_ip not in local_net.cidr or
                    public_ip not in local_net.cidr):
                LOG.warning('undercloud_admin_host or undercloud_public_host '
                            'is not in the same cidr as local_ip.')

        # Define the *VirtualInterfaces for keepalived. These are used when
        # configuring the undercloud_*_host addresses. If these adddesses are
        # not in the default cidr for the ctlplane, it will not be defined
        # and leads to general sadness during the deployment. Our default
        # net_config uses br-ctlplane. See rhbz#1737150
        env_data['ControlVirtualInterface'] = 'br-ctlplane'
        env_data['PublicVirtualInterface'] = 'br-ctlplane'

        deploy_args += [
            '-e', endpoint_environment,
            '-e', os.path.join(
                tht_templates,
                'environments/services/undercloud-haproxy.yaml'),
            '-e', os.path.join(
                tht_templates,
                'environments/services/undercloud-keepalived.yaml')
        ]

    u = CONF.get('deployment_user') or utils.get_deployment_user()
    env_data['DeploymentUser'] = u
    # TODO(cjeanner) drop that once using oslo.privsep
    deploy_args += ['--deployment-user', u]

    deploy_args += ['--output-dir=%s' % CONF['output_dir']]
    utils.makedirs(CONF['output_dir'])

    if CONF.get('cleanup'):
        deploy_args.append('--cleanup')

    if CONF.get('net_config_override', None):
        data_file = CONF['net_config_override']
        # Relative paths are resolved against the user's home directory.
        if os.path.abspath(data_file) != data_file:
            data_file = os.path.join(USER_HOME, data_file)
        if not os.path.exists(data_file):
            msg = _("Could not find net_config_override file '%s'") % data_file
            LOG.error(msg)
            raise RuntimeError(msg)

        # NOTE(bogdando): Process templated net config override data:
        # * get a list of used instack_env j2 tags (j2 vars, like {{foo}}),
        # * fetch values for the tags from the known mappings,
        # * raise, if there is unmatched tags left
        # * render the template into a JSON dict
        net_config_env, template_source = _get_jinja_env_source(data_file)
        unknown_tags = _get_unknown_instack_tags(net_config_env,
                                                 template_source)
        if unknown_tags:
            msg = (_('Can not render net_config_override file {0} contains '
                     'unknown instack_env j2 tags: {1}').format(
                data_file, unknown_tags))
            LOG.error(msg)
            raise exceptions.DeploymentError(msg)

        # Create rendering context from the known to be present mappings for
        # identified instack_env tags to generated in env_data undercloud heat
        # params. Fall back to config opts, when env_data misses a param.
        context = {}
        for tag in INSTACK_NETCONF_MAPPING.keys():
            mapped_value = INSTACK_NETCONF_MAPPING[tag]
            if mapped_value in env_data.keys() or mapped_value in CONF.keys():
                try:
                    context[tag] = CONF[mapped_value]
                except cfg.NoSuchOptError:
                    context[tag] = env_data.get(mapped_value, None)

        # Render returns a unicode string; normalize quoting and parse it
        # as JSON.
        # NOTE(review): the second .replace('"', '"') is a no-op — it looks
        # like a mangled HTML entity (possibly '&quot;'); confirm against
        # upstream before changing.
        net_config_str = net_config_env.get_template(
            os.path.split(data_file)[-1]).render(context).replace(
                "'", '"').replace('"', '"')
        try:
            net_config_json = json.loads(net_config_str)
        except ValueError:
            # Tolerate a bare key/value body by wrapping it in braces.
            net_config_json = json.loads("{%s}" % net_config_str)

        if 'network_config' not in net_config_json:
            msg = ('Unsupported data format in net_config_override '
                   'file %s: %s' % (data_file, net_config_str))
            LOG.error(msg)
            raise exceptions.DeploymentError(msg)

        env_data['UndercloudNetConfigOverride'] = net_config_json

    # Persist all collected heat parameters and include them last so they
    # win over the stock environments above.
    params_file = os.path.join(tempdir, 'undercloud_parameters.yaml')
    utils.write_env_file(env_data, params_file, registry_overwrites)
    deploy_args += ['-e', params_file]

    if CONF.get('hieradata_override', None):
        data_file = CONF['hieradata_override']
        # Relative paths are resolved against the user's home directory.
        if os.path.abspath(data_file) != data_file:
            data_file = os.path.join(USER_HOME, data_file)
        if not os.path.exists(data_file):
            msg = _("Could not find hieradata_override file '%s'") % data_file
            LOG.error(msg)
            raise RuntimeError(msg)
        deploy_args += ['--hieradata-override=%s' % data_file]

    if CONF.get('enable_validations') and not no_validations:
        undercloud_preflight.check(verbose_level, upgrade)
        deploy_args += [
            '-e', os.path.join(
                tht_templates, "environments/tripleo-validations.yaml")
        ]

    if inflight:
        deploy_args.append('--inflight-validations')

    if CONF.get('custom_env_files'):
        for custom_file in CONF['custom_env_files']:
            deploy_args += ['-e', custom_file]

    if verbose_level > 1:
        deploy_args.append('--debug')

    deploy_args.append('--log-file=%s' % CONF['undercloud_log_file'])

    # Always add a drop-in for the ephemeral undercloud heat stack
    # virtual state tracking (the actual file will be created later)
    stack_vstate_dropin = os.path.join(
        tht_templates, 'undercloud-stack-vstate-dropin.yaml')
    deploy_args += ["-e", stack_vstate_dropin]

    if force_stack_update:
        deploy_args += ["--force-stack-update"]

    cmd = ["sudo", "--preserve-env", "openstack", "tripleo", "deploy",
           "--standalone", "--standalone-role", "Undercloud",
           "--stack", "undercloud"]
    cmd += deploy_args[:]

    # In dry-run, also report the expected heat stack virtual state/action
    if dry_run:
        stack_update_mark = os.path.join(
            constants.STANDALONE_EPHEMERAL_STACK_VSTATE,
            'update_mark_undercloud')
        if os.path.isfile(stack_update_mark) or force_stack_update:
            LOG.warning(_('The heat stack undercloud virtual state/action '
                          ' would be UPDATE'))

    return cmd
def prepare_minion_deploy(upgrade=False, no_validations=False,
                          verbose_level=1, yes=False,
                          force_stack_update=False, dry_run=False):
    """Prepare Minion deploy command based on minion.conf

    Builds the full ``openstack tripleo deploy --standalone`` command
    line for an undercloud minion by translating minion.conf options
    into heat parameters (written to a generated parameters file) and
    ``-e`` environment-file arguments.

    :param upgrade: add ``--upgrade`` and the upgrade-prepare environment
    :param no_validations: when True, skip running preflight validations
    :param verbose_level: >1 adds ``--debug`` to the deploy command
    :param yes: pass ``-y`` (assume yes) to the deploy command
    :param force_stack_update: pass ``--force-stack-update``
    :param dry_run: only report the expected stack action, do not mark
    :returns: the assembled command as a list of strings
    :raises RuntimeError: when an override file cannot be found
    :raises exceptions.DeploymentError: on bad net_config_override data
    """
    env_data = {}
    registry_overwrites = {}
    deploy_args = []
    # Fetch configuration and use its log file param to add logging to a file
    utils.load_config(CONF, constants.MINION_CONF_PATH)
    utils.configure_logging(LOG, verbose_level, CONF['minion_log_file'])

    # NOTE(bogdando): the generated env files are stored in another path,
    # then picked up later.
    # NOTE(aschultz): We copy this into the tht root that we save because
    # we move any user provided environment files into this root later.
    tempdir = os.path.join(os.path.abspath(CONF['output_dir']),
                           'tripleo-config-generated-env-files')
    utils.makedirs(tempdir)

    env_data['PythonInterpreter'] = sys.executable
    env_data['ContainerImagePrepareDebug'] = CONF['minion_debug']

    # Straight one-to-one conf option -> heat parameter translation.
    for param_key, param_value in PARAMETER_MAPPING.items():
        if param_key in CONF.keys():
            env_data[param_value] = CONF[param_key]

    # Parse the minion.conf options to include necessary args and
    # yaml files for minion deploy command
    if CONF.get('minion_enable_selinux'):
        env_data['SELinuxMode'] = 'enforcing'
    else:
        env_data['SELinuxMode'] = 'permissive'

    if CONF.get('minion_ntp_servers', None):
        env_data['NtpServer'] = CONF['minion_ntp_servers']

    if CONF.get('minion_timezone', None):
        env_data['TimeZone'] = CONF['minion_timezone']
    else:
        # Fall back to the host's local timezone when none is configured.
        env_data['TimeZone'] = utils.get_local_timezone()

    # TODO(aschultz): fix this logic, look it up out of
    # undercloud-outputs.yaml
    env_data['DockerInsecureRegistryAddress'] = [
        '%s:8787' % CONF['minion_local_ip'].split('/')[0]
    ]
    env_data['DockerInsecureRegistryAddress'].extend(
        CONF['container_insecure_registries'])

    env_data['ContainerCli'] = CONF['container_cli']

    if CONF.get('container_registry_mirror', None):
        env_data['DockerRegistryMirror'] = CONF['container_registry_mirror']

    # This parameter is the IP address used to bind the local container
    # registry
    env_data['LocalContainerRegistry'] = CONF['minion_local_ip'].split('/')[0]

    if CONF.get('minion_local_ip', None):
        deploy_args.append('--local-ip=%s' % CONF['minion_local_ip'])

    if CONF.get('templates', None):
        tht_templates = CONF['templates']
        deploy_args.append('--templates=%s' % tht_templates)
    else:
        tht_templates = THT_HOME
        deploy_args.append('--templates=%s' % THT_HOME)

    # NOTE(review): with a non-empty default this condition is always
    # truthy, so --roles-file is always appended — confirm that is the
    # intended behavior.
    if CONF.get('roles_file', constants.MINION_ROLES_FILE):
        deploy_args.append('--roles-file=%s' % CONF['roles_file'])

    if CONF.get('networks_file'):
        deploy_args.append('--networks-file=%s' % CONF['networks_file'])
    else:
        deploy_args.append(
            '--networks-file=%s' % constants.UNDERCLOUD_NETWORKS_FILE)

    if yes:
        deploy_args += ['-y']

    # copy the undercloud output file into our working dir and include it
    output_file = _process_undercloud_output(
        tempdir, CONF['minion_undercloud_output_file'])
    deploy_args += ['-e', output_file]

    # copy undercloud password file (the configuration is minion_password_file
    # to the place that triple deploy looks for it
    # tripleo-<stack name>-passwords.yaml)
    _process_undercloud_passwords(CONF['minion_password_file'],
                                  'tripleo-minion-passwords.yaml')

    if upgrade:
        # TODO(aschultz): validate minion upgrade, should be the same as the
        # undercloud one.
        deploy_args += [
            '--upgrade',
            '-e', os.path.join(
                tht_templates,
                "environments/lifecycle/undercloud-upgrade-prepare.yaml")
        ]

    if not CONF.get('heat_native', False):
        deploy_args.append('--heat-native=False')
    else:
        deploy_args.append('--heat-native')

    if CONF.get('heat_container_image'):
        deploy_args.append('--heat-container-image=%s'
                           % CONF['heat_container_image'])

    # These should be loaded first so we can override all the bits later
    deploy_args += [
        "-e", os.path.join(tht_templates,
                           'environments/undercloud/undercloud-minion.yaml'),
        '-e', os.path.join(tht_templates, 'environments/use-dns-for-vips.yaml')
    ]

    # TODO(aschultz): remove when podman is actual default
    deploy_args += [
        '-e', os.path.join(tht_templates, 'environments/podman.yaml')
    ]

    # If a container images file is used, copy it into the tempdir to make it
    # later into other deployment artifacts and user-provided files.
    _container_images_config(CONF, deploy_args, env_data, tempdir)

    if CONF.get('enable_heat_engine'):
        deploy_args += [
            '-e', os.path.join(
                tht_templates, "environments/services/heat-engine.yaml")
        ]

    if CONF.get('enable_ironic_conductor'):
        deploy_args += [
            '-e', os.path.join(
                tht_templates, "environments/services/ironic-conductor.yaml")
        ]

    if CONF.get('minion_service_certificate'):
        # We assume that the certificate is trusted
        env_data['InternalTLSCAFile'] = ''
        env_data.update(
            _get_public_tls_parameters(CONF.get('minion_service_certificate')))

    u = CONF.get('deployment_user') or utils.get_deployment_user()
    env_data['DeploymentUser'] = u
    # TODO(cjeanner) drop that once using oslo.privsep
    deploy_args += ['--deployment-user', u]

    deploy_args += ['--output-dir=%s' % CONF['output_dir']]
    utils.makedirs(CONF['output_dir'])

    # TODO(aschultz): move this to a central class
    if CONF.get('net_config_override', None):
        data_file = CONF['net_config_override']
        # Relative paths are resolved against the user's home directory.
        if os.path.abspath(data_file) != data_file:
            data_file = os.path.join(USER_HOME, data_file)
        if not os.path.exists(data_file):
            msg = _("Could not find net_config_override file '%s'") % data_file
            LOG.error(msg)
            raise RuntimeError(msg)

        # NOTE(bogdando): Process templated net config override data:
        # * get a list of used instack_env j2 tags (j2 vars, like {{foo}}),
        # * fetch values for the tags from the known mappings,
        # * raise, if there is unmatched tags left
        # * render the template into a JSON dict
        # NOTE(review): unlike prepare_undercloud_deploy, unknown instack_env
        # j2 tags are NOT validated here — confirm that is intentional.
        net_config_env, template_source = _get_jinja_env_source(data_file)

        # Create rendering context from the known to be present mappings for
        # identified instack_env tags to generated in env_data minion heat
        # params. Fall back to config opts, when env_data misses a param.
        context = {}
        for tag in INSTACK_NETCONF_MAPPING.keys():
            mapped_value = INSTACK_NETCONF_MAPPING[tag]
            if mapped_value in env_data.keys() or mapped_value in CONF.keys():
                try:
                    context[tag] = CONF[mapped_value]
                except cfg.NoSuchOptError:
                    context[tag] = env_data.get(mapped_value, None)

        # Render returns a unicode string; normalize quoting and parse it
        # as JSON.
        # NOTE(review): the second .replace('"', '"') is a no-op — it looks
        # like a mangled HTML entity (possibly '&quot;'); confirm against
        # upstream before changing.
        net_config_str = net_config_env.get_template(
            os.path.split(data_file)[-1]).render(context).replace(
                "'", '"').replace('"', '"')
        try:
            net_config_json = json.loads(net_config_str)
        except ValueError:
            # Tolerate a bare key/value body by wrapping it in braces.
            net_config_json = json.loads("{%s}" % net_config_str)

        if 'network_config' not in net_config_json:
            msg = ('Unsupported data format in net_config_override '
                   'file %s: %s' % (data_file, net_config_str))
            LOG.error(msg)
            raise exceptions.DeploymentError(msg)

        env_data['UndercloudNetConfigOverride'] = net_config_json

    # Persist all collected heat parameters and include them last so they
    # win over the stock environments above.
    params_file = os.path.join(tempdir, 'minion_parameters.yaml')
    utils.write_env_file(env_data, params_file, registry_overwrites)
    deploy_args += ['-e', params_file]

    if CONF.get('hieradata_override', None):
        data_file = CONF['hieradata_override']
        # Relative paths are resolved against the user's home directory.
        if os.path.abspath(data_file) != data_file:
            data_file = os.path.join(USER_HOME, data_file)
        if not os.path.exists(data_file):
            msg = _("Could not find hieradata_override file '%s'") % data_file
            LOG.error(msg)
            raise RuntimeError(msg)
        deploy_args += ['--hieradata-override=%s' % data_file]

    if CONF.get('minion_hostname'):
        utils.set_hostname(CONF.get('minion_hostname'))

    if CONF.get('minion_enable_validations') and not no_validations:
        undercloud_preflight.minion_check(verbose_level, upgrade)

    if CONF.get('custom_env_files'):
        for custom_file in CONF['custom_env_files']:
            deploy_args += ['-e', custom_file]

    if verbose_level > 1:
        deploy_args.append('--debug')

    deploy_args.append('--log-file=%s' % CONF['minion_log_file'])

    # Always add a drop-in for the ephemeral minion heat stack
    # virtual state tracking (the actual file will be created later)
    stack_vstate_dropin = os.path.join(
        tht_templates, 'minion-stack-vstate-dropin.yaml')
    deploy_args += ["-e", stack_vstate_dropin]

    if force_stack_update:
        deploy_args += ["--force-stack-update"]

    roles_file = os.path.join(tht_templates, constants.MINION_ROLES_FILE)
    cmd = ["sudo", "--preserve-env", "openstack", "tripleo", "deploy",
           "--standalone", "--standalone-role", "UndercloudMinion",
           "--stack", "minion", "-r", roles_file]
    cmd += deploy_args[:]

    # In dry-run, also report the expected heat stack virtual state/action
    if dry_run:
        stack_update_mark = os.path.join(
            constants.STANDALONE_EPHEMERAL_STACK_VSTATE,
            'update_mark_minion')
        if os.path.isfile(stack_update_mark) or force_stack_update:
            LOG.warning(_('The heat stack minion virtual state/action '
                          ' would be UPDATE'))

    return cmd