def deploy_and_wait(log, clients, stack, plan_name, verbose_level,
                    timeout=None, run_validations=False,
                    skip_deploy_identifier=False, deployment_options=None):
    """Start the deploy and wait for it to finish.

    :param log: logger for progress messages
    :param clients: client manager providing the orchestration client
    :param stack: existing Heat stack, or None to create a new one
    :param plan_name: name of the deployment plan / stack
    :param verbose_level: CLI verbosity, forwarded to deploy()
    :param timeout: optional deployment timeout
    :param run_validations: run pre-deployment validations when True
    :param skip_deploy_identifier: skip bumping DeployIdentifier when True
    :param deployment_options: optional dict of extra deployment options
    :raises exceptions.DeploymentError: when the stack create/update fails
    """
    # BUG FIX: mutable default argument ({}) replaced with None to avoid
    # the shared-mutable-default pitfall; behaviour is unchanged.
    if deployment_options is None:
        deployment_options = {}
    # NOTE(review): deployment_options is accepted but never forwarded to
    # deploy() below — confirm whether that is intentional.
    orchestration_client = clients.orchestration
    if stack is None:
        log.info("Performing Heat stack create")
        action = 'CREATE'
        marker = None
    else:
        log.info("Performing Heat stack update")
        # Make sure existing parameters for stack are reused
        # Find the last top-level event to use for the first marker
        events = event_utils.get_events(orchestration_client,
                                        stack_id=plan_name,
                                        event_args={'sort_dir': 'desc',
                                                    'limit': 1})
        marker = events[0].id if events else None
        action = 'UPDATE'

    set_deployment_status(clients=clients, plan=plan_name,
                          status='DEPLOYING')

    try:
        deploy(container=plan_name, run_validations=run_validations,
               skip_deploy_identifier=skip_deploy_identifier,
               timeout=timeout, verbosity=verbose_level)
    except Exception:
        # Record the failure before propagating so the plan status is
        # accurate even when deploy() itself blows up.
        set_deployment_status(clients=clients, plan=plan_name,
                              status='DEPLOY_FAILED')
        raise

    # we always want the heat stack output while it's going.
    verbose_events = True
    # TODO(rabi) Simplify call to get events as we don't need to wait
    # for stack to be ready anymore i.e just get the events.
    create_result = utils.wait_for_stack_ready(
        orchestration_client, plan_name, marker, action, verbose_events)
    if not create_result:
        shell.OpenStackShell().run(["stack", "failures", "list", plan_name])
        set_deployment_status(clients=clients, plan=plan_name,
                              status='DEPLOY_FAILED')
        if stack is None:
            raise exceptions.DeploymentError("Heat Stack create failed.")
        else:
            raise exceptions.DeploymentError("Heat Stack update failed.")
def take_action(self, parsed_args):
    """Confirm the upgrade interactively (when needed) and deploy."""
    self.log.debug("take_action(%s)" % parsed_args)
    try:
        # Only prompt when upgrading, not auto-confirmed, and on a TTY.
        if (parsed_args.upgrade and not parsed_args.yes
                and sys.stdin.isatty()):
            answer = six.moves.input(
                ('It is strongly recommended to perform a backup '
                 'before the upgrade. Are you sure you want to '
                 'upgrade [y/N]?')).lower()
            if not answer.startswith('y'):
                self.log.info('User did not confirm upgrade so '
                              'taking no action.')
                return
    except KeyboardInterrupt:  # ctrl-c
        self.log.info('User did not confirm upgrade '
                      '(ctrl-c) so taking no action.')
        return
    except EOFError:  # ctrl-d
        self.log.info('User did not confirm upgrade '
                      '(ctrl-d) so taking no action.')
        return

    # Guard clause: only the standalone path is implemented.
    if not parsed_args.standalone:
        msg = _('Non-standalone is currently not supported')
        self.log.error(msg)
        raise exceptions.DeploymentError(msg)

    if self._standalone_deploy(parsed_args) != 0:
        msg = _('Deployment failed.')
        self.log.error(msg)
        raise exceptions.DeploymentError(msg)
def deploy_and_wait(log, clients, stack, plan_name, verbose_level,
                    timeout=None, run_validations=False,
                    skip_deploy_identifier=False, deployment_options=None):
    """Start the deploy and wait for it to finish.

    :param log: logger, passed through to deploy()
    :param clients: client manager providing the orchestration client
    :param stack: existing Heat stack, or None to create a new one
    :param plan_name: name of the deployment plan / stack
    :param verbose_level: CLI verbosity; >= 1 enables event output
    :param timeout: optional deployment timeout
    :param run_validations: run pre-deployment validations when True
    :param skip_deploy_identifier: skip bumping DeployIdentifier when True
    :param deployment_options: optional dict of extra deployment options
    :raises exceptions.DeploymentError: when the stack create/update fails
    """
    # BUG FIX: mutable default argument ({}) replaced with None to avoid
    # the shared-mutable-default pitfall; behaviour is unchanged.
    if deployment_options is None:
        deployment_options = {}
    workflow_input = {
        "container": plan_name,
        "run_validations": run_validations,
        "skip_deploy_identifier": skip_deploy_identifier,
        "deployment_options": deployment_options,
    }
    if timeout is not None:
        workflow_input['timeout'] = timeout

    deploy(log, clients, **workflow_input)

    orchestration_client = clients.orchestration
    if stack is None:
        log.info("Performing Heat stack create")
        action = 'CREATE'
        marker = None
    else:
        log.info("Performing Heat stack update")
        # Make sure existing parameters for stack are reused
        # Find the last top-level event to use for the first marker
        events = event_utils.get_events(orchestration_client,
                                        stack_id=plan_name,
                                        event_args={'sort_dir': 'desc',
                                                    'limit': 1})
        marker = events[0].id if events else None
        action = 'UPDATE'

    time.sleep(10)
    verbose_events = verbose_level >= 1
    create_result = utils.wait_for_stack_ready(
        orchestration_client, plan_name, marker, action, verbose_events)
    if not create_result:
        shell.OpenStackShell().run(["stack", "failures", "list", plan_name])
        set_deployment_status(clients, 'failed', plan=plan_name)
        if stack is None:
            raise exceptions.DeploymentError("Heat Stack create failed.")
        else:
            raise exceptions.DeploymentError("Heat Stack update failed.")
def take_action(self, parsed_args):
    """Kill or (re)launch the local Heat process, as requested."""
    self.log.debug("take_action(%s)" % parsed_args)
    if not parsed_args.kill:
        if self._launch_heat(parsed_args) != 0:
            msg = _('Heat launch failed.')
            self.log.error(msg)
            raise exceptions.DeploymentError(msg)
    else:
        if self._kill_heat(parsed_args) != 0:
            msg = _('Heat kill failed.')
            self.log.error(msg)
            raise exceptions.DeploymentError(msg)
def take_action(self, parsed_args):
    """Deploy the undercloud via the `tripleo deploy` helper command."""
    # Fetch configuration used to add logging to a file
    utils.load_config(self.osloconfig, constants.UNDERCLOUD_CONF_PATH)
    utils.configure_logging(self.log, self.app_args.verbose_level,
                            self.osloconfig['undercloud_log_file'])
    self.log.debug("take_action(%s)" % parsed_args)
    utils.ensure_run_as_normal_user()

    # A dry run disables both validation styles.
    skip_validations = parsed_args.dry_run or parsed_args.no_validations
    run_inflight = parsed_args.inflight and not parsed_args.dry_run

    cmd = undercloud_config.prepare_undercloud_deploy(
        no_validations=skip_validations,
        verbose_level=self.app_args.verbose_level,
        force_stack_update=parsed_args.force_stack_update,
        dry_run=parsed_args.dry_run,
        inflight=run_inflight)
    self.log.warning("Running: %s" % ' '.join(cmd))

    if parsed_args.dry_run:
        return
    try:
        subprocess.check_call(cmd)
        self.log.warning(
            UNDERCLOUD_COMPLETION_MESSAGE.format(
                '~/undercloud-passwords.conf', '~/stackrc'))
    except Exception as e:
        self.log.error(UNDERCLOUD_FAILURE_MESSAGE)
        self.log.error(e)
        raise exceptions.DeploymentError(e)
def take_action(self, parsed_args):
    """Upgrade the undercloud via the `tripleo deploy` helper command.

    :raises exceptions.DeploymentError: when the upgrade subprocess fails
    """
    # Fetch configuration used to add logging to a file
    utils.load_config(self.osloconfig, constants.UNDERCLOUD_CONF_PATH)
    utils.configure_logging(self.log, self.app_args.verbose_level,
                            self.osloconfig['undercloud_log_file'])
    # BUG FIX: message said "take action(%s)" while every other command
    # logs "take_action(%s)"; made consistent for log grepping.
    self.log.debug("take_action(%s)" % parsed_args)
    utils.ensure_run_as_normal_user()
    self._update_extra_packages(constants.UNDERCLOUD_EXTRA_PACKAGES,
                                parsed_args.dry_run)
    cmd = undercloud_config.prepare_undercloud_deploy(
        upgrade=True,
        yes=parsed_args.yes,
        no_validations=parsed_args.no_validations,
        verbose_level=self.app_args.verbose_level,
        force_stack_update=parsed_args.force_stack_update)
    self.log.warning("Running: %s" % ' '.join(cmd))
    if not parsed_args.dry_run:
        try:
            subprocess.check_call(cmd)
            self.log.warning(
                UNDERCLOUD_UPGRADE_COMPLETION_MESSAGE.format(
                    os.path.join(constants.UNDERCLOUD_OUTPUT_DIR,
                                 'undercloud-passwords.conf'),
                    '~/stackrc'))
        except Exception as e:
            self.log.error(UNDERCLOUD_FAILURE_MESSAGE)
            self.log.error(e)
            raise exceptions.DeploymentError(e)
def take_action(self, parsed_args):
    """Upgrade the minion via the `tripleo deploy` helper command.

    :raises exceptions.DeploymentError: when the upgrade subprocess fails
    """
    # Fetch configuration used to add logging to a file
    utils.load_config(self.osloconfig, constants.MINION_CONF_PATH)
    utils.configure_logging(self.log, self.app_args.verbose_level,
                            self.osloconfig['minion_log_file'])
    # BUG FIX: message said "take action(%s)" while every other command
    # logs "take_action(%s)"; made consistent for log grepping.
    self.log.debug("take_action(%s)" % parsed_args)
    utils.ensure_run_as_normal_user()
    cmd = minion_config.prepare_minion_deploy(
        upgrade=True,
        yes=parsed_args.yes,
        no_validations=parsed_args.no_validations,
        verbose_level=self.app_args.verbose_level,
        force_stack_update=parsed_args.force_stack_update)
    self.log.warning("Running: %s" % ' '.join(cmd))
    if not parsed_args.dry_run:
        try:
            subprocess.check_call(cmd)
            self.log.warning(MINION_UPGRADE_COMPLETION_MESSAGE)
        except Exception as e:
            self.log.error(MINION_FAILURE_MESSAGE)
            self.log.error(e)
            raise exceptions.DeploymentError(e)
def update(clients, **workflow_input):
    """Run the package_update_plan workflow, then wait for the UPDATE."""
    workflow_client = clients.workflow_engine
    tripleoclients = clients.tripleoclient
    plan_name = workflow_input['container']

    with tripleoclients.messaging_websocket() as ws:
        execution = base.start_workflow(
            workflow_client,
            'tripleo.package_update.v1.package_update_plan',
            workflow_input=workflow_input)
        for payload in base.wait_for_messages(workflow_client, ws,
                                              execution):
            assert payload['status'] == "SUCCESS", pprint.pformat(payload)

    orchestration_client = clients.orchestration
    # Seed the event marker with the most recent top-level event so only
    # new events are shown while waiting.
    events = event_utils.get_events(orchestration_client,
                                    stack_id=plan_name,
                                    event_args={'sort_dir': 'desc',
                                                'limit': 1})
    marker = events[0].id if events else None

    time.sleep(10)
    if not utils.wait_for_stack_ready(orchestration_client, plan_name,
                                      marker, 'UPDATE', 1):
        shell.OpenStackShell().run(["stack", "failures", "list", plan_name])
        raise exceptions.DeploymentError("Heat Stack update failed.")
def config_download(log, clients, stack, templates, ssh_user, ssh_key,
                    ssh_network, output_dir, verbosity=1):
    """Run the config-download deploy workflow and report the outcome."""
    workflow_client = clients.workflow_engine
    tripleoclients = clients.tripleoclient

    workflow_input = {
        'verbosity': verbosity or 1,
        'plan_name': stack.stack_name,
        'ssh_network': ssh_network,
    }
    if output_dir:
        workflow_input.update(dict(work_dir=output_dir))

    with tripleoclients.messaging_websocket() as ws:
        execution = base.start_workflow(
            workflow_client,
            'tripleo.deployment.v1.config_download_deploy',
            workflow_input=workflow_input)
        for payload in base.wait_for_messages(workflow_client, ws,
                                              execution, 3600):
            print(payload['message'])

    # The last message carries the final workflow status.
    if payload['status'] != 'SUCCESS':
        raise exceptions.DeploymentError("Overcloud configuration failed.")
    print("Overcloud configuration completed.")
def _generate_inspection_subnets():
    """Build per-subnet environment dicts for ironic inspection.

    :returns: list of dicts with tag, ip_range, netmask, gateway,
        host_routes and mtu for every configured subnet
    :raises exceptions.DeploymentError: on invalid subnet configuration
    """
    env_list = []
    for subnet in CONF.subnets:
        env_dict = {}
        s = CONF.get(subnet)
        env_dict['tag'] = subnet
        try:
            # PERF/IDIOM: the CIDR was parsed with netaddr.IPNetwork up to
            # four times per subnet; parse it once and reuse.
            net = netaddr.IPNetwork(s.cidr)
            if net.version == 4:
                env_dict['ip_range'] = s.inspection_iprange
            if net.version == 6:
                if CONF['ipv6_address_mode'] == 'dhcpv6-stateful':
                    env_dict['ip_range'] = s.inspection_iprange
                if CONF['ipv6_address_mode'] == 'dhcpv6-stateless':
                    # dnsmasq(8): A static-only subnet with address all zeros
                    # may be used as a "catch-all" address to enable replies
                    # to all Information-request packets on a subnet which is
                    # provided with stateless DHCPv6,
                    # ie --dhcp-range=::,static
                    env_dict['ip_range'] = ','.join(
                        [str(net.ip), 'static'])
            env_dict['netmask'] = str(net.netmask)
            env_dict['gateway'] = s.gateway
            env_dict['host_routes'] = s.host_routes
            env_dict['mtu'] = CONF.local_mtu
            env_list.append(env_dict)
        except Exception as e:
            msg = _('Invalid configuration data in subnet "{}". Double check '
                    'the settings for this subnet. Error: {}').format(
                subnet, e)
            LOG.error(msg)
            raise exceptions.DeploymentError(msg)
    return env_list
def test_update_failed(self, mock_deploy, mock_copy, mock_yaml,
                       mock_abspath, mock_open, mock_update,
                       mock_get_stack):
    """A DeploymentError from the update workflow surfaces to the caller."""
    mock_stack = mock.Mock(parameters={'DeployIdentifier': ''})
    mock_stack.stack_name = 'overcloud'
    mock_get_stack.return_value = mock_stack
    mock_update.side_effect = exceptions.DeploymentError()
    mock_yaml.return_value = {'fake_container': 'fake_value'}

    parsed_args = self.check_parser(
        self.cmd,
        ['--stack', 'overcloud', '--templates'],
        [('stack', 'overcloud'),
         ('templates', constants.TRIPLEO_HEAT_TEMPLATES)])

    with mock.patch('os.path.exists') as mock_exists, \
            mock.patch('os.path.isfile') as mock_isfile:
        mock_exists.return_value = True
        mock_isfile.return_value = True
        with self.assertRaises(exceptions.DeploymentError):
            self.cmd.take_action(parsed_args)
def check_nodes_count(baremetal_client, stack, parameters, defaults):
    """Check if there are enough available nodes for creating/scaling stack"""
    count = 0
    if stack:
        # Fall back to the value already stored on the stack for any
        # parameter not explicitly overridden.
        for param in defaults:
            try:
                current = int(stack.parameters[param])
            except KeyError:
                raise ValueError(
                    "Parameter '%s' was not found in existing stack"
                    % param)
            count += parameters.get(param, current)
    else:
        count = sum(parameters.get(param, default)
                    for param, default in defaults.items())

    # We get number of nodes usable for the stack by getting already
    # used (associated) nodes and number of nodes which can be used
    # (not in maintenance mode).
    # Assumption is that associated nodes are part of the stack (only
    # one overcloud is supported).
    associated = len(baremetal_client.node.list(associated=True))
    available = len(baremetal_client.node.list(associated=False,
                                               maintenance=False))
    ironic_nodes_count = associated + available

    if count > ironic_nodes_count:
        raise exceptions.DeploymentError(
            "Not enough nodes - available: {0}, requested: {1}".format(
                ironic_nodes_count, count))
    return True
def _create_install_artifact(self):
    """Create a tarball of the temporary folders used.

    :returns: path to the created tarball
    :raises exceptions.DeploymentError: when the tarball cannot be created
    """
    self.log.debug(_("Preserving deployment artifacts"))

    def remove_output_dir(info):
        """Tar filter to remove output dir from path"""
        # leading path to tar is home/stack/ rather than /home/stack
        leading_path = self.output_dir[1:] + '/'
        info.name = info.name.replace(leading_path, '')
        return info

    # tar up working data and put in
    # output_dir/undercloud-install-TS.tar.bzip2
    tar_filename = self._get_tar_filename()
    try:
        # BUG FIX: the tarfile was never closed when add() raised; the
        # context manager guarantees the handle is released.
        with tarfile.open(tar_filename, 'w:bz2') as tf:
            tf.add(self.tht_render, recursive=True,
                   filter=remove_output_dir)
            tf.add(self.tmp_ansible_dir, recursive=True,
                   filter=remove_output_dir)
    except Exception as ex:
        # BUG FIX: Exception.message does not exist on Python 3 and raised
        # AttributeError inside this handler; use str(ex) instead.
        msg = _("Unable to create artifact tarball, %s") % str(ex)
        self.log.error(msg)
        raise exceptions.DeploymentError(msg)
    return tar_filename
def deploy_and_wait(log, clients, stack, plan_name, verbose_level,
                    timeout=None):
    """Start the deploy and wait for it to finish"""
    workflow_input = {
        "container": plan_name,
        "queue_name": str(uuid.uuid4()),
    }
    if timeout is not None:
        workflow_input['timeout'] = timeout

    deploy(clients, **workflow_input)

    orchestration_client = clients.orchestration
    if stack is not None:
        log.info("Performing Heat stack update")
        # Reuse the stack's existing parameters and seed the event marker
        # with the most recent top-level event so only new events display.
        events = event_utils.get_events(orchestration_client,
                                        stack_id=plan_name,
                                        event_args={'sort_dir': 'desc',
                                                    'limit': 1})
        marker = events[0].id if events else None
        action = 'UPDATE'
    else:
        log.info("Performing Heat stack create")
        action = 'CREATE'
        marker = None

    time.sleep(10)
    verbose_events = verbose_level > 0
    if not utils.wait_for_stack_ready(orchestration_client, plan_name,
                                      marker, action, verbose_events):
        if stack is None:
            raise exceptions.DeploymentError("Heat Stack create failed.")
        raise exceptions.DeploymentError("Heat Stack update failed.")
def rel_or_abs_path(tht_root, file_path):
    '''Find a file, either absolute path or relative to the t-h-t dir'''
    candidate = os.path.abspath(file_path)
    if os.path.exists(candidate):
        return candidate
    # Fall back to resolving relative to the templates root.
    candidate = os.path.abspath(os.path.join(tht_root, file_path))
    if os.path.exists(candidate):
        return candidate
    raise exceptions.DeploymentError("Can't find path %s %s"
                                     % (file_path, candidate))
def _pre_heat_deploy(self):
    """Setup before the Heat stack create or update has been done."""
    compute_client = self.app.client_manager.compute
    self.log.debug("Checking hypervisor stats")
    stats = utils.check_hypervisor_stats(compute_client)
    if stats is None:
        raise exceptions.DeploymentError(
            "Expected hypervisor stats not met")
    return True
def _process_hieradata_overrides(self, override_file=None,
                                 tripleo_role_name='Standalone'):
    """Count in hiera data overrides including legacy formats

    Return a file name that points to processed hiera data overrides file

    :param override_file: path to the hieradata override YAML file
    :param tripleo_role_name: role used to build the <Role>ExtraConfig key
    :raises exceptions.DeploymentError: when the file is missing or is not
        parseable YAML
    """
    if not override_file or not os.path.exists(override_file):
        # we should never get here because there's a check in
        # undercloud_conf but stranger things have happened.
        msg = (_('hieradata_override file could not be found %s') %
               override_file)
        self.log.error(msg)
        raise exceptions.DeploymentError(msg)

    target = override_file
    # BUG FIX: the file handle from open(target, 'r').read() was never
    # closed; a context manager releases it deterministically.
    with open(target, 'r') as f:
        hiera_data = yaml.safe_load(f.read())
    if not hiera_data:
        msg = (_('Unsupported data format in hieradata override %s') %
               target)
        self.log.error(msg)
        raise exceptions.DeploymentError(msg)
    self._create_working_dirs()

    # NOTE(bogdando): In t-h-t, hiera data should come in wrapped as
    # {parameter_defaults: {UndercloudExtraConfig: ... }}
    extra_config_var = '%sExtraConfig' % tripleo_role_name
    if (extra_config_var not in hiera_data.get('parameter_defaults', {})):
        hiera_override_file = os.path.join(
            self.tht_render, 'tripleo-hieradata-override.yaml')
        self.log.info('Converting hiera overrides for t-h-t from '
                      'legacy format into a file %s' %
                      hiera_override_file)
        with open(hiera_override_file, 'w') as override:
            yaml.safe_dump(
                {'parameter_defaults': {
                    extra_config_var: hiera_data}},
                override,
                default_flow_style=False)
        target = hiera_override_file
    return target
def _process_undercloud_output(templates_dir, output_file_path):
    """copy the undercloud output file to our work dir"""
    output_file = os.path.join(constants.MINION_OUTPUT_DIR,
                               output_file_path)
    env_file = os.path.join(templates_dir, 'tripleo-undercloud-base.yaml')
    # Prefer the copy under MINION_OUTPUT_DIR, then the raw path.
    for candidate in (output_file, output_file_path):
        if os.path.exists(candidate):
            src_file = candidate
            break
    else:
        raise exceptions.DeploymentError('Cannot locate undercloud output '
                                         'file {}'.format(output_file_path))
    try:
        shutil.copy(os.path.abspath(src_file), env_file)
    except Exception:
        msg = _('Cannot copy undercloud output file %s into a '
                'tempdir!') % src_file
        LOG.error(msg)
        raise exceptions.DeploymentError(msg)
    return env_file
def _process_undercloud_passwords(src_file, dest_file): try: shutil.copy(os.path.abspath(src_file), dest_file) except Exception: msg = _('Cannot copy undercloud password file %(src)s to ' '%(dest)s') % { 'src': src_file, 'dest': dest_file } LOG.error(msg) raise exceptions.DeploymentError(msg)
def take_action(self, parsed_args):
    """Deploy an undercloud using a local, throwaway Heat instance.

    Launches Heat, deploys the t-h-t templates, downloads the generated
    ansible playbooks and finally exec()s ansible — on success this call
    never returns to Python.
    """
    self.log.debug("take_action(%s)" % parsed_args)

    print("\nUndercloud deploy is an experimental developer focused "
          "feature that does not yet replace "
          "'openstack undercloud install'.")

    if not parsed_args.local_ip:
        print('Please set --local-ip to the correct ipaddress/cidr '
              'for this machine.')
        return

    if not os.environ.get('HEAT_API_PORT'):
        os.environ['HEAT_API_PORT'] = parsed_args.heat_api_port

    # The main thread runs as root and we drop privs for forked
    # processes below. Only the heat deploy/os-collect-config forked
    # process runs as root.
    if os.geteuid() != 0:
        raise exceptions.DeploymentError("Please run as root.")

    # configure puppet
    self._configure_puppet()

    try:
        # Launch heat.
        orchestration_client = self._launch_heat(parsed_args)
        # Wait for heat to be ready.
        self._wait_local_port_ready(parsed_args.heat_api_port)
        # Deploy TripleO Heat templates.
        stack_id = \
            self._deploy_tripleo_heat_templates(orchestration_client,
                                                parsed_args)
        # Wait for complete..
        self._wait_for_heat_complete(orchestration_client, stack_id,
                                     parsed_args.timeout)
        # download the ansible playbooks and execute them.
        ansible_dir = \
            self._download_ansible_playbooks(orchestration_client,
                                             stack_id)
        # Kill heat, we're done with it now.
        self._kill_heat()
        # Never returns..  We exec() it directly.
        self._launch_ansible(ansible_dir)
    except Exception as e:
        print("Exception: %s" % e)
        print(traceback.format_exception(*sys.exc_info()))
        raise
    finally:
        # We only get here on error.
        # NOTE(review): _launch_ansible exec()s on success, so this
        # finally only runs on failure — but the `return 1` below also
        # discards the in-flight `raise` above (return inside finally
        # swallows the exception). Looks intentional; confirm before
        # changing.
        print('ERROR: Heat log files: %s' % (self.heat_launch.install_tmp))
        self._kill_heat()
        return 1
def config_download(log, clients, stack, templates, ssh_user, ssh_key,
                    ssh_network, output_dir, override_ansible_cfg, timeout,
                    verbosity=1, deployment_options=None,
                    in_flight_validations=False):
    """Run the config-download deploy workflow and report the outcome.

    :param log: logger (kept for interface compatibility)
    :param clients: client manager providing workflow/tripleoclient
    :param stack: Heat stack whose name selects the plan
    :param templates: templates dir (kept for interface compatibility)
    :param ssh_user: SSH user (kept for interface compatibility)
    :param ssh_key: SSH key (kept for interface compatibility)
    :param ssh_network: network used for ansible SSH access
    :param output_dir: optional working directory for config-download
    :param override_ansible_cfg: optional path to an ansible.cfg override
    :param timeout: config-download timeout
    :param verbosity: ansible verbosity level
    :param deployment_options: optional dict of extra deployment options
    :param in_flight_validations: run in-flight validations when True
    :raises exceptions.DeploymentError: when the workflow does not succeed
    """
    # BUG FIX: mutable default argument ({}) replaced with None to avoid
    # the shared-mutable-default pitfall; behaviour is unchanged.
    if deployment_options is None:
        deployment_options = {}
    workflow_client = clients.workflow_engine
    tripleoclients = clients.tripleoclient

    if in_flight_validations:
        skip_tags = ''
    else:
        skip_tags = 'opendev-validation'

    workflow_input = {
        'verbosity': verbosity,
        'plan_name': stack.stack_name,
        'ssh_network': ssh_network,
        'config_download_timeout': timeout,
        'deployment_options': deployment_options,
        'skip_tags': skip_tags
    }
    if output_dir:
        workflow_input.update(dict(work_dir=output_dir))
    if override_ansible_cfg:
        with open(override_ansible_cfg) as cfg:
            override_ansible_cfg_contents = cfg.read()
        workflow_input.update(
            dict(override_ansible_cfg=override_ansible_cfg_contents))

    with tripleoclients.messaging_websocket() as ws:
        execution = base.start_workflow(
            workflow_client,
            'tripleo.deployment.v1.config_download_deploy',
            workflow_input=workflow_input)
        for payload in base.wait_for_messages(workflow_client, ws,
                                              execution):
            print(payload['message'])

    # The last message carries the final workflow status.
    if payload['status'] == 'SUCCESS':
        print("Overcloud configuration completed.")
    else:
        raise exceptions.DeploymentError("Overcloud configuration failed.")
def _container_images_config(conf, deploy_args, env_data, tempdir):
    """Wire container image configuration into the deploy arguments.

    Appends the user-supplied images file to deploy_args and copies it
    into tempdir, or configures the default ContainerImagePrepare when no
    file was given (env_data is mutated in place).

    :raises exceptions.DeploymentError: when the images file cannot be
        copied
    """
    if conf.container_images_file:
        deploy_args += ['-e', conf.container_images_file]
        try:
            shutil.copy(os.path.abspath(conf.container_images_file),
                        tempdir)
        except Exception:
            # BUG FIX: adjacent string literals were missing a space and
            # concatenated to "container imagesfile".
            msg = _('Cannot copy a container images '
                    'file %s into a tempdir!') % conf.container_images_file
            LOG.error(msg)
            raise exceptions.DeploymentError(msg)
    else:
        # no images file was provided. Set a default ContainerImagePrepare
        # parameter to trigger the preparation of the required container
        # list
        cip = kolla_builder.CONTAINER_IMAGE_PREPARE_PARAM
        env_data['ContainerImagePrepare'] = cip
def test_update_failed(self, mock_yaml, mock_abspath, mock_open,
                       mock_update):
    """A DeploymentError from the update workflow aborts take_action."""
    mock_update.side_effect = exceptions.DeploymentError()
    mock_abspath.return_value = '/home/fake/my-fake-registry.yaml'
    mock_yaml.return_value = {'fake_container': 'fake_value'}

    parsed_args = self.check_parser(
        self.cmd,
        ['--stack', 'overcloud', '--init-minor-update',
         '--container-registry-file', 'my-fake-registry.yaml'],
        [('stack', 'overcloud'),
         ('init_minor_update', True),
         ('container_registry_file', 'my-fake-registry.yaml')])

    with self.assertRaises(exceptions.DeploymentError):
        self.cmd.take_action(parsed_args)
def _get_undercloud_host_entry(self):
    """Get hosts entry for undercloud ctlplane network

    The host entry will be added on overcloud nodes
    """
    ctlplane_hostname = '.'.join([utils.get_short_hostname(), 'ctlplane'])
    process = subprocess.Popen(['getent', 'hosts', ctlplane_hostname],
                               stdout=subprocess.PIPE,
                               universal_newlines=True)
    out = process.communicate()[0]
    if process.returncode != 0:
        raise exceptions.DeploymentError('No entry for %s in /etc/hosts'
                                         % ctlplane_hostname)
    # Collapse runs of spaces so the entry is normalized.
    return re.sub(' +', ' ', str(out).rstrip())
def wait_for_ssh_port(host):
    """Block until TCP port 22 on *host* accepts connections.

    Polls once per second.

    :param host: hostname or IP address to probe
    :raises exceptions.DeploymentError: after
        constants.ENABLE_SSH_ADMIN_SSH_PORT_TIMEOUT seconds
    """
    start = int(time.time())
    while True:
        now = int(time.time())
        if (now - start) > constants.ENABLE_SSH_ADMIN_SSH_PORT_TIMEOUT:
            raise exceptions.DeploymentError(
                "Timed out waiting for port 22 from %s" % host)
        # BUG FIX: the probe socket was never closed (leaked one fd per
        # attempt and on success); close it in all cases.
        sock = socket.socket()
        try:
            try:
                sock.connect((host, 22))
                return
            except socket.error:
                pass
        finally:
            sock.close()
        time.sleep(1)
def test_update_failed(self, mock_deploy, mock_copy, mock_yaml,
                       mock_abspath, mock_open, mock_update):
    """A DeploymentError from the update workflow aborts take_action."""
    mock_update.side_effect = exceptions.DeploymentError()
    mock_abspath.return_value = '/home/fake/my-fake-registry.yaml'
    mock_yaml.return_value = {'fake_container': 'fake_value'}

    parsed_args = self.check_parser(
        self.cmd,
        ['--stack', 'overcloud', '--templates',
         '--container-registry-file', 'my-fake-registry.yaml'],
        [('stack', 'overcloud'),
         ('templates', constants.TRIPLEO_HEAT_TEMPLATES),
         ('container_registry_file', 'my-fake-registry.yaml')])

    with self.assertRaises(exceptions.DeploymentError):
        self.cmd.take_action(parsed_args)
def get_hosts_and_enable_ssh_admin(stack, overcloud_ssh_network,
                                   overcloud_ssh_user, overcloud_ssh_key,
                                   overcloud_ssh_port_timeout,
                                   verbosity=0):
    """Enable ssh admin access.

    Get a list of hosts from a given stack and enable admin ssh across
    all of them.

    :param stack: Stack data.
    :type stack: Object

    :param overcloud_ssh_network: Network id.
    :type overcloud_ssh_network: String

    :param overcloud_ssh_user: SSH access username.
    :type overcloud_ssh_user: String

    :param overcloud_ssh_key: SSH access key.
    :type overcloud_ssh_key: String

    :param overcloud_ssh_port_timeout: Ansible connection timeout
        in seconds
    :type overcloud_ssh_port_timeout: Int

    :param verbosity: Verbosity level
    :type verbosity: Integer

    :raises exceptions.DeploymentError: when no usable host is found on
        the given network
    """
    hosts = get_overcloud_hosts(stack, overcloud_ssh_network)
    # IDIOM: any() short-circuits and avoids building a throwaway list
    # (the original used a list comprehension purely for truthiness).
    if any(hosts):
        enable_ssh_admin(
            stack,
            hosts,
            overcloud_ssh_user,
            overcloud_ssh_key,
            overcloud_ssh_port_timeout,
            verbosity=verbosity
        )
    else:
        raise exceptions.DeploymentError(
            'Cannot find any hosts on "{}" in network "{}"'.format(
                stack.stack_name, overcloud_ssh_network
            )
        )
def _normalize_user_templates(self, user_tht_root, tht_root, env_files=None):
    """copy environment files into tht render path

    This assumes any env file that includes user_tht_root has already
    been copied into tht_root.

    :param user_tht_root: string path to the user's template dir
    :param tht_root: string path to our deployed tht_root
    :param env_files: list of paths to environment files
    :return list of absolute pathed environment files that exist in
            tht_root
    :raises exceptions.DeploymentError: when copying an external file
        would overwrite an existing one in tht_root
    """
    # BUG FIX: mutable default argument ([]) replaced with None to avoid
    # the shared-mutable-default pitfall; behaviour is unchanged.
    if env_files is None:
        env_files = []
    environments = []
    # normalize the user template path to ensure it doesn't have a
    # trailing slash
    user_tht = os.path.abspath(user_tht_root)
    for env_path in env_files:
        self.log.debug("Processing file %s" % env_path)
        abs_env_path = os.path.abspath(env_path)
        if (abs_env_path.startswith(user_tht_root) and
            ((user_tht + '/') in env_path or
             (user_tht + '/') in abs_env_path or
             user_tht == abs_env_path or
             user_tht == env_path)):
            # file is in tht and will be copied, so just update path
            new_env_path = env_path.replace(user_tht + '/',
                                            tht_root + '/')
            self.log.debug("Redirecting %s to %s"
                           % (abs_env_path, new_env_path))
            environments.append(new_env_path)
        elif abs_env_path.startswith(tht_root):
            # BUG FIX: this debug call (and the one in the else branch)
            # was missing its format argument and logged a literal "%s".
            self.log.debug("File already in tht_root %s" % abs_env_path)
            environments.append(abs_env_path)
        else:
            self.log.debug("File outside of tht_root %s, copying in"
                           % abs_env_path)
            # file is outside of THT, just copy it in
            # TODO(aschultz): probably shouldn't be flattened?
            target_dest = os.path.join(tht_root,
                                       os.path.basename(abs_env_path))
            if os.path.exists(target_dest):
                raise exceptions.DeploymentError("%s already exists, "
                                                 "please rename the "
                                                 "file to something else"
                                                 % target_dest)
            shutil.copy(abs_env_path, tht_root)
            environments.append(target_dest)
    return environments
def take_action(self, parsed_args):
    """Start, monitor or abort a package update on an overcloud stack.

    :raises oscexc.CommandError: when neither --templates nor
        --answers-file is given
    :raises exceptions.DeploymentError: when an interactive update fails
    """
    if parsed_args.templates is None and parsed_args.answers_file is None:
        raise oscexc.CommandError(
            "You must specify either --templates or --answers-file")

    if parsed_args.answers_file is not None:
        with open(parsed_args.answers_file, 'r') as answers_file:
            # SECURITY FIX: yaml.load without an explicit Loader can
            # construct arbitrary Python objects; safe_load suffices for
            # an answers file of plain mappings.
            answers = yaml.safe_load(answers_file)

        if parsed_args.templates is None:
            parsed_args.templates = answers['templates']
        if 'environments' in answers:
            if parsed_args.environment_files is not None:
                # BUG FIX: "answers.environments" raised AttributeError
                # (answers is a dict); index it so CLI-provided
                # environment files are appended as intended.
                answers['environments'].extend(
                    parsed_args.environment_files)
            parsed_args.environment_files = answers['environments']

    self.log.debug("take_action(%s)" % parsed_args)
    clients = self.app.client_manager

    update_manager = update.PackageUpdateManager(
        heatclient=clients.orchestration,
        novaclient=clients.compute,
        stack_id=parsed_args.stack,
        tht_dir=parsed_args.templates,
        environment_files=parsed_args.environment_files)
    if parsed_args.abort_update:
        print("cancelling package update on stack {0}".format(
            parsed_args.stack))
        update_manager.cancel()
    else:
        status, resources = update_manager.get_status()
        if status not in ['IN_PROGRESS', 'WAITING']:
            print("starting package update on stack {0}".format(
                parsed_args.stack))
            update_manager.update()
        if parsed_args.interactive:
            update_manager.do_interactive_update()
            # BUG FIX: "status, _ = ..." shadowed the gettext alias `_`;
            # take the first element explicitly instead.
            status = update_manager.get_status()[0]
            if status not in ['COMPLETE']:
                raise exceptions.DeploymentError("Stack update failed.")
        else:
            print("stack {0} status: {1}".format(parsed_args.stack,
                                                 status))
def _launch_heat(self, parsed_args):
    """Fork and launch the throwaway Heat used for this deploy.

    Chooses the native or Docker launcher based on parsed_args, forks a
    child that (optionally) drops privileges and exec()s Heat, then
    returns an orchestration client pointed at the local Heat API.
    """
    if not os.path.isdir(parsed_args.output_dir):
        os.mkdir(parsed_args.output_dir)

    # we do this as root to chown config files properly for docker, etc.
    if parsed_args.heat_native:
        self.heat_launch = heat_launcher.HeatNativeLauncher(
            parsed_args.heat_api_port,
            parsed_args.heat_container_image,
            parsed_args.heat_user)
    else:
        self.heat_launch = heat_launcher.HeatDockerLauncher(
            parsed_args.heat_api_port,
            parsed_args.heat_container_image,
            parsed_args.heat_user)

    # NOTE(dprince): we launch heat with fork exec because
    # we don't want it to inherit our args. Launching heat
    # as a "library" would be cool... but that would require
    # more refactoring. It runs a single process and we kill
    # it always below.
    self.heat_pid = os.fork()
    if self.heat_pid == 0:
        # Child process: drop privileges (native mode only), sync the DB,
        # then exec Heat — the child never returns from launch_heat().
        if parsed_args.heat_native:
            try:
                uid = pwd.getpwnam(parsed_args.heat_user).pw_uid
                gid = pwd.getpwnam(parsed_args.heat_user).pw_gid
            except KeyError:
                raise exceptions.DeploymentError(
                    "Please create a %s user account before "
                    "proceeding." % parsed_args.heat_user)
            # Order matters: setgid must run while still root, before
            # setuid drops the privilege needed to change groups.
            os.setgid(gid)
            os.setuid(uid)
        self.heat_launch.heat_db_sync()
        # Exec() never returns.
        self.heat_launch.launch_heat()

    # NOTE(dprince): we use our own client here because we set
    # auth_required=False above because keystone isn't running when this
    # command starts
    tripleoclients = self.app.client_manager.tripleoclient
    orchestration_client = \
        tripleoclients.local_orchestration(parsed_args.heat_api_port)
    return orchestration_client