def take_action(self, parsed_args):
    """Read a nodes definition file and register the nodes with Ironic.

    Accepts JSON, YAML or CSV input (selected by explicit flag or file
    extension), normalizes node capabilities, registers every node via
    ``nodes.register_all_nodes`` and, when requested, walks them to the
    'available' provision state.
    """
    self.log.debug("take_action(%s)" % parsed_args)
    # Pick the parser from an explicit format flag or the file extension.
    if parsed_args.json or parsed_args.file_in.name.endswith('.json'):
        nodes_config = json.load(parsed_args.file_in)
    elif parsed_args.csv or parsed_args.file_in.name.endswith('.csv'):
        nodes_config = _csv_to_nodes_dict(parsed_args.file_in)
    elif parsed_args.file_in.name.endswith('.yaml'):
        nodes_config = yaml.safe_load(parsed_args.file_in)
    else:
        raise exceptions.InvalidConfiguration(
            _("Invalid file extension for %s, must be json, yaml or csv") %
            parsed_args.file_in.name)

    # Accept either a bare list of nodes or a {'nodes': [...]} wrapper.
    if 'nodes' in nodes_config:
        nodes_config = nodes_config['nodes']

    client = self.app.client_manager.baremetal
    if parsed_args.initial_state == "enroll":
        # The 'enroll' provision state only exists from Ironic API 1.11 on.
        api_version = client.http_client.os_ironic_api_version
        if [int(part) for part in api_version.split('.')] < [1, 11]:
            raise exceptions.InvalidConfiguration(
                _("OS_BAREMETAL_API_VERSION must be >=1.11 for use of "
                  "'enroll' provision state; currently %s") % api_version)

    # Ensure every node carries a boot_option capability, defaulting to
    # the value given on the command line.
    for node in nodes_config:
        caps = utils.capabilities_to_dict(node.get('capabilities', {}))
        caps.setdefault('boot_option', parsed_args.instance_boot_option)
        node['capabilities'] = utils.dict_to_capabilities(caps)

    new_nodes = nodes.register_all_nodes(
        parsed_args.service_host,
        nodes_config,
        client=client,
        keystone_client=self.app.client_manager.identity,
        glance_client=self.app.client_manager.image,
        kernel_name=(parsed_args.deploy_kernel if not
                     parsed_args.no_deploy_image else None),
        ramdisk_name=(parsed_args.deploy_ramdisk if not
                      parsed_args.no_deploy_image else None))

    if parsed_args.initial_state == "available":
        # Walk nodes through manageable -> available, skipping nodes
        # already in (or past) the target state.
        manageable_node_uuids = list(
            utils.set_nodes_state(
                client, new_nodes, "manage", "manageable",
                skipped_states={'manageable', 'available'}))
        manageable_nodes = [
            n for n in new_nodes if n.uuid in manageable_node_uuids
        ]
        list(
            utils.set_nodes_state(client, manageable_nodes, "provide",
                                  "available",
                                  skipped_states={'available'}))
def take_action(self, parsed_args):
    """Run a minor update: init the update workflow or run ansible on nodes.

    With --init-minor-update, loads the (optional) container registry file
    and triggers the package_update workflow for the stack. Otherwise
    generates (if not supplied) a static ansible inventory and runs the
    given playbook on the selected nodes.

    :raises: exceptions.InvalidConfiguration when the inventory cannot be
        generated or found.
    """
    self.log.debug("take_action(%s)" % parsed_args)
    clients = self.app.client_manager

    stack = oooutils.get_stack(clients.orchestration, parsed_args.stack)
    stack_name = stack.stack_name
    container_registry = parsed_args.container_registry_file

    if parsed_args.init_minor_update:
        # Update the container registry:
        if container_registry:
            with open(os.path.abspath(container_registry)) as content:
                # safe_load: the registry file is plain data; yaml.load
                # without an explicit Loader is unsafe and deprecated.
                registry = yaml.safe_load(content.read())
        else:
            self.log.warning(
                "You have not provided a container registry file. Note "
                "that none of the containers on your environment will be "
                "updated. If you want to update your container you have "
                "to re-run this command and provide the registry file "
                "with: --container-registry-file option.")
            registry = None

        # Execute minor update
        ceph_ansible_playbook = parsed_args.ceph_ansible_playbook
        package_update.update(clients, container=stack_name,
                              container_registry=registry,
                              ceph_ansible_playbook=ceph_ansible_playbook)
        print("Minor update init on stack {0} complete.".format(
            parsed_args.stack))
    else:
        # Run ansible:
        nodes = parsed_args.nodes
        playbook = parsed_args.playbook
        inventory_file = parsed_args.static_inventory
        if inventory_file is None:
            # No inventory supplied: generate one under the home dir.
            inventory_file = '%s/%s' % (os.path.expanduser('~'),
                                        'tripleo-ansible-inventory')
            try:
                processutils.execute('/bin/tripleo-ansible-inventory',
                                     '--static-inventory', inventory_file)
            except processutils.ProcessExecutionError as e:
                message = "Failed to generate inventory: %s" % str(e)
                raise exceptions.InvalidConfiguration(message)
        if os.path.exists(inventory_file):
            # Use a context manager so the handle is closed promptly
            # (the original leaked the open file object).
            with open(inventory_file, 'r') as inv:
                inventory = inv.read()
        else:
            raise exceptions.InvalidConfiguration(
                "Inventory file %s can not be found." % inventory_file)
        package_update.update_ansible(
            clients, nodes=nodes,
            inventory_file=inventory,
            playbook=playbook,
            ansible_queue_name=constants.UPDATE_QUEUE)
def take_action(self, parsed_args):
    """Register nodes from a JSON/YAML/CSV file via the baremetal workflow.

    Parses the input file, normalizes capabilities, registers (or updates)
    the nodes through the baremetal workflow and, when requested, moves
    them to the 'available' provision state.
    """
    self.log.debug("take_action(%s)" % parsed_args)
    # Explicit format flag wins over extension-based detection.
    if parsed_args.json or parsed_args.file_in.name.endswith('.json'):
        nodes_config = json.load(parsed_args.file_in)
    elif parsed_args.csv or parsed_args.file_in.name.endswith('.csv'):
        nodes_config = _csv_to_nodes_dict(parsed_args.file_in)
    elif parsed_args.file_in.name.endswith('.yaml'):
        nodes_config = yaml.safe_load(parsed_args.file_in)
    else:
        raise exceptions.InvalidConfiguration(
            _("Invalid file extension for %s, must be json, yaml or csv") %
            parsed_args.file_in.name)

    # Accept either a bare list or a {'nodes': [...]} wrapper.
    if 'nodes' in nodes_config:
        nodes_config = nodes_config['nodes']

    client = self.app.client_manager.baremetal
    if parsed_args.initial_state == "enroll":
        # The 'enroll' provision state requires Ironic API >= 1.11.
        api_version = client.http_client.os_ironic_api_version
        if [int(part) for part in api_version.split('.')] < [1, 11]:
            raise exceptions.InvalidConfiguration(
                _("OS_BAREMETAL_API_VERSION must be >=1.11 for use of "
                  "'enroll' provision state; currently %s") % api_version)

    # NOTE (dprince) move this to tripleo-common?
    for node in nodes_config:
        caps = node.get('capabilities', {})
        # Capabilities may come in either string or dict form.
        if not isinstance(caps, dict):
            caps = utils.capabilities_to_dict(caps)
        caps.setdefault('boot_option', parsed_args.instance_boot_option)
        node['capabilities'] = caps

    # Unique queue name so workflow messages for this invocation do not
    # mix with concurrent registrations.
    queue_name = str(uuid.uuid4())

    if parsed_args.no_deploy_image:
        deploy_kernel = None
        deploy_ramdisk = None
    else:
        deploy_kernel = parsed_args.deploy_kernel
        deploy_ramdisk = parsed_args.deploy_ramdisk

    nodes = baremetal.register_or_update(self.app.client_manager,
                                         nodes_json=nodes_config,
                                         queue_name=queue_name,
                                         kernel_name=deploy_kernel,
                                         ramdisk_name=deploy_ramdisk)
    node_uuids = [node['uuid'] for node in nodes]

    if parsed_args.initial_state == "available":
        baremetal.provide(self.app.client_manager,
                          node_uuids=node_uuids,
                          queue_name=queue_name)
def _process_network_args(env):
    """Populate the environment with network configuration.

    Fills *env* (a dict of Heat parameter values) from the undercloud
    CONF: inspection subnets, static routes, per-subnet ctlplane settings,
    masquerade networks, and IPv6 toggles. Also validates nameserver count
    and endpoint IP uniqueness.
    """
    env['IronicInspectorSubnets'] = _generate_inspection_subnets()
    env['PortPhysnetCidrMap'] = _generate_inspection_physnet_cidr_map()
    env['ControlPlaneStaticRoutes'] = _generate_subnets_static_routes()
    env['UndercloudCtlplaneSubnets'] = {}
    env['UndercloudCtlplaneIPv6AddressMode'] = CONF['ipv6_address_mode']
    for subnet in CONF.subnets:
        s = CONF.get(subnet)
        env['UndercloudCtlplaneSubnets'][subnet] = {
            'AllocationPools': _calculate_allocation_pools(s)
        }
        # Per-subnet nameservers win; fall back to the global setting.
        if s.get('dns_nameservers'):
            env['UndercloudCtlplaneSubnets'][subnet].update(
                {'DnsNameServers': s['dns_nameservers']})
        else:
            env['UndercloudCtlplaneSubnets'][subnet].update(
                {'DnsNameServers': CONF['undercloud_nameservers']})
        # Copy the remaining mapped subnet options verbatim.
        for param_key, param_value in SUBNET_PARAMETER_MAPPING.items():
            if param_value:
                env['UndercloudCtlplaneSubnets'][subnet].update(
                    {param_value: s[param_key]})
    env['MasqueradeNetworks'] = _generate_masquerade_networks()
    # resolv.conf supports at most a handful of nameservers; reject more.
    if len(CONF['undercloud_nameservers']) > 5:
        raise exceptions.InvalidConfiguration('Too many nameservers provided. '
                                              'Please provide less than 6 '
                                              'servers in undercloud_'
                                              'nameservers.')
    if netaddr.IPNetwork(CONF['local_ip']).version == 6:
        # IPv6 local_ip: flip every service into IPv6 mode.
        env['NovaIPv6'] = True
        env['RabbitIPv6'] = True
        env['MemcachedIPv6'] = True
        env['RedisIPv6'] = True
        env['MysqlIPv6'] = True
        env['IronicIpVersion'] = '6'
    # We do not use undercloud ips for env, but just validate the configured
    # value here.
    if (CONF.get('generate_service_certificate') or
            CONF.get('undercloud_service_certificate')):
        undercloud_ips = [
            CONF.local_ip.split('/')[0],
            CONF.undercloud_admin_host,
            CONF.undercloud_public_host
        ]
        # TLS requires distinct IPs per endpoint.
        if len(undercloud_ips) != len(set(undercloud_ips)):
            msg = ("The same IP is used for multiple endpoints. Please use "
                   "unique ips for local_ip, undercloud_admin_host and "
                   "undercloud_public_host")
            raise exceptions.InvalidConfiguration(msg)
def store_cli_param(command_name, parsed_args):
    """write the cli parameters into an history file"""
    # "openstack undercloud install" is recorded as "undercloud-install":
    # spaces become dashes for readability.
    command_name = command_name.replace(" ", "-")

    history_path = os.path.join(os.path.expanduser("~"), '.tripleo')
    if not os.path.exists(history_path):
        try:
            os.mkdir(history_path)
        except OSError as e:
            messages = _("Unable to create TripleO history directory: "
                         "{0}, {1}").format(history_path, e)
            raise OSError(messages)

    if not os.path.isdir(history_path):
        raise exceptions.InvalidConfiguration(_("Target path %s is not a "
                                                "directory") % history_path)

    try:
        with open(os.path.join(history_path, 'history'), 'a') as history:
            snapshot = parsed_args.__dict__.copy()
            used_args = ', '.join('%s=%s' % pair
                                  for pair in snapshot.items())
            record = ' '.join([str(datetime.datetime.now()),
                               str(command_name), used_args, "\n"])
            history.write(record)
    except IOError as e:
        messages = _("Unable to write into TripleO history file: "
                     "{0}, {1}").format(history_path, e)
        raise IOError(messages)
def take_action(self, parsed_args):
    """Load parameters from a JSON/YAML file and push them to the plan.

    Only the 'parameter_defaults' section of a full environment file is
    used when present.
    """
    self.log.debug("take_action(%s)" % parsed_args)

    file_name = parsed_args.file_in.name
    if file_name.endswith('.json'):
        params = simplejson.load(parsed_args.file_in)
    elif file_name.endswith('.yaml'):
        params = yaml.safe_load(parsed_args.file_in)
    else:
        raise exceptions.InvalidConfiguration(
            _("Invalid file extension for %s, must be json or yaml") %
            parsed_args.file_in.name)

    # A full environment file may be supplied; unwrap it.
    if 'parameter_defaults' in params:
        params = params['parameter_defaults']

    clients = self.app.client_manager
    workflow_client = clients.workflow_engine

    name = parsed_args.name

    parameters.update_parameters(
        workflow_client, container=name, parameters=params)
def get_tripleo_ansible_inventory(inventory_file='', ssh_user='******'):
    """Generate and read the static tripleo ansible inventory.

    Always (re)generates the inventory via /usr/bin/tripleo-ansible-inventory,
    then returns the file contents.

    :param inventory_file: target inventory path; defaults to
        ~/tripleo-ansible-inventory.yaml when empty.
    :param ssh_user: value passed as --ansible_ssh_user.
    :returns: the inventory file contents as a string.
    :raises: exceptions.InvalidConfiguration when generation fails or the
        file cannot be found afterwards.
    """
    if not inventory_file:
        inventory_file = '%s/%s' % (os.path.expanduser('~'),
                                    'tripleo-ansible-inventory.yaml')
    try:
        processutils.execute(
            '/usr/bin/tripleo-ansible-inventory',
            '--ansible_ssh_user', ssh_user,
            '--static-yaml-inventory', inventory_file)
    except processutils.ProcessExecutionError as e:
        message = "Failed to generate inventory: %s" % str(e)
        raise exceptions.InvalidConfiguration(message)
    if os.path.exists(inventory_file):
        # Read through a context manager so the handle is closed promptly
        # (the original leaked the open file object).
        with open(inventory_file, 'r') as inv:
            inventory = inv.read()
        return inventory
    else:
        raise exceptions.InvalidConfiguration(
            "Inventory file %s can not be found." % inventory_file)
def _validate_skip_tags(self, skip_tags):
    """Check that each comma-separated tag is a supported skip tag.

    Returns the original skip_tags string unchanged; raises
    InvalidConfiguration on the first unsupported tag.
    """
    for raw_tag in skip_tags.split(','):
        candidate = raw_tag.strip()
        if candidate and candidate not in constants.MAJOR_UPGRADE_SKIP_TAGS:
            raise exceptions.InvalidConfiguration(
                "Unexpected tag %s. Supported values are %s" % (
                    candidate, constants.MAJOR_UPGRADE_SKIP_TAGS))
    return skip_tags
def _heat_deploy(self, stack, stack_name, template_path, parameters,
                 created_env_files, timeout, tht_root, env):
    """Verify the Baremetal nodes are available and do a stack update"""
    clients = self.app.client_manager
    workflow_client = clients.workflow_engine

    self.log.debug("Processing environment files %s" % created_env_files)
    env_files, localenv = (
        template_utils.process_multiple_environments_and_files(
            created_env_files))
    # Command line has more precedence than env files
    template_utils.deep_update(localenv, env)

    if stack:
        # Existing stack: make sure stale breakpoints get cleaned up.
        update.add_breakpoints_cleanup_into_env(localenv)

    self.log.debug("Getting template contents from plan %s" % stack_name)
    # We need to reference the plan here, not the local
    # tht root, as we need template_object to refer to
    # the rendered overcloud.yaml, not the tht_root overcloud.j2.yaml
    # FIXME(shardy) we need to move more of this into mistral actions
    plan_yaml_path = os.path.relpath(template_path, tht_root)

    # heatclient template_utils needs a function that can
    # retrieve objects from a container by name/path
    objectclient = clients.tripleoclient.object_store

    def do_object_request(method='GET', object_path=None):
        # Fetch object_path from the plan container; get_object returns a
        # (headers, body) tuple, so return the body (or a falsy value).
        obj = objectclient.get_object(stack_name, object_path)
        return obj and obj[1]

    template_files, template = template_utils.get_template_contents(
        template_object=plan_yaml_path,
        object_request=do_object_request)

    files = dict(list(template_files.items()) + list(env_files.items()))

    number_controllers = int(parameters.get('ControllerCount', 0))
    if number_controllers > 1:
        # HA (multi-controller) deployments require an NTP server so the
        # controllers stay time-synchronized.
        if not localenv.get('parameter_defaults').get('NtpServer'):
            raise exceptions.InvalidConfiguration(
                'Specify --ntp-server as parameter or NtpServer in '
                'environments when using multiple controllers '
                '(with HA).')

    clients = self.app.client_manager

    moved_files = self._upload_missing_files(
        stack_name, objectclient, files, tht_root)
    self._process_and_upload_environment(
        stack_name, objectclient, localenv, moved_files, tht_root,
        workflow_client)

    deployment.deploy_and_wait(self.log, clients, stack, stack_name,
                               self.app_args.verbose_level, timeout)
def prepend_environment(environment_files, templates_dir, environment):
    """Insert templates_dir/environment at the front of environment_files.

    Returns the (possibly newly created) list; raises InvalidConfiguration
    when the environment file does not exist under templates_dir.
    """
    env_files = environment_files if environment_files else []
    full_path = os.path.join(templates_dir, environment)
    # sanity check it exists before proceeding
    if not os.path.exists(full_path):
        raise exceptions.InvalidConfiguration(
            _("Expected environment file {0} not found in {1} cannot proceed."
              ).format(environment, templates_dir))
    # We need to prepend before the files provided by user.
    env_files.insert(0, full_path)
    return env_files
def parse_env_file(env_file, file_type=None):
    """Parse a nodes environment file into python data.

    file_type ('json' or 'csv') forces the parser; otherwise the file
    extension decides. A top-level {'nodes': [...]} wrapper is unwrapped.
    """
    name = env_file.name
    if file_type == 'json' or name.endswith('.json'):
        nodes_config = simplejson.load(env_file)
    elif file_type == 'csv' or name.endswith('.csv'):
        nodes_config = _csv_to_nodes_dict(env_file)
    elif name.endswith('.yaml'):
        nodes_config = yaml.safe_load(env_file)
    else:
        raise exceptions.InvalidConfiguration(
            _("Invalid file extension for %s, must be json, yaml or csv") %
            env_file.name)

    if 'nodes' in nodes_config:
        nodes_config = nodes_config['nodes']

    return nodes_config
def delete_node(clients, timeout, **workflow_input):
    """Run the tripleo.scale.v1.delete_node workflow and wait for it.

    Raises InvalidConfiguration when the workflow finishes with any status
    other than SUCCESS.
    """
    workflow_client = clients.workflow_engine
    tripleoclients = clients.tripleoclient

    if timeout is not None:
        workflow_input['timeout'] = timeout

    with tripleoclients.messaging_websocket() as ws:
        execution = base.start_workflow(
            workflow_client,
            'tripleo.scale.v1.delete_node',
            workflow_input=workflow_input)

        for payload in base.wait_for_messages(workflow_client, ws,
                                              execution):
            # Progress messages are ignored; only terminal states matter.
            if payload['status'] == 'RUNNING':
                continue
            if payload['status'] != 'SUCCESS':
                raise exceptions.InvalidConfiguration(payload['message'])
def take_action(self, parsed_args):
    """Import nodes from an environment file and register them.

    The explicit --json/--csv flags override extension-based detection;
    registration goes through the baremetal workflow and, with
    --initial-state=available, the nodes are also provided.
    """
    self.log.debug("take_action(%s)" % parsed_args)

    # Explicit format flag (if any) takes precedence over the extension.
    file_type = None
    if parsed_args.json:
        file_type = 'json'
    elif parsed_args.csv:
        file_type = 'csv'

    nodes_config = utils.parse_env_file(parsed_args.file_in, file_type)

    client = self.app.client_manager.baremetal
    if parsed_args.initial_state == "enroll":
        # The 'enroll' provision state requires Ironic API >= 1.11.
        api_version = client.http_client.os_ironic_api_version
        if [int(part) for part in api_version.split('.')] < [1, 11]:
            raise exceptions.InvalidConfiguration(
                _("OS_BAREMETAL_API_VERSION must be >=1.11 for use of "
                  "'enroll' provision state; currently %s") % api_version)

    # Unique queue name for this invocation's workflow messages.
    queue_name = str(uuid.uuid4())

    if parsed_args.no_deploy_image:
        deploy_kernel = None
        deploy_ramdisk = None
    else:
        deploy_kernel = parsed_args.deploy_kernel
        deploy_ramdisk = parsed_args.deploy_ramdisk

    nodes = baremetal.register_or_update(
        self.app.client_manager,
        nodes_json=nodes_config,
        queue_name=queue_name,
        kernel_name=deploy_kernel,
        ramdisk_name=deploy_ramdisk,
        instance_boot_option=parsed_args.instance_boot_option)
    node_uuids = [node['uuid'] for node in nodes]

    if parsed_args.initial_state == "available":
        baremetal.provide(self.app.client_manager,
                          node_uuids=node_uuids,
                          queue_name=queue_name)
def take_action(self, parsed_args):
    """Parse the nodes definition file and register every node."""
    self.log.debug("take_action(%s)" % parsed_args)

    in_file = parsed_args.file_in
    # Explicit format flag wins over extension-based detection.
    if parsed_args.json or in_file.name.endswith('.json'):
        nodes_config = json.load(in_file)
    elif parsed_args.csv or in_file.name.endswith('.csv'):
        nodes_config = _csv_to_nodes_dict(in_file)
    elif in_file.name.endswith('.yaml'):
        nodes_config = yaml.safe_load(in_file)
    else:
        raise exceptions.InvalidConfiguration(
            _("Invalid file extension for %s, must be json, yaml or csv") %
            parsed_args.file_in.name)

    # Accept either a bare list or a {'nodes': [...]} wrapper.
    if 'nodes' in nodes_config:
        nodes_config = nodes_config['nodes']

    nodes.register_all_nodes(
        parsed_args.service_host,
        nodes_config,
        client=self.app.client_manager.baremetal,
        keystone_client=self.app.client_manager.identity)
def take_action(self, parsed_args):
    """Load parameters from a JSON/YAML file and apply them via ansible."""
    self.log.debug("take_action(%s)" % parsed_args)

    file_name = parsed_args.file_in.name
    if file_name.endswith('.json'):
        params = simplejson.load(parsed_args.file_in)
    elif file_name.endswith('.yaml'):
        params = yaml.safe_load(parsed_args.file_in)
    else:
        raise exceptions.InvalidConfiguration(
            _("Invalid file extension for %s, must be json or yaml") %
            parsed_args.file_in.name)

    # A full environment file may be supplied; unwrap it.
    if 'parameter_defaults' in params:
        params = params['parameter_defaults']

    with utils.TempDirs() as tmp:
        utils.run_ansible_playbook(
            playbook='cli-update-params.yaml',
            inventory='localhost,',
            workdir=tmp,
            playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS,
            verbosity=utils.playbook_verbosity(self=self),
            extra_vars={"container": parsed_args.name},
            extra_vars_file={"parameters": params})
def _deploy_tripleo_heat_templates(self, stack, parsed_args,
                                   tht_root, user_tht_root):
    """Deploy the fixed templates in TripleO Heat Templates"""
    plans = plan_management.list_deployment_plans(self.clients)
    generate_passwords = not parsed_args.disable_password_generation

    # TODO(d0ugal): We need to put a more robust strategy in place here to
    # handle updating plans.
    if parsed_args.stack in plans:
        # Upload the new plan templates to swift to replace the existing
        # templates.
        plan_management.update_plan_from_templates(
            self.clients, parsed_args.stack, tht_root,
            parsed_args.roles_file, generate_passwords,
            parsed_args.plan_environment_file,
            parsed_args.networks_file,
            type(self)._keep_env_on_update)
    else:
        plan_management.create_plan_from_templates(
            self.clients, parsed_args.stack, tht_root,
            parsed_args.roles_file, generate_passwords,
            parsed_args.plan_environment_file,
            parsed_args.networks_file)

    # Get any missing (e.g j2 rendered) files from the plan to tht_root
    self._download_missing_files_from_plan(
        tht_root, parsed_args.stack)

    print("Processing templates in the directory {0}".format(
        os.path.abspath(tht_root)))

    self.log.debug("Creating Environment files")
    env = {}
    created_env_files = []

    if parsed_args.environment_directories:
        created_env_files.extend(
            utils.load_environment_directories(
                parsed_args.environment_directories))
    parameters = {}
    if stack:
        try:
            # If user environment already exist then keep it
            user_env = yaml.safe_load(
                self.object_client.get_object(
                    parsed_args.stack, constants.USER_ENVIRONMENT)[1])
            template_utils.deep_update(env, user_env)
        except ClientException:
            # No previous user environment in the plan; start fresh.
            pass
    parameters.update(self._update_parameters(parsed_args, stack))
    template_utils.deep_update(
        env,
        self._create_parameters_env(parameters, tht_root,
                                    parsed_args.stack))

    if parsed_args.rhel_reg:
        reg_env_files, reg_env = self._create_registration_env(
            parsed_args, tht_root)
        created_env_files.extend(reg_env_files)
        template_utils.deep_update(env, reg_env)
    if parsed_args.environment_files:
        created_env_files.extend(parsed_args.environment_files)

    self.log.debug("Processing environment files %s" % created_env_files)
    env_files, localenv = utils.process_multiple_environments(
        created_env_files, tht_root, user_tht_root,
        cleanup=not parsed_args.no_cleanup)
    # User/CLI environments take precedence over what was built so far.
    template_utils.deep_update(env, localenv)

    if stack:
        # Existing stack: ensure stale breakpoints are cleaned up.
        bp_cleanup = self._create_breakpoint_cleanup_env(
            tht_root, parsed_args.stack)
        template_utils.deep_update(env, bp_cleanup)

    # FIXME(shardy) It'd be better to validate this via mistral
    # e.g part of the plan create/update workflow
    number_controllers = int(parameters.get('ControllerCount', 0))
    if number_controllers > 1:
        # HA deployments require an NTP server for controller time sync.
        if not env.get('parameter_defaults').get('NtpServer'):
            raise exceptions.InvalidConfiguration(
                'Specify --ntp-server as parameter or NtpServer in '
                'environments when using multiple controllers '
                '(with HA).')

    self._try_overcloud_deploy_with_compat_yaml(
        tht_root, stack, parsed_args.stack, parameters, env_files,
        parsed_args.timeout, env, parsed_args.update_plan_only,
        parsed_args.run_validations, parsed_args.skip_deploy_identifier,
        parsed_args.plan_environment_file)
def take_action(self, parsed_args):
    """Validate arguments, then create or update the overcloud stack.

    Runs predeploy validations, deploys the heat templates, optionally
    runs config-download, writes the overcloudrc and prints the overcloud
    endpoints.
    """
    self.log.debug("take_action(%s)" % parsed_args)

    self._setup_clients(parsed_args)

    # Swiftclient logs things like 404s at error level, which is a problem
    # because we use EAFP to check for the existence of files. Turn off
    # most swiftclient logging to avoid cluttering up our output with
    # pointless tracebacks.
    sc_logger = logging.getLogger("swiftclient")
    sc_logger.setLevel(logging.CRITICAL)

    self._validate_args(parsed_args)

    stack = utils.get_stack(self.orchestration_client, parsed_args.stack)

    if stack and stack.stack_status == 'IN_PROGRESS':
        raise exceptions.StackInProgress(
            "Unable to deploy as the stack '{}' status is '{}'".format(
                stack.stack_name, stack.stack_status))

    parameters = self._update_parameters(parsed_args, stack)

    if not parsed_args.disable_validations:
        errors, warnings = self._predeploy_verify_capabilities(
            stack, parameters, parsed_args)
        if errors > 0:
            self.log.error(
                "Configuration has %d errors, fix them before "
                "proceeding. Ignoring these errors is likely to lead to "
                "a failed deploy.", errors)
            if parsed_args.validation_warnings_fatal or \
                    parsed_args.validation_errors_fatal:
                raise exceptions.InvalidConfiguration()
        if warnings > 0:
            self.log.error(
                "Configuration has %d warnings, fix them before "
                "proceeding.", warnings)
            if parsed_args.validation_warnings_fatal:
                raise exceptions.InvalidConfiguration()
        else:
            self.log.info("SUCCESS: No warnings or errors in deploy "
                          "configuration, proceeding.")

    stack_create = stack is None
    if stack_create:
        self.log.info("No stack found, will be doing a stack create")
    else:
        self.log.info("Stack found, will be doing a stack update")

    if parsed_args.rhel_reg:
        if parsed_args.reg_method == 'satellite':
            sat_required_args = (parsed_args.reg_org and
                                 parsed_args.reg_sat_url and
                                 parsed_args.reg_activation_key)
            if not sat_required_args:
                raise exceptions.DeploymentError(
                    "ERROR: In order to use satellite registration, "
                    "you must specify --reg-org, --reg-sat-url, and "
                    "--reg-activation-key.")
        else:
            portal_required_args = (parsed_args.reg_org and
                                    parsed_args.reg_activation_key)
            if not portal_required_args:
                raise exceptions.DeploymentError(
                    "ERROR: In order to use portal registration, you "
                    "must specify --reg-org, and "
                    "--reg-activation-key.")

    if parsed_args.dry_run:
        print("Validation Finished")
        return

    self._deploy_tripleo_heat_templates_tmpdir(stack, parsed_args)

    # Get a new copy of the stack after stack update/create. If it was
    # a create then the previous stack object would be None.
    stack = utils.get_stack(self.orchestration_client, parsed_args.stack)

    if parsed_args.update_plan_only:
        # If we are only updating the plan, then we either wont have a
        # stack yet or there wont be any changes and the following code
        # wont do anything.
        return

    if parsed_args.config_download:
        print("Deploying overcloud configuration")
        deployment.config_download(self.log, self.clients, stack,
                                   parsed_args.templates,
                                   parsed_args.deployed_server,
                                   parsed_args.overcloud_ssh_user,
                                   parsed_args.overcloud_ssh_key,
                                   parsed_args.output_dir,
                                   verbosity=self.app_args.verbose_level)

    # Force fetching of attributes
    stack.get()

    overcloudrcs = deployment.overcloudrc(
        self.workflow_client, container=stack.stack_name,
        no_proxy=parsed_args.no_proxy)
    rcpath = utils.write_overcloudrc(stack.stack_name, overcloudrcs)
    utils.create_tempest_deployer_input()

    # Run postconfig on create or force. Use force to makes sure endpoints
    # are created with deploy reruns and upgrades
    # NOTE(review): operator precedence makes this read as
    # `stack_create or (force_postconfig and not skip_postconfig)`, so
    # --skip-postconfig has no effect on an initial create — confirm
    # whether `(stack_create or force_postconfig) and not
    # skip_postconfig` was intended.
    if (stack_create or parsed_args.force_postconfig
            and not parsed_args.skip_postconfig):
        self._deploy_postconfig(stack, parsed_args)

    overcloud_endpoint = utils.get_overcloud_endpoint(stack)
    horizon_url = deployment.get_horizon_url(self.clients,
                                             stack=stack.stack_name)
    print("Overcloud Endpoint: {0}".format(overcloud_endpoint))
    print("Overcloud Horizon Dashboard URL: {0}".format(horizon_url))
    print("Overcloud rc file: {0}".format(rcpath))
    print("Overcloud Deployed")
def _update_parameters(self, args, network_client, stack):
    """Build the Heat parameters dict from CLI args and the answers file.

    :param args: parsed CLI arguments; may be mutated (templates and
        environment_files can be filled in from the answers file).
    :param network_client: unused here; kept for interface compatibility.
    :param stack: existing stack object, or None for a fresh deploy.
    :returns: dict of Heat parameter values.
    :raises: exceptions.InvalidConfiguration when multiple controllers
        are requested without an NTP server.
    """
    parameters = {}

    stack_is_new = stack is None

    self.log.debug("Generating overcloud passwords")
    self.set_overcloud_passwords(stack_is_new, parameters)

    # Changes on every run so Heat sees an update even when nothing
    # else changed.
    timestamp = int(time.time())
    parameters['DeployIdentifier'] = timestamp

    # Update parameters from answers file:
    if args.answers_file is not None:
        with open(args.answers_file, 'r') as answers_file:
            # safe_load: the answers file is plain data (templates path
            # and environments list); yaml.load without an explicit
            # Loader is unsafe and deprecated.
            answers = yaml.safe_load(answers_file)

        if args.templates is None:
            args.templates = answers['templates']
        if 'environments' in answers:
            # CLI-provided environments are appended after the answers
            # file ones so they take precedence.
            if args.environment_files is not None:
                answers['environments'].extend(args.environment_files)
            args.environment_files = answers['environments']

    # (Heat parameter name, CLI argument name) pairs copied verbatim
    # when the argument was supplied.
    param_args = (('NeutronPublicInterface', 'neutron_public_interface'),
                  ('NeutronBridgeMappings', 'neutron_bridge_mappings'),
                  ('NeutronFlatNetworks', 'neutron_flat_networks'),
                  ('HypervisorNeutronPhysicalBridge',
                   'neutron_physical_bridge'),
                  ('NtpServer', 'ntp_server'),
                  ('ControllerCount', 'control_scale'),
                  ('ComputeCount', 'compute_scale'),
                  ('ObjectStorageCount', 'swift_storage_scale'),
                  ('BlockStorageCount', 'block_storage_scale'),
                  ('CephStorageCount', 'ceph_storage_scale'),
                  ('OvercloudControlFlavor', 'control_flavor'),
                  ('OvercloudComputeFlavor', 'compute_flavor'),
                  ('OvercloudBlockStorageFlavor', 'block_storage_flavor'),
                  ('OvercloudSwiftStorageFlavor', 'swift_storage_flavor'),
                  ('OvercloudCephStorageFlavor', 'ceph_storage_flavor'),
                  ('NeutronNetworkVLANRanges',
                   'neutron_network_vlan_ranges'),
                  ('NeutronMechanismDrivers', 'neutron_mechanism_drivers'))

    if stack_is_new:
        # These settings cannot be changed on an existing stack.
        new_stack_args = (
            ('NeutronNetworkType', 'neutron_network_type'),
            ('NeutronTunnelIdRanges', 'neutron_tunnel_id_ranges'),
            ('NeutronTunnelTypes', 'neutron_tunnel_types'),
            ('NeutronVniRanges', 'neutron_vni_ranges'),
            ('NovaComputeLibvirtType', 'libvirt_type'),
        )
        param_args = param_args + new_stack_args

        if args.neutron_disable_tunneling is not None:
            neutron_enable_tunneling = (
                not args.neutron_disable_tunneling)
            parameters.update({
                'NeutronEnableTunnelling': neutron_enable_tunneling,
            })

    # Update parameters from commandline
    for param, arg in param_args:
        if getattr(args, arg, None) is not None:
            # these must be converted to [] which is what Heat expects
            if param.endswith(('NeutronTunnelIdRanges',
                               'NeutronVniRanges')):
                parameters[param] = [getattr(args, arg)]
            else:
                parameters[param] = getattr(args, arg)

    # Scaling needs extra parameters
    number_controllers = int(parameters.get('ControllerCount', 0))
    if number_controllers > 1:
        if not args.ntp_server:
            raise exceptions.InvalidConfiguration(
                'Specify --ntp-server when using multiple controllers '
                '(with HA).')
        parameters.update({
            'NeutronL3HA': True,
            'NeutronAllowL3AgentFailover': False,
        })
    else:
        parameters.update({
            'NeutronL3HA': False,
            'NeutronAllowL3AgentFailover': False,
        })

    dhcp_agents_per_network = (min(number_controllers, 3) if
                               number_controllers else 1)
    parameters.update({
        'NeutronDhcpAgentsPerNetwork': dhcp_agents_per_network,
    })

    if int(parameters.get('CephStorageCount', 0)) > 0:
        if stack_is_new:
            # Generated once at creation time; these must stay stable
            # across subsequent stack updates.
            parameters.update({
                'CephClusterFSID': six.text_type(uuid.uuid1()),
                'CephMonKey': utils.create_cephx_key(),
                'CephAdminKey': utils.create_cephx_key()
            })

    return parameters
def _validate_args(self, parsed_args):
    """Sanity-check CLI arguments before attempting a deploy.

    Raises CommandError for unusable argument combinations and warns
    about suspicious roles-file contents.
    """
    if parsed_args.templates is None and parsed_args.answers_file is None:
        raise oscexc.CommandError(
            "You must specify either --templates or --answers-file")

    if parsed_args.environment_files:
        nonexisting_envs = []
        jinja2_envs = []
        for env_file in parsed_args.environment_files:
            if env_file.endswith(".j2.yaml"):
                # Jinja2 sources cannot be passed directly; the rendered
                # .yaml must be used instead (error reported below).
                jinja2_envs.append(env_file)
            elif not os.path.isfile(env_file):
                # Tolerate missing file if there's a j2.yaml file that will
                # be rendered in the plan but not available locally (yet)
                if not os.path.isfile(env_file.replace(
                        ".yaml", ".j2.yaml")):
                    nonexisting_envs.append(env_file)
        if jinja2_envs:
            rewritten_paths = [e.replace(".j2.yaml", ".yaml")
                               for e in jinja2_envs]
            raise oscexc.CommandError(
                "Error: The following jinja2 files were provided: -e "
                "{}. Did you mean -e {}?".format(
                    ' -e '.join(jinja2_envs),
                    ' -e '.join(rewritten_paths)))
        if nonexisting_envs:
            raise oscexc.CommandError(
                "Error: The following files were not found: {0}".format(
                    ", ".join(nonexisting_envs)))

    if parsed_args.deployed_server and (
            parsed_args.run_validations or not
            parsed_args.disable_validations):
        raise oscexc.CommandError(
            "Error: The --deployed-server cannot be used without "
            "the --disable-validations")

    # Check if disable_upgrade_deployment is set once
    self.log.debug("Checking that the disable_upgrade_deployment flag "
                   "is set at least once in the roles file")
    if parsed_args.roles_file:
        # NOTE(review): the file handle from open() is never closed here.
        roles_data = yaml.safe_load(open(parsed_args.roles_file).read())
        disable_upgrade_deployment_set = False
        for r in roles_data:
            if r.get("disable_upgrade_deployment"):
                disable_upgrade_deployment_set = True
                break
        if not disable_upgrade_deployment_set:
            self.log.warning(
                "The disable_upgrade_deployment flag is not set in the "
                "roles file. This flag is expected when you have a "
                "nova-compute or swift-storage role. Please check the "
                "contents of the roles file: %s" % roles_data)
            if parsed_args.validation_warnings_fatal:
                # NOTE(review): this turns the warning above into a hard
                # failure under --validation-warnings-fatal, raising with
                # no message — confirm this placement is intentional.
                raise exceptions.InvalidConfiguration()

    if parsed_args.environment_directories:
        self._validate_args_environment_directory(
            parsed_args.environment_directories)