def configure_system(config_file):
    """Configure the system"""

    # Parse the system config file
    print("Parsing system configuration file... ", end=' ')
    system_config = parse_system_config(config_file)
    print("DONE")

    # Validate the system config file
    print("Validating system configuration file... ", end=' ')
    try:
        create_cgcs_config_file(None, system_config, None, None, None,
                                DEFAULT_CONFIG, validate_only=True)
    except configparser.Error as e:
        raise ConfigFail("Error parsing configuration file %s: %s" %
                         (config_file, e))
    print("DONE")

    # Create cgcs_config file
    print("Creating config apply file... ", end=' ')
    try:
        create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, system_config, None,
                                None, None, DEFAULT_CONFIG)
    except configparser.Error as e:
        raise ConfigFail("Error parsing configuration file %s: %s" %
                         (config_file, e))
    print("DONE")

def get_management_mac_address():
    ifname = CONF.get('BOOTSTRAP_CONFIG', 'MANAGEMENT_INTERFACE')
    try:
        filename = '/sys/class/net/%s/address' % ifname
        with open(filename, 'r') as f:
            return f.readline().rstrip()
    except Exception:
        raise ConfigFail("Failed to obtain mac address of %s" % ifname)

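# Note: /sys/class/net/<ifname>/address holds the interface MAC address as
# colon-separated hex followed by a newline (e.g. "08:00:27:3c:9f:1a\n"),
# which is why a single readline().rstrip() above is sufficient. The example
# address is illustrative only.
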
def config_main(config_type=REGION_CONFIG):
    allow_ssh = False
    if config_type == REGION_CONFIG:
        config_file = "/home/sysadmin/region_config"
    elif config_type == SUBCLOUD_CONFIG:
        config_file = "/home/sysadmin/subcloud_config"
    else:
        raise ConfigFail("Invalid config_type: %s" % config_type)

    arg = 1
    while arg < len(sys.argv):
        if sys.argv[arg] in ['--help', '-h', '-?']:
            if config_type == REGION_CONFIG:
                show_help_region()
            else:
                show_help_subcloud()
            exit(1)
        elif sys.argv[arg] == "--allow-ssh":
            allow_ssh = True
        elif arg == len(sys.argv) - 1:
            config_file = sys.argv[arg]
        else:
            print("Invalid option. Use --help for more information.")
            exit(1)
        arg += 1

    log.configure()

    # Check that the command is being run from the console
    if utils.is_ssh_parent():
        if allow_ssh:
            print(textwrap.fill(constants.SSH_WARNING_MESSAGE, 80))
            print('')
        else:
            print(textwrap.fill(constants.SSH_ERROR_MESSAGE, 80))
            exit(1)

    if not os.path.isfile(config_file):
        print("Config file %s does not exist." % config_file)
        exit(1)

    try:
        configure_region(config_file, config_type=config_type)
    except KeyboardInterrupt:
        print("\nAborting configuration")
    except ConfigFail as e:
        LOG.exception(e)
        print("\nConfiguration failed: {}".format(e))
    except Exception as e:
        LOG.exception(e)
        print("\nConfiguration failed: {}".format(e))
    else:
        print("\nConfiguration finished successfully.")
    finally:
        if os.path.isfile(TEMP_CGCS_CONFIG_FILE):
            os.remove(TEMP_CGCS_CONFIG_FILE)

def validate_region_one_ldap_config(region_config):
    """Validate ldap on region one by an LDAP search"""

    ldapserver_uri = region_config.get('SHARED_SERVICES', 'LDAP_SERVICE_URL')
    cmd = ["ldapsearch", "-xH", ldapserver_uri,
           "-b", "dc=cgcs,dc=local", "(objectclass=*)"]
    try:
        with open(os.devnull, "w") as fnull:
            subprocess.check_call(cmd, stdout=fnull, stderr=fnull)
    except subprocess.CalledProcessError:
        raise ConfigFail("LDAP configuration error: not accessible")

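# For reference, the check above is equivalent to running the following from
# a shell (illustrative; substitute the configured LDAP_SERVICE_URL):
#
#   ldapsearch -x -H <LDAP_SERVICE_URL> -b dc=cgcs,dc=local '(objectclass=*)'
#
# -x requests a simple bind and -H selects the server URI; any non-zero exit
# status is reported as "LDAP configuration error: not accessible".
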
def wait_system_config(client):
    for _ in range(constants.SYSTEM_CONFIG_TIMEOUT):
        try:
            systems = client.sysinv.isystem.list()
            if systems:
                # only one system (default)
                return systems[0]
        except Exception:
            pass
        time.sleep(1)
    else:
        raise ConfigFail('Timeout waiting for default system '
                         'configuration')

def find_boot_device():
    """Determine boot device"""
    boot_device = None

    context = pyudev.Context()

    # Get the boot partition
    # Unfortunately, it seems we can only get it from the logfile.
    # We'll parse the device used from a line like the following:
    # BIOSBoot.create: device: /dev/sda1 ; status: False ; type: biosboot ;
    # or
    # EFIFS.create: device: /dev/sda1 ; status: False ; type: efi ;
    #
    logfile = '/var/log/anaconda/storage.log'

    re_line = re.compile(r'(BIOSBoot|EFIFS).create: device: ([^\s;]*)')
    boot_partition = None
    with open(logfile, 'r') as f:
        for line in f:
            match = re_line.search(line)
            if match:
                boot_partition = match.group(2)
                break
    if boot_partition is None:
        raise ConfigFail("Failed to determine the boot partition")

    # Find the boot partition and get its parent
    for device in context.list_devices(DEVTYPE='partition'):
        if device.device_node == boot_partition:
            boot_device = device.find_parent('block').device_node
            break

    if boot_device is None:
        raise ConfigFail("Failed to determine the boot device")

    return boot_device

def parse_system_config(config_file):
    """Parse system config file"""
    system_config = configparser.RawConfigParser()
    try:
        system_config.read(config_file)
    except Exception as e:
        LOG.exception(e)
        raise ConfigFail("Error parsing system config file")

    # Dump configuration for debugging
    # for section in system_config.sections():
    #     print("Section: %s" % section)
    #     for (name, value) in system_config.items(section):
    #         print("name: %s, value: %s" % (name, value))
    return system_config

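# Illustrative only -- a minimal sketch of the INI layout this parser expects,
# using section and option names that are looked up elsewhere in this module
# (real configuration files carry many more options, and the values shown
# here are placeholders):
#
#   [SYSTEM]
#   TIMEZONE = UTC
#
#   [SHARED_SERVICES]
#   REGION_NAME = RegionOne
#   ADMIN_USER_NAME = admin
#
#   [MGMT_NETWORK]
#   CIDR = 192.168.204.0/24
#   IP_START_ADDRESS = 192.168.204.2
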
def wait_pv_config(client, host):
    count = 0
    pvs = None
    for _ in range(constants.SYSTEM_CONFIG_TIMEOUT // 10):
        try:
            pvs = client.sysinv.ipv.list(host.uuid)
            if pvs and count == len(pvs):
                return pvs
            count = len(pvs)
        except Exception:
            pass
        if pvs:
            time.sleep(1)  # We don't need to wait that long
        else:
            time.sleep(10)
    else:
        raise ConfigFail('Timeout waiting for controller PV '
                         'configuration')

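# wait_pv_config() above polls until the PV list is non-empty and its length
# is unchanged across two consecutive polls, giving up after a bounded number
# of attempts. A minimal standalone sketch of that pattern (kept as a comment
# so it does not become part of this module's API; the helper name and call
# below are illustrative):
#
#   def _poll_until_stable(fetch, attempts, interval=1):
#       previous_count = None
#       for _ in range(attempts):
#           try:
#               result = fetch()
#               if result and previous_count == len(result):
#                   return result
#               previous_count = len(result)
#           except Exception:
#               pass
#           time.sleep(interval)
#       raise ConfigFail('Timeout waiting for configuration')
#
#   _poll_until_stable(lambda: client.sysinv.ipv.list(host.uuid),
#                      constants.SYSTEM_CONFIG_TIMEOUT // 10)
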
def configure_management_interface(region_config, config_type=REGION_CONFIG):
    """Bring up management interface"""
    mgmt_network = Network()
    if region_config.has_section('CLM_NETWORK'):
        naming_type = HP_NAMES
    else:
        naming_type = DEFAULT_NAMES

    if config_type == SUBCLOUD_CONFIG:
        min_addresses = 5
    else:
        min_addresses = 8
    try:
        mgmt_network.parse_config(region_config, config_type, MGMT_TYPE,
                                  min_addresses=min_addresses,
                                  naming_type=naming_type)
    except ConfigFail:
        raise
    except Exception as e:
        LOG.exception("Error parsing configuration file")
        raise ConfigFail("Error parsing configuration file: %s" % e)

    try:
        # Remove interface config files currently installed
        utils.remove_interface_config_files()

        # Create the management interface configuration files.
        # Code based on ConfigAssistant._write_interface_config_management
        parameters = utils.get_interface_config_static(
            mgmt_network.start_address,
            mgmt_network.cidr,
            mgmt_network.gateway_address)

        if mgmt_network.logical_interface.lag_interface:
            management_interface = 'bond0'
        else:
            management_interface = mgmt_network.logical_interface.ports[0]

        if mgmt_network.vlan:
            management_interface_name = "%s.%s" % (management_interface,
                                                   mgmt_network.vlan)
            utils.write_interface_config_vlan(
                management_interface_name,
                mgmt_network.logical_interface.mtu,
                parameters)

            # underlying interface has no additional parameters
            parameters = None
        else:
            management_interface_name = management_interface

        if mgmt_network.logical_interface.lag_interface:
            utils.write_interface_config_bond(
                management_interface,
                mgmt_network.logical_interface.mtu,
                lag_mode_to_str(mgmt_network.logical_interface.lag_mode),
                None,
                constants.LAG_MIIMON_FREQUENCY,
                mgmt_network.logical_interface.ports[0],
                mgmt_network.logical_interface.ports[1],
                parameters)
        else:
            utils.write_interface_config_ethernet(
                management_interface,
                mgmt_network.logical_interface.mtu,
                parameters)

        # Restart networking with the new management interface configuration
        utils.restart_networking()

        # Send a GARP for the floating address. Doing this to help in
        # cases where we are re-installing in a lab and another node
        # previously held the floating address.
        if mgmt_network.cidr.version == 4:
            utils.send_interface_garp(management_interface_name,
                                      mgmt_network.start_address)
    except Exception:
        LOG.exception("Failed to configure management interface")
        raise ConfigFail("Failed to configure management interface")

def main():
    options = {}
    answerfile = None
    backup_name = None
    archive_dir = constants.BACKUPS_PATH
    do_default_config = False
    do_backup = False
    do_system_restore = False
    include_storage_reinstall = False
    do_clone = False
    do_non_interactive = False
    do_provision = False
    system_config_file = "/home/sysadmin/system_config"
    allow_ssh = False

    # Disable completion as the default completer shows python commands
    readline.set_completer(no_complete)

    # Remove any previous config fail flag file
    if os.path.exists(constants.CONFIG_FAIL_FILE):
        os.remove(constants.CONFIG_FAIL_FILE)

    if os.environ.get('CGCS_LABMODE'):
        options['labmode'] = True

    arg = 1
    while arg < len(sys.argv):
        if sys.argv[arg] == "--answerfile":
            arg += 1
            if arg < len(sys.argv):
                answerfile = sys.argv[arg]
            else:
                print("--answerfile option requires a file to be specified")
                exit(1)
        elif sys.argv[arg] == "--backup":
            arg += 1
            if arg < len(sys.argv):
                backup_name = sys.argv[arg]
            else:
                print("--backup requires the name of the backup")
                exit(1)
            do_backup = True
        elif sys.argv[arg] == "--restore-system":
            arg += 1
            if arg < len(sys.argv):
                if sys.argv[arg] in ["include-storage-reinstall",
                                     "exclude-storage-reinstall"]:
                    if sys.argv[arg] == "include-storage-reinstall":
                        include_storage_reinstall = True
                    arg += 1
                    if arg < len(sys.argv):
                        backup_name = sys.argv[arg]
                    else:
                        print(textwrap.fill(
                            "--restore-system requires the filename "
                            "of the backup", 80))
                        exit(1)
                else:
                    backup_name = sys.argv[arg]
            else:
                print(textwrap.fill(
                    "--restore-system requires the filename "
                    "of the backup", 80))
                exit(1)
            do_system_restore = True
        elif sys.argv[arg] == "--archive-dir":
            arg += 1
            if arg < len(sys.argv):
                archive_dir = sys.argv[arg]
            else:
                print("--archive-dir requires a directory")
                exit(1)
        elif sys.argv[arg] == "--clone-iso":
            arg += 1
            if arg < len(sys.argv):
                backup_name = sys.argv[arg]
            else:
                print("--clone-iso requires the name of the image")
                exit(1)
            do_clone = True
        elif sys.argv[arg] == "--clone-status":
            clone.clone_status()
            exit(0)
        elif sys.argv[arg] == "--default":
            do_default_config = True
        elif sys.argv[arg] == "--config-file":
            arg += 1
            if arg < len(sys.argv):
                system_config_file = sys.argv[arg]
            else:
                print("--config-file requires the filename of the config "
                      "file")
                exit(1)
            do_non_interactive = True
        elif sys.argv[arg] in ["--help", "-h", "-?"]:
            show_help()
            exit(1)
        elif sys.argv[arg] == "--labhelp":
            show_help_lab_only()
            exit(1)
        elif sys.argv[arg] == "--provision":
            do_provision = True
        elif sys.argv[arg] == "--allow-ssh":
            allow_ssh = True
        elif sys.argv[arg] == "--kubernetes":
            # This is a temporary flag for use during development. Once things
            # are stable, we will remove it and make kubernetes the default.
            options['kubernetes'] = True
        else:
            print("Invalid option. Use --help for more information.")
            exit(1)
        arg += 1

    if [do_backup,
            do_system_restore,
            do_clone,
            do_default_config,
            do_non_interactive].count(True) > 1:
        print("Invalid combination of options selected")
        exit(1)

    if answerfile and [do_backup,
                       do_system_restore,
                       do_clone,
                       do_default_config,
                       do_non_interactive].count(True) > 0:
        print("The --answerfile option cannot be used with the selected "
              "option")
        exit(1)

    log.configure()

    if not do_backup and not do_clone:
        # Check that the command is being run from the console
        if utils.is_ssh_parent():
            if allow_ssh:
                print(textwrap.fill(constants.SSH_WARNING_MESSAGE, 80))
                print('')
            else:
                print(textwrap.fill(constants.SSH_ERROR_MESSAGE, 80))
                exit(1)

    # Reduce the printk console log level to avoid noise during configuration
    printk_levels = ''
    with open('/proc/sys/kernel/printk', 'r') as f:
        printk_levels = f.readline()

    temp_printk_levels = '3' + printk_levels[1:]
    with open('/proc/sys/kernel/printk', 'w') as f:
        f.write(temp_printk_levels)

    try:
        if do_backup:
            backup_restore.backup(backup_name, archive_dir)
            print("\nBackup complete")
        elif do_system_restore:
            backup_restore.restore_system(backup_name,
                                          include_storage_reinstall)
            print("\nSystem restore complete")
        elif do_clone:
            clone.clone(backup_name, archive_dir)
            print("\nCloning complete")
        elif do_provision:
            assistant = ConfigAssistant(**options)
            assistant.provision(answerfile)
        else:
            print(textwrap.fill(
                "Please use bootstrap playbook to configure the "
                "first controller.", 80))
            exit(1)

            # NOTE: the exit(1) above makes the rest of this branch
            # unreachable.
            if do_non_interactive:
                if not os.path.isfile(system_config_file):
                    raise ConfigFail("Config file %s does not exist." %
                                     system_config_file)
                if (os.path.exists(constants.CGCS_CONFIG_FILE) or
                        os.path.exists(constants.CONFIG_PERMDIR) or
                        os.path.exists(
                            constants.INITIAL_CONFIG_COMPLETE_FILE)):
                    raise ConfigFail("Configuration has already been done "
                                     "and cannot be repeated.")
                configure_system(system_config_file)
                answerfile = TEMP_CGCS_CONFIG_FILE

            assistant = ConfigAssistant(**options)
            assistant.configure(answerfile, do_default_config)
            print("\nConfiguration was applied\n")
            print(textwrap.fill(
                "Please complete any out of service commissioning steps "
                "with system commands and unlock controller to proceed.", 80))
            assistant.check_required_interfaces_status()

    except KeyboardInterrupt:
        print("\nAborting configuration")
    except BackupFail as e:
        print("\nBackup failed: {}".format(e))
    except RestoreFail as e:
        print("\nRestore failed: {}".format(e))
    except ConfigFail as e:
        print("\nConfiguration failed: {}".format(e))
    except CloneFail as e:
        print("\nCloning failed: {}".format(e))
    except UserQuit:
        print("\nAborted configuration")
    finally:
        if os.path.isfile(TEMP_CGCS_CONFIG_FILE):
            os.remove(TEMP_CGCS_CONFIG_FILE)

        # Restore the printk console log level
        with open('/proc/sys/kernel/printk', 'w') as f:
            f.write(printk_levels)

def create_cgcs_config_file(output_file, system_config, services, endpoints,
                            domains, config_type=REGION_CONFIG,
                            validate_only=False):
    """
    Create cgcs_config file or just perform validation of the system_config
    if validate_only=True.
    :param output_file: filename of output cgcs_config file
    :param system_config: system configuration
    :param services: keystone services (not used if validate_only)
    :param endpoints: keystone endpoints (not used if validate_only)
    :param domains: keystone domains (not used if validate_only)
    :param config_type: specify region, subcloud or standard config
    :param validate_only: used to validate the input system_config
    :return:
    """
    cgcs_config = None
    if not validate_only:
        cgcs_config = configparser.RawConfigParser()
        cgcs_config.optionxform = str

    # General error checking; if not validate_only, the cgcs_config data is
    # also populated
    validate(system_config, config_type, cgcs_config)

    # Region configuration: services, endpoints and domain
    if config_type in [REGION_CONFIG, SUBCLOUD_CONFIG] and not validate_only:
        # The services and endpoints are not available in the validation phase
        region_1_name = system_config.get('SHARED_SERVICES', 'REGION_NAME')
        keystone_service_name = system_config.get('SHARED_SERVICES',
                                                  'KEYSTONE_SERVICE_NAME')
        keystone_service_type = system_config.get('SHARED_SERVICES',
                                                  'KEYSTONE_SERVICE_TYPE')
        keystone_service_id = services.get_service_id(keystone_service_name,
                                                      keystone_service_type)
        keystone_admin_url = endpoints.get_service_url(region_1_name,
                                                       keystone_service_id,
                                                       "admin")
        keystone_internal_url = endpoints.get_service_url(
            region_1_name, keystone_service_id, "internal")
        keystone_public_url = endpoints.get_service_url(
            region_1_name, keystone_service_id, "public")

        cgcs_config.set('cREGION', 'KEYSTONE_AUTH_URI', keystone_internal_url)
        cgcs_config.set('cREGION', 'KEYSTONE_IDENTITY_URI', keystone_admin_url)
        cgcs_config.set('cREGION', 'KEYSTONE_ADMIN_URI', keystone_admin_url)
        cgcs_config.set('cREGION', 'KEYSTONE_INTERNAL_URI',
                        keystone_internal_url)
        cgcs_config.set('cREGION', 'KEYSTONE_PUBLIC_URI', keystone_public_url)

        # if ldap is a shared service
        if system_config.has_option('SHARED_SERVICES', 'LDAP_SERVICE_URL'):
            ldap_service_url = system_config.get('SHARED_SERVICES',
                                                 'LDAP_SERVICE_URL')
            cgcs_config.set('cREGION', 'LDAP_SERVICE_URI', ldap_service_url)
            cgcs_config.set('cREGION', 'LDAP_SERVICE_NAME', 'open-ldap')
            cgcs_config.set('cREGION', 'LDAP_REGION_NAME', region_1_name)

        # If primary region is non-TiC and keystone entries already created,
        # the flag will tell puppet not to create them.
        if (system_config.has_option('REGION_2_SERVICES', 'CREATE') and
                system_config.get('REGION_2_SERVICES', 'CREATE') == 'Y'):
            cgcs_config.set('cREGION', 'REGION_SERVICES_CREATE', 'True')

    # System Timezone configuration
    if system_config.has_option('SYSTEM', 'TIMEZONE'):
        timezone = system_config.get('SYSTEM', 'TIMEZONE')
        if not os.path.isfile("/usr/share/zoneinfo/%s" % timezone):
            raise ConfigFail("Timezone file %s does not exist" % timezone)

    # Dump results for debugging
    # for section in cgcs_config.sections():
    #     print("[%s]" % section)
    #     for (name, value) in cgcs_config.items(section):
    #         print("%s=%s" % (name, value))

    if not validate_only:
        # Write config file
        with open(output_file, 'w') as config_file:
            cgcs_config.write(config_file)

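# Typical calling patterns, both used elsewhere in this module (see
# configure_system() and configure_region()):
#
#   validation pass (no file written, keystone data not needed):
#       create_cgcs_config_file(None, system_config, None, None, None,
#                               DEFAULT_CONFIG, validate_only=True)
#
#   generation pass (writes the temporary apply file):
#       create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, region_config,
#                               services, endpoints, domains,
#                               config_type=config_type)
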
def configure_region(config_file, config_type=REGION_CONFIG):
    """Configure the region"""

    # Parse the region/subcloud config file
    print("Parsing configuration file... ", end=' ')
    region_config = parse_system_config(config_file)
    print("DONE")

    if config_type == SUBCLOUD_CONFIG:
        # Set defaults in region_config for subclouds
        set_subcloud_config_defaults(region_config)

    # Validate the region/subcloud config file
    print("Validating configuration file... ", end=' ')
    try:
        create_cgcs_config_file(None, region_config, None, None, None,
                                config_type=config_type, validate_only=True)
    except configparser.Error as e:
        raise ConfigFail("Error parsing configuration file %s: %s" %
                         (config_file, e))
    print("DONE")

    # Bring up management interface to allow us to reach Region 1
    print("Configuring management interface... ", end=' ')
    configure_management_interface(region_config, config_type=config_type)
    print("DONE")

    # Get token from keystone
    print("Retrieving keystone token...", end=' ')
    sys.stdout.flush()
    auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL')
    if region_config.has_option('SHARED_SERVICES', 'ADMIN_TENANT_NAME'):
        auth_project = region_config.get('SHARED_SERVICES',
                                         'ADMIN_TENANT_NAME')
    else:
        auth_project = region_config.get('SHARED_SERVICES',
                                         'ADMIN_PROJECT_NAME')
    auth_user = region_config.get('SHARED_SERVICES', 'ADMIN_USER_NAME')
    auth_password = region_config.get('SHARED_SERVICES', 'ADMIN_PASSWORD')
    if region_config.has_option('SHARED_SERVICES', 'ADMIN_USER_DOMAIN'):
        admin_user_domain = region_config.get('SHARED_SERVICES',
                                              'ADMIN_USER_DOMAIN')
    else:
        admin_user_domain = DEFAULT_DOMAIN_NAME
    if region_config.has_option('SHARED_SERVICES', 'ADMIN_PROJECT_DOMAIN'):
        admin_project_domain = region_config.get('SHARED_SERVICES',
                                                 'ADMIN_PROJECT_DOMAIN')
    else:
        admin_project_domain = DEFAULT_DOMAIN_NAME

    attempts = 0
    token = None
    # Wait for connectivity to region one. It can take some time, especially
    # if we have LAG on the management network.
    while not token:
        token = rutils.get_token(auth_url, auth_project, auth_user,
                                 auth_password, admin_user_domain,
                                 admin_project_domain)
        if not token:
            attempts += 1
            if attempts < 10:
                print("\rRetrieving keystone token...{}".format(
                    '.' * attempts), end=' ')
                sys.stdout.flush()
                time.sleep(10)
            else:
                raise ConfigFail(
                    "Unable to obtain keystone token. Please ensure "
                    "networking and keystone configuration is correct.")
    print("DONE")

    # Get services, endpoints, users and domains from keystone
    print("Retrieving services, endpoints and users from keystone... ",
          end=' ')
    region_name = region_config.get('SHARED_SERVICES', 'REGION_NAME')
    service_name = region_config.get('SHARED_SERVICES',
                                     'KEYSTONE_SERVICE_NAME')
    service_type = region_config.get('SHARED_SERVICES',
                                     'KEYSTONE_SERVICE_TYPE')

    api_url = token.get_service_url(
        region_name, service_name, service_type, "admin").replace(
        'v2.0', 'v3')

    services = rutils.get_services(token, api_url)
    endpoints = rutils.get_endpoints(token, api_url)
    users = rutils.get_users(token, api_url)
    domains = rutils.get_domains(token, api_url)
    if not services or not endpoints or not users:
        raise ConfigFail(
            "Unable to retrieve services, endpoints or users from keystone. "
            "Please ensure networking and keystone configuration is correct.")
    print("DONE")

    user_config = None
    if config_type == SUBCLOUD_CONFIG:
        # Retrieve subcloud configuration from dcmanager
        print("Retrieving configuration from dcmanager... ", end=' ')
        dcmanager_url = token.get_service_url(
            'SystemController', 'dcmanager', 'dcmanager', "admin")
        subcloud_name = region_config.get('REGION_2_SERVICES', 'REGION_NAME')
        subcloud_management_subnet = region_config.get('MGMT_NETWORK', 'CIDR')
        hash_string = subcloud_name + subcloud_management_subnet
        subcloud_config = rutils.get_subcloud_config(token, dcmanager_url,
                                                     subcloud_name,
                                                     hash_string)
        user_config = subcloud_config['users']
        print("DONE")

    try:
        # Configure missing region one keystone entries
        create = True
        # Prepare region configuration for puppet to create keystone
        # identities
        if (region_config.has_option('REGION_2_SERVICES', 'CREATE') and
                region_config.get('REGION_2_SERVICES', 'CREATE') == 'Y'):
            print("Preparing keystone configuration... ", end=' ')
        # If the keystone configuration for this region is already in place,
        # only validate it
        else:
            # Validate region one keystone config
            create = False
            print("Validating keystone configuration... ", end=' ')

        validate_region_one_keystone_config(region_config, token, api_url,
                                            users, services, endpoints,
                                            create, config_type=config_type,
                                            user_config=user_config)
        print("DONE")

        # validate ldap if it is shared
        if region_config.has_option('SHARED_SERVICES', 'LDAP_SERVICE_URL'):
            print("Validating ldap configuration... ", end=' ')
            validate_region_one_ldap_config(region_config)
            print("DONE")

        # Create cgcs_config file
        print("Creating config apply file... ", end=' ')
        try:
            create_cgcs_config_file(TEMP_CGCS_CONFIG_FILE, region_config,
                                    services, endpoints, domains,
                                    config_type=config_type)
        except configparser.Error as e:
            raise ConfigFail("Error parsing configuration file %s: %s" %
                             (config_file, e))
        print("DONE")

        # Configure controller
        assistant = ConfigAssistant()
        assistant.configure(TEMP_CGCS_CONFIG_FILE, display_config=False)

    except ConfigFail as e:
        print("A configuration failure has occurred.", end=' ')
        raise e

def validate_region_one_keystone_config(region_config, token, api_url, users,
                                        services, endpoints, create=False,
                                        config_type=REGION_CONFIG,
                                        user_config=None):
    """ Validate that the required region one configuration is in place.
        If create is True, any missing entries will be set up to be added
        to keystone later on by puppet.
    """

    region_1_name = region_config.get('SHARED_SERVICES', 'REGION_NAME')
    region_2_name = region_config.get('REGION_2_SERVICES', 'REGION_NAME')

    # Determine what keystone entries are expected
    expected_users = EXPECTED_USERS
    expected_region_2_endpoints = EXPECTED_REGION2_ENDPOINTS
    # Keystone is always in region 1
    expected_region_1_endpoints = [EXPECTED_KEYSTONE_ENDPOINT]

    domains = rutils.get_domains(token, api_url)
    # Verify the service project domain, creating it if necessary
    if region_config.has_option('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME'):
        project_domain = region_config.get('REGION_2_SERVICES',
                                           'PROJECT_DOMAIN_NAME')
    else:
        project_domain = DEFAULT_DOMAIN_NAME
    project_domain_id = domains.get_domain_id(project_domain)
    if not project_domain_id:
        if create and config_type == REGION_CONFIG:
            region_config.set('REGION_2_SERVICES', 'PROJECT_DOMAIN_NAME',
                              project_domain)
        else:
            raise ConfigFail(
                "Keystone configuration error: service project domain '%s' "
                "is not configured." % project_domain)

    # Verify the service project, creating it if necessary
    if region_config.has_option('SHARED_SERVICES', 'SERVICE_PROJECT_NAME'):
        service_project = region_config.get('SHARED_SERVICES',
                                            'SERVICE_PROJECT_NAME')
    else:
        service_project = region_config.get('SHARED_SERVICES',
                                            'SERVICE_TENANT_NAME')
    projects = rutils.get_projects(token, api_url)
    project_id = projects.get_project_id(service_project)
    if not project_id:
        if create and config_type == REGION_CONFIG:
            region_config.set('SHARED_SERVICES', 'SERVICE_TENANT_NAME',
                              service_project)
        else:
            raise ConfigFail(
                "Keystone configuration error: service project '%s' is not "
                "configured." % service_project)

    # Verify and retrieve the id of the admin role (only needed when creating)
    roles = rutils.get_roles(token, api_url)
    role_id = roles.get_role_id('admin')
    if not role_id and create:
        raise ConfigFail("Keystone configuration error: No admin role "
                         "present")

    # Verify that the service user domain is configured, creating if necessary
    if region_config.has_option('REGION_2_SERVICES', 'USER_DOMAIN_NAME'):
        user_domain = region_config.get('REGION_2_SERVICES',
                                        'USER_DOMAIN_NAME')
    else:
        user_domain = DEFAULT_DOMAIN_NAME
    domains = rutils.get_domains(token, api_url)
    user_domain_id = domains.get_domain_id(user_domain)
    if not user_domain_id:
        if create and config_type == REGION_CONFIG:
            region_config.set('REGION_2_SERVICES', 'USER_DOMAIN_NAME',
                              user_domain)
        else:
            raise ConfigFail(
                "Unable to obtain id for %s domain. Please ensure "
                "keystone configuration is correct." % user_domain)

    auth_url = region_config.get('SHARED_SERVICES', 'KEYSTONE_ADMINURL')
    if config_type == REGION_CONFIG:
        # Verify that all users are configured and can retrieve a token,
        # optionally setting up missing users and their admin role
        for user in expected_users:
            auth_user = region_config.get(user[REGION_NAME],
                                          user[USER_KEY] + '_USER_NAME')
            user_id = users.get_user_id(auth_user)
            auth_password = None
            if not user_id and create:
                if not region_config.has_option(
                        user[REGION_NAME], user[USER_KEY] + '_PASSWORD'):
                    # Generate a random password for the new user via
                    # /dev/urandom if necessary
                    try:
                        region_config.set(
                            user[REGION_NAME], user[USER_KEY] + '_PASSWORD',
                            uuid.uuid4().hex[:10] + "TiC2*")
                    except Exception as e:
                        raise ConfigFail("Failed to generate random user "
                                         "password: %s" % e)
            elif (user_id and user_domain_id and
                    project_id and project_domain_id):
                # If a user_id already exists then we cannot use a randomized
                # password, as the user was either created by a previous run
                # of regionconfig or as part of the Titanium Cloud primary
                # region config
                if not region_config.has_option(
                        user[REGION_NAME], user[USER_KEY] + '_PASSWORD'):
                    raise ConfigFail("Failed to find configured password "
                                     "for pre-defined user %s" % auth_user)
                auth_password = region_config.get(user[REGION_NAME],
                                                  user[USER_KEY] +
                                                  '_PASSWORD')
                # Verify that the existing user can seek an auth token
                user_token = rutils.get_token(auth_url, service_project,
                                              auth_user, auth_password,
                                              user_domain, project_domain)
                if not user_token:
                    raise ConfigFail(
                        "Unable to obtain keystone token for %s user. "
                        "Please ensure keystone configuration is correct."
                        % auth_user)
    else:
        # For subcloud configs we re-use the users from the system controller
        # (the primary region).
        for user in expected_users:
            auth_user = user[USER_NAME]
            user_id = users.get_user_id(auth_user)
            auth_password = None

            if user_id:
                # Add the password to the region config so it will be used
                # when configuring services.
                auth_password = user_config.get_password(user[USER_NAME])
                region_config.set(user[REGION_NAME],
                                  user[USER_KEY] + '_PASSWORD',
                                  auth_password)
            else:
                raise ConfigFail(
                    "Unable to obtain user (%s). Please ensure "
                    "keystone configuration is correct." % user[USER_NAME])

            # Verify that the existing user can seek an auth token
            user_token = rutils.get_token(auth_url, service_project,
                                          auth_user, auth_password,
                                          user_domain, project_domain)
            if not user_token:
                raise ConfigFail(
                    "Unable to obtain keystone token for %s user. "
                    "Please ensure keystone configuration is correct."
                    % auth_user)

    # Verify that region one endpoints & services for shared services
    # match our requirements
    for endpoint in expected_region_1_endpoints:
        service_name = region_config.get('SHARED_SERVICES',
                                         endpoint[SERVICE_NAME])
        service_type = region_config.get('SHARED_SERVICES',
                                         endpoint[SERVICE_TYPE])

        try:
            service_id = services.get_service_id(service_name, service_type)
        except KeystoneFail as ex:
            # There is no option to create services for region one; if those
            # are not present, something is seriously wrong
            raise ex

        # Extract region one url information from the existing endpoint entry:
        try:
            endpoints.get_service_url(region_1_name, service_id, "public")
            endpoints.get_service_url(region_1_name, service_id, "internal")
            endpoints.get_service_url(region_1_name, service_id, "admin")
        except KeystoneFail:
            # Fail since shared services endpoints are not found
            raise ConfigFail("Endpoint for shared service %s "
                             "is not configured" % service_name)

    # Verify that region two endpoints & services match our requirements,
    # optionally creating missing entries
    public_address = utils.get_optional(region_config, 'CAN_NETWORK',
                                        'CAN_IP_START_ADDRESS')
    if not public_address:
        public_address = utils.get_optional(region_config, 'CAN_NETWORK',
                                            'CAN_IP_FLOATING_ADDRESS')
    if not public_address:
        public_address = utils.get_optional(region_config, 'OAM_NETWORK',
                                            'IP_START_ADDRESS')
    if not public_address:
        # AIO-SX configuration
        public_address = utils.get_optional(region_config, 'OAM_NETWORK',
                                            'IP_ADDRESS')
    if not public_address:
        public_address = region_config.get('OAM_NETWORK',
                                           'IP_FLOATING_ADDRESS')

    if region_config.has_section('CLM_NETWORK'):
        internal_address = region_config.get('CLM_NETWORK',
                                             'CLM_IP_START_ADDRESS')
    else:
        internal_address = region_config.get('MGMT_NETWORK',
                                              'IP_START_ADDRESS')

    for endpoint in expected_region_2_endpoints:
        service_name = utils.get_service(region_config, 'REGION_2_SERVICES',
                                         endpoint[SERVICE_NAME])
        service_type = utils.get_service(region_config, 'REGION_2_SERVICES',
                                         endpoint[SERVICE_TYPE])
        service_id = services.get_service_id(service_name, service_type)

        expected_public_url = endpoint[PUBLIC_URL].format(public_address)
        expected_internal_url = endpoint[INTERNAL_URL].format(
            internal_address)
        expected_admin_url = endpoint[ADMIN_URL].format(internal_address)

        try:
            public_url = endpoints.get_service_url(region_2_name, service_id,
                                                   "public")
            internal_url = endpoints.get_service_url(region_2_name,
                                                     service_id, "internal")
            admin_url = endpoints.get_service_url(region_2_name, service_id,
                                                  "admin")
        except KeystoneFail:
            # The endpoint will be created optionally
            if not create:
                raise ConfigFail("Keystone configuration error: Unable to "
                                 "find endpoints for service %s"
                                 % service_name)
            continue

        # Validate the existing endpoints
        for endpointtype, found, expected in [
                ('public', public_url, expected_public_url),
                ('internal', internal_url, expected_internal_url),
                ('admin', admin_url, expected_admin_url)]:
            if found != expected:
                raise ConfigFail(
                    "Keystone configuration error for:\nregion ({}), "
                    "service name ({}), service type ({})\n"
                    "expected {}: {}\nconfigured {}: {}".format(
                        region_2_name, service_name, service_type,
                        endpointtype, expected, endpointtype, found))