def InitCluster(
    cluster_name, mac_prefix, # pylint: disable=R0913, R0914
    master_netmask, master_netdev, file_storage_dir,
    shared_file_storage_dir, gluster_storage_dir, candidate_pool_size,
    ssh_key_type, ssh_key_bits, secondary_ip=None, vg_name=None,
    beparams=None, nicparams=None, ndparams=None, hvparams=None,
    diskparams=None, enabled_hypervisors=None, modify_etc_hosts=True,
    modify_ssh_setup=True, maintain_node_health=False, drbd_helper=None,
    uid_pool=None, default_iallocator=None, default_iallocator_params=None,
    primary_ip_version=None, ipolicy=None, prealloc_wipe_disks=False,
    use_external_mip_script=False, hv_state=None, disk_state=None,
    enabled_disk_templates=None, install_image=None, zeroing_image=None,
    compression_tools=None, enabled_user_shutdown=False):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk_templates to be used in this
    cluster
  @type enabled_user_shutdown: bool
  @param enabled_user_shutdown: whether user shutdown is enabled cluster wide

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  data_dir = vcluster.AddNodePrefix(pathutils.DATA_DIR)
  queue_dir = vcluster.AddNodePrefix(pathutils.QUEUE_DIR)
  archive_dir = vcluster.AddNodePrefix(pathutils.JOB_QUEUE_ARCHIVE_DIR)
  for ddir in [queue_dir, data_dir, archive_dir]:
    if os.path.isdir(ddir):
      for entry in os.listdir(ddir):
        if not os.path.isdir(os.path.join(ddir, entry)):
          raise errors.OpPrereqError(
            "%s contains non-directory entries like %s. Remove left-overs"
            " of an old cluster before initialising a new one" %
            (ddir, entry), errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError(
      "Enabled hypervisors list must contain at least one member",
      errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError(
      "Enabled hypervisors contains invalid entries: %s" % invalid_hvs,
      errors.ECODE_INVAL)

  _InitCheckEnabledDiskTemplates(enabled_disk_templates)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError(
      "Invalid primary ip version: %d." % primary_ip_version,
      errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError(
      "This host's IP (%s) is not a valid IPv%d address." %
      (hostname.ip, primary_ip_version), errors.ECODE_INVAL)

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError(
      "This host's IP (%s) resolves to a loopback address. Please fix DNS"
      " or %s." % (hostname.ip, pathutils.ETC_HOSTS), errors.ECODE_ENVIRON)
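
  # The resolved name must also map to an address that actually belongs to
  # this host, otherwise the configuration is inconsistent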
  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError(
      "Inconsistency: this host's name resolves to %s,\nbut this ip address"
      " does not belong to this host" % hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError(
        "When using an IPv6 primary address, a valid IPv4 address must be"
        " given as secondary", errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError(
      "Secondary IP address (%s) has to be a valid IPv4 address." %
      secondary_ip, errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError(
      "You gave %s as secondary IP, but it does not belong to this host." %
      secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError(
        "CIDR netmask (%s) not valid for IPv%s" %
        (master_netmask, primary_ip_version), errors.ECODE_INVAL)
  else:
    master_netmask = ipcls.iplen

  if vg_name:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)

  drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
  _InitCheckDrbdHelper(drbd_helper, drbd_enabled)

  logging.debug("Stopping daemons (if any are running)")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
  if result.failed:
    raise errors.OpExecError("Could not stop daemons, command %s"
                             " had exitcode %s and error '%s'" %
                             (result.cmd, result.exit_code, result.output))

  file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
                                         file_storage_dir)
  shared_file_storage_dir = _PrepareSharedFileStorage(
    enabled_disk_templates, shared_file_storage_dir)
  gluster_storage_dir = _PrepareGlusterStorage(enabled_disk_templates,
                                               gluster_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  if not nicparams.get('mode', None) == constants.NIC_MODE_OVS:
    # Do not do this check if mode=openvswitch, since the openvswitch is not
    # created yet
    result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
    if result.failed:
      raise errors.OpPrereqError(
        "Invalid master netdev given (%s): '%s'" %
        (master_netdev, result.output.strip()), errors.ECODE_INVAL)

  dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)
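
  # Merge the caller-supplied instance policy over the built-in defaults and
  # drop settings that refer to disk templates which are not enabled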
  full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
  _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy, enabled_disk_templates)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # This is ugly, as we modify the dict itself
  # FIXME: Make utils.ForceDictType pure functional or write a wrapper
  # around it
  if hv_state:
    for hvname, hvs_data in hv_state.items():
      utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
      hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
  else:
    hv_state = dict((hvname, constants.HVST_DEFAULTS)
                    for hvname in enabled_hypervisors)

  # FIXME: disk_state has no default values yet
  if disk_state:
    for storage, ds_data in disk_state.items():
      if storage not in constants.DS_VALID_TYPES:
        raise errors.OpPrereqError(
          "Invalid storage type in disk state: %s" % storage,
          errors.ECODE_INVAL)
      for ds_name, state in ds_data.items():
        utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
        ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.items():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # diskparams is a mapping of disk-template->diskparams dict
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if param_keys > default_param_keys:
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError(
        "Invalid parameters for disk template %s: %s" %
        (template, utils.CommaJoin(unknown_params)), errors.ECODE_INVAL)
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
    if template == constants.DT_DRBD8 and vg_name is not None:
      # The default METAVG value is equal to the VG name set at init time,
      # if provided
      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

  try:
    utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
  except errors.OpPrereqError as err:
    raise errors.OpPrereqError("While verifying diskparam options: %s" % err,
                               errors.ECODE_INVAL)

  # set up ssh config and /etc/hosts
  rsa_sshkey = ""
  dsa_sshkey = ""
  if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
    rsa_sshkey = sshline.split(" ")[1]
  if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
    dsa_sshkey = sshline.split(" ")[1]
  if not rsa_sshkey and not dsa_sshkey:
    raise errors.OpPrereqError("Failed to find SSH public keys",
                               errors.ECODE_ENVIRON)

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    ssh.InitSSHSetup(ssh_key_type, ssh_key_bits)

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError(
        "Invalid default iallocator script '%s' specified" %
        default_iallocator, errors.ECODE_INVAL)
  else:
    # default to htools
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  # check if we have all the users we need
  try:
    runtime.GetEnts()
  except errors.ConfigurationError as err:
    raise errors.OpPrereqError(
      "Required system user/group missing: %s" % err, errors.ECODE_ENVIRON)

  candidate_certs = {}

  now = time.time()

  if compression_tools is not None:
    cluster.CheckCompressionTools(compression_tools)
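
  # Every known data collector starts out active, polling at the default
  # interval (MOND_TIME_INTERVAL is in seconds, the stored value in
  # microseconds)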
  initial_dc_config = dict(active=True,
                           interval=int(constants.MOND_TIME_INTERVAL * 1e6))
  data_collectors = dict((name, initial_dc_config.copy())
                         for name in constants.DATA_COLLECTOR_NAMES)

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=rsa_sshkey,
    dsahostkeypub=dsa_sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    gluster_storage_dir=gluster_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    data_collectors=data_collectors,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    default_iallocator_params=default_iallocator_params,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    ipolicy=full_ipolicy,
    hv_state_static=hv_state,
    disk_state_static=disk_state,
    enabled_disk_templates=enabled_disk_templates,
    candidate_certs=candidate_certs,
    osparams={},
    osparams_private_cluster={},
    install_image=install_image,
    zeroing_image=zeroing_image,
    compression_tools=compression_tools,
    enabled_user_shutdown=enabled_user_shutdown,
    ssh_key_type=ssh_key_type,
    ssh_key_bits=ssh_key_bits,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )

  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  master_uuid = cfg.GetMasterNode()
  if modify_ssh_setup:
    ssh.InitPubKeyFile(master_uuid, ssh_key_type)
  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name, cfg)

  logging.debug("Starting daemons")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()