def testCorrectHostname(self):
  self._CreateValidConfigDir()
  utils.WriteFile(self.config_path,
                  data=serializer.DumpJson(GetMinimalConfig()))
  utils.WriteFile(self.ss_master_node_path,
                  data="%s\n" % netutils.GetHostname().name)
  _RunUpgrade(self.tmpdir, False, True, ignore_hostname=False)
def testWrongHostname(self):
  self._CreateValidConfigDir()
  utils.WriteFile(self.config_path,
                  data=serializer.DumpJson(GetMinimalConfig()))
  hostname = netutils.GetHostname().name
  assert hostname != utils.ReadOneLineFile(self.ss_master_node_path)
  self.assertRaises(Exception, _RunUpgrade, self.tmpdir, False, True,
                    ignore_hostname=False)
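A natural companion case, sketched under the assumption that _RunUpgrade keeps the same positional arguments as above (this test is hypothetical, not part of the original suite): with ignore_hostname=True the mismatch should be tolerated rather than raising.

def testWrongHostnameIgnored(self):
  # Hypothetical companion test: same setup as testWrongHostname, but the
  # hostname mismatch is expected to be tolerated when ignore_hostname=True.
  self._CreateValidConfigDir()
  utils.WriteFile(self.config_path,
                  data=serializer.DumpJson(GetMinimalConfig()))
  assert netutils.GetHostname().name != \
      utils.ReadOneLineFile(self.ss_master_node_path)
  _RunUpgrade(self.tmpdir, False, True, ignore_hostname=True)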
def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name
  if ssconf_master_node == hostname:
    return True

  logging.warning("Warning: ssconf says master node is '%s', but this"
                  " machine's name is '%s'; this tool must be run on"
                  " the master node", ssconf_master_node, hostname)
  return False
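A minimal sketch of a call site for this check, assuming an ignore_hostname option like the one exercised by the tests above; the ssconf path and the exception raised are assumptions, not taken from the source:

# Hypothetical entry-point guard: refuse to run on a non-master node
# unless the user explicitly overrides the check.
ss_master_node_path = os.path.join(data_dir, "ssconf_master_node")  # assumed
if not (options.ignore_hostname or CheckHostname(ss_master_node_path)):
  raise Exception("Aborting due to hostname mismatch")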
def _WaitForSshDaemon(hostname, port, family):
  """Wait for SSH daemon to become responsive.

  """
  hostip = netutils.GetHostname(name=hostname, family=family).ip

  def _CheckSshDaemon():
    if netutils.TcpPing(hostip, port, timeout=1.0, live_port_needed=True):
      logging.debug("SSH daemon on %s:%s (IP address %s) has become"
                    " responsive", hostname, port, hostip)
    else:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckSshDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("SSH daemon on %s:%s (IP address %s) didn't"
                             " become responsive within %s seconds" %
                             (hostname, port, hostip, _DAEMON_READY_TIMEOUT))
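A helper like this would typically run right before a real SSH connection is attempted; a sketch of such a call site (_RunSshCommand and its body are illustrative, not from the source above):

def _RunSshCommand(cluster_name, node, basecmd, port, family):
  # Illustrative wrapper: wait for sshd first, so a slow-booting node
  # yields a clear timeout error instead of an opaque connection failure.
  _WaitForSshDaemon(node, port, family)  # raises OpExecError on timeout
  srun = ssh.SshRunner(cluster_name)
  result = srun.Run(node, constants.SSH_LOGIN_USER, basecmd, port=port)
  if result.failed:
    raise errors.OpExecError("Command '%s' failed on %s: %s" %
                             (basecmd, node, result.fail_reason))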
def CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError("Resolved hostname '%s' does not look the"
                               " same as given hostname '%s'" %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
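The "prefix" requirement above is enforced through utils.MatchNameComponent, which matches a short name against full names by first label; a quick illustration with assumed hostnames:

# Illustrative only: a short name matches an FQDN whose first label equals it,
# so "node1" resolving to "node1.example.com" passes, while a resolution to
# "node2.example.com" raises OpPrereqError.
assert utils.MatchNameComponent(
    "node1", ["node1.example.com"]) == "node1.example.com"
assert utils.MatchNameComponent("node1", ["node2.example.com"]) is None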
def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, gluster_storage_dir,
                candidate_pool_size, secondary_ip=None,
                vg_name=None, beparams=None, nicparams=None, ndparams=None,
                hvparams=None, diskparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None, uid_pool=None,
                default_iallocator=None, default_iallocator_params=None,
                primary_ip_version=None, ipolicy=None,
                prealloc_wipe_disks=False, use_external_mip_script=False,
                hv_state=None, disk_state=None, enabled_disk_templates=None,
                install_image=None, zeroing_image=None,
                compression_tools=None, enabled_user_shutdown=False):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk_templates to be used in this
    cluster
  @type enabled_user_shutdown: bool
  @param enabled_user_shutdown: whether user shutdown is enabled cluster
    wide

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  data_dir = vcluster.AddNodePrefix(pathutils.DATA_DIR)
  queue_dir = vcluster.AddNodePrefix(pathutils.QUEUE_DIR)
  archive_dir = vcluster.AddNodePrefix(pathutils.JOB_QUEUE_ARCHIVE_DIR)
  for ddir in [queue_dir, data_dir, archive_dir]:
    if os.path.isdir(ddir):
      for entry in os.listdir(ddir):
        if not os.path.isdir(os.path.join(ddir, entry)):
          raise errors.OpPrereqError(
              "%s contains non-directory entries like %s. Remove left-overs"
              " of an old cluster before initialising a new one" %
              (ddir, entry), errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  _InitCheckEnabledDiskTemplates(enabled_disk_templates)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." %
                               (hostname.ip, primary_ip_version),
                               errors.ECODE_INVAL)

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, pathutils.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" % hostname.ip,
                               errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                      timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s " %
                                 (master_netmask, primary_ip_version),
                                 errors.ECODE_INVAL)
  else:
    master_netmask = ipcls.iplen

  if vg_name:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)

  drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
  _InitCheckDrbdHelper(drbd_helper, drbd_enabled)

  logging.debug("Stopping daemons (if any are running)")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
  if result.failed:
    raise errors.OpExecError("Could not stop daemons, command %s"
                             " had exitcode %s and error '%s'" %
                             (result.cmd, result.exit_code, result.output))

  file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
                                         file_storage_dir)
  shared_file_storage_dir = _PrepareSharedFileStorage(
      enabled_disk_templates, shared_file_storage_dir)
  gluster_storage_dir = _PrepareGlusterStorage(enabled_disk_templates,
                                               gluster_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  if not nicparams.get('mode', None) == constants.NIC_MODE_OVS:
    # Do not do this check if mode=openvswitch, since the openvswitch is not
    # created yet
    result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (master_netdev, result.output.strip()),
                                 errors.ECODE_INVAL)

  dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)

  full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
  _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy,
                                         enabled_disk_templates)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # This is ugly, as we modify the dict itself
  # FIXME: Make utils.ForceDictType pure functional or write a wrapper
  # around it
  if hv_state:
    for hvname, hvs_data in hv_state.items():
      utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
      hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
  else:
    hv_state = dict((hvname, constants.HVST_DEFAULTS)
                    for hvname in enabled_hypervisors)

  # FIXME: disk_state has no default values yet
  if disk_state:
    for storage, ds_data in disk_state.items():
      if storage not in constants.DS_VALID_TYPES:
        raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
                                   storage, errors.ECODE_INVAL)
      for ds_name, state in ds_data.items():
        utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
        ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # diskparams is a mapping of disk-template->diskparams dict
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if not (param_keys <= default_param_keys):
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                 " %s" %
                                 (template,
                                  utils.CommaJoin(unknown_params)),
                                 errors.ECODE_INVAL)
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
    if template == constants.DT_DRBD8 and vg_name is not None:
      # The default METAVG value is equal to the VG name set at init time,
      # if provided
      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

  try:
    utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
  except errors.OpPrereqError, err:
    raise errors.OpPrereqError("While verifying diskparam options: %s" % err,
                               errors.ECODE_INVAL)
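As a standalone illustration of the MAC-prefix validation above: the character class [0-9a-z] is lowercase-only and, as written, slightly broader than strict hexadecimal.

import re

mac_prefix_re = re.compile("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$")

assert mac_prefix_re.match("aa:00:10")         # lowercase pairs: accepted
assert not mac_prefix_re.match("AA:00:10")     # uppercase: rejected
assert not mac_prefix_re.match("aa:00:10:ff")  # a full MAC is not a prefix
assert mac_prefix_re.match("zz:zz:zz")         # non-hex letters slip through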
def AddNode(opts, args):
  """Add a node to the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new node name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  node = netutils.GetHostname(name=args[0]).name
  readd = opts.readd

  # Retrieve relevant parameters of the node group.
  ssh_port = None
  try:
    # Passing [] to QueryGroups means query the default group:
    node_groups = [opts.nodegroup] if opts.nodegroup is not None else []
    output = cl.QueryGroups(names=node_groups, fields=["ndp/ssh_port"],
                            use_locking=False)
    (ssh_port, ) = output[0]
  except (errors.OpPrereqError, errors.OpExecError):
    pass

  try:
    output = cl.QueryNodes(names=[node],
                           fields=["name", "sip", "master", "ndp/ssh_port"],
                           use_locking=False)
    if len(output) == 0:
      node_exists = ""
      sip = None
    else:
      node_exists, sip, is_master, ssh_port = output[0]
  except (errors.OpPrereqError, errors.OpExecError):
    node_exists = ""
    sip = None

  if readd:
    if not node_exists:
      ToStderr("Node %s not in the cluster"
               " - please retry without '--readd'", node)
      return 1
    if is_master:
      ToStderr("Node %s is the master, cannot readd", node)
      return 1
  else:
    if node_exists:
      ToStderr("Node %s already in the cluster (as %s)"
               " - please retry with '--readd'", node, node_exists)
      return 1
    sip = opts.secondary_ip

  # read the cluster name from the master
  (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])

  if opts.node_setup:
    ToStderr("-- WARNING -- \n"
             "Performing this operation is going to perform the following\n"
             "changes to the target machine (%s) and the current cluster\n"
             "nodes:\n"
             "* A new SSH daemon key pair is generated on the target"
             " machine.\n"
             "* The public SSH keys of all master candidates of the cluster\n"
             "  are added to the target machine's 'authorized_keys' file.\n"
             "* In case the target machine is a master candidate, its newly\n"
             "  generated public SSH key will be distributed to all other\n"
             "  cluster nodes.\n", node)

  if opts.node_setup:
    _SetupSSH(opts, cluster_name, node, ssh_port, cl)

  bootstrap.SetupNodeDaemon(opts, cluster_name, node, ssh_port)

  if opts.disk_state:
    disk_state = utils.FlatToDict(opts.disk_state)
  else:
    disk_state = {}

  hv_state = dict(opts.hv_state)

  op = opcodes.OpNodeAdd(node_name=args[0], secondary_ip=sip,
                         readd=opts.readd,
                         group=opts.nodegroup, vm_capable=opts.vm_capable,
                         ndparams=opts.ndparams,
                         master_capable=opts.master_capable,
                         disk_state=disk_state,
                         hv_state=hv_state,
                         node_setup=opts.node_setup)
  SubmitOpCode(op, opts=opts)
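For context, a handler like this backs the gnt-node add command; typical invocations look roughly like the following (hostnames and addresses illustrative):

$ gnt-node add node2.example.com
$ gnt-node add --readd node2.example.com
$ gnt-node add -s 192.0.2.10 node2.example.com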
def get_ganeti_node():
  """Get the Ganeti node this service is running on.

  @return: the FQDN of this node

  """
  return netutils.GetHostname().GetFqdn()
def AddNode(opts, args):
  """Add a node to the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new node name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  query_cl = GetClient(query=True)
  node = netutils.GetHostname(name=args[0]).name
  readd = opts.readd

  # Retrieve relevant parameters of the node group.
  ssh_port = None
  try:
    # Passing [] to QueryGroups means query the default group:
    node_groups = [opts.nodegroup] if opts.nodegroup is not None else []
    output = query_cl.QueryGroups(names=node_groups, fields=["ndp/ssh_port"],
                                  use_locking=False)
    (ssh_port, ) = output[0]
  except (errors.OpPrereqError, errors.OpExecError):
    pass

  try:
    output = query_cl.QueryNodes(names=[node],
                                 fields=["name", "sip", "master",
                                         "ndp/ssh_port"],
                                 use_locking=False)
    node_exists, sip, is_master, ssh_port = output[0]
  except (errors.OpPrereqError, errors.OpExecError):
    node_exists = ""
    sip = None

  if readd:
    if not node_exists:
      ToStderr("Node %s not in the cluster"
               " - please retry without '--readd'", node)
      return 1
    if is_master:
      ToStderr("Node %s is the master, cannot readd", node)
      return 1
  else:
    if node_exists:
      ToStderr("Node %s already in the cluster (as %s)"
               " - please retry with '--readd'", node, node_exists)
      return 1
    sip = opts.secondary_ip

  # read the cluster name from the master
  (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])

  if not readd and opts.node_setup:
    ToStderr("-- WARNING -- \n"
             "Performing this operation is going to replace the ssh daemon"
             " keypair\n"
             "on the target machine (%s) with the ones of the"
             " current one\n"
             "and grant full intra-cluster ssh root access to/from it\n",
             node)

  if opts.node_setup:
    _SetupSSH(opts, cluster_name, node, ssh_port)

  bootstrap.SetupNodeDaemon(opts, cluster_name, node, ssh_port)

  if opts.disk_state:
    disk_state = utils.FlatToDict(opts.disk_state)
  else:
    disk_state = {}

  hv_state = dict(opts.hv_state)

  op = opcodes.OpNodeAdd(node_name=args[0], secondary_ip=sip,
                         readd=opts.readd,
                         group=opts.nodegroup, vm_capable=opts.vm_capable,
                         ndparams=opts.ndparams,
                         master_capable=opts.master_capable,
                         disk_state=disk_state,
                         hv_state=hv_state)
  SubmitOpCode(op, opts=opts)
def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, gluster_storage_dir,
                candidate_pool_size, ssh_key_type, ssh_key_bits,
                secondary_ip=None, vg_name=None, beparams=None,
                nicparams=None, ndparams=None, hvparams=None,
                diskparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None, uid_pool=None,
                default_iallocator=None, default_iallocator_params=None,
                primary_ip_version=None, ipolicy=None,
                prealloc_wipe_disks=False, use_external_mip_script=False,
                hv_state=None, disk_state=None, enabled_disk_templates=None,
                install_image=None, zeroing_image=None,
                compression_tools=None, enabled_user_shutdown=False):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk_templates to be used in this
    cluster
  @type enabled_user_shutdown: bool
  @param enabled_user_shutdown: whether user shutdown is enabled cluster
    wide

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  data_dir = vcluster.AddNodePrefix(pathutils.DATA_DIR)
  queue_dir = vcluster.AddNodePrefix(pathutils.QUEUE_DIR)
  archive_dir = vcluster.AddNodePrefix(pathutils.JOB_QUEUE_ARCHIVE_DIR)
  for ddir in [queue_dir, data_dir, archive_dir]:
    if os.path.isdir(ddir):
      for entry in os.listdir(ddir):
        if not os.path.isdir(os.path.join(ddir, entry)):
          raise errors.OpPrereqError(
              "%s contains non-directory entries like %s. Remove left-overs"
              " of an old cluster before initialising a new one" %
              (ddir, entry), errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  _InitCheckEnabledDiskTemplates(enabled_disk_templates)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." %
                               (hostname.ip, primary_ip_version),
                               errors.ECODE_INVAL)

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, pathutils.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" % hostname.ip,
                               errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                      timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s " %
                                 (master_netmask, primary_ip_version),
                                 errors.ECODE_INVAL)
  else:
    master_netmask = ipcls.iplen

  if vg_name:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)

  drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
  _InitCheckDrbdHelper(drbd_helper, drbd_enabled)

  logging.debug("Stopping daemons (if any are running)")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
  if result.failed:
    raise errors.OpExecError("Could not stop daemons, command %s"
                             " had exitcode %s and error '%s'" %
                             (result.cmd, result.exit_code, result.output))

  file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
                                         file_storage_dir)
  shared_file_storage_dir = _PrepareSharedFileStorage(
      enabled_disk_templates, shared_file_storage_dir)
  gluster_storage_dir = _PrepareGlusterStorage(enabled_disk_templates,
                                               gluster_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  if not nicparams.get('mode', None) == constants.NIC_MODE_OVS:
    # Do not do this check if mode=openvswitch, since the openvswitch is not
    # created yet
    result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (master_netdev, result.output.strip()),
                                 errors.ECODE_INVAL)

  dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)

  full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
  _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy,
                                         enabled_disk_templates)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # This is ugly, as we modify the dict itself
  # FIXME: Make utils.ForceDictType pure functional or write a wrapper
  # around it
  if hv_state:
    for hvname, hvs_data in hv_state.items():
      utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
      hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
  else:
    hv_state = dict((hvname, constants.HVST_DEFAULTS)
                    for hvname in enabled_hypervisors)

  # FIXME: disk_state has no default values yet
  if disk_state:
    for storage, ds_data in disk_state.items():
      if storage not in constants.DS_VALID_TYPES:
        raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
                                   storage, errors.ECODE_INVAL)
      for ds_name, state in ds_data.items():
        utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
        ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.items():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # diskparams is a mapping of disk-template->diskparams dict
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if param_keys > default_param_keys:
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                 " %s" %
                                 (template,
                                  utils.CommaJoin(unknown_params)),
                                 errors.ECODE_INVAL)
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
    if template == constants.DT_DRBD8 and vg_name is not None:
      # The default METAVG value is equal to the VG name set at init time,
      # if provided
      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

  try:
    utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
  except errors.OpPrereqError as err:
    raise errors.OpPrereqError("While verifying diskparam options: %s" % err,
                               errors.ECODE_INVAL)

  # set up ssh config and /etc/hosts
  rsa_sshkey = ""
  dsa_sshkey = ""
  if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
    rsa_sshkey = sshline.split(" ")[1]
  if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
    dsa_sshkey = sshline.split(" ")[1]
  if not rsa_sshkey and not dsa_sshkey:
    raise errors.OpPrereqError("Failed to find SSH public keys",
                               errors.ECODE_ENVIRON)

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    ssh.InitSSHSetup(ssh_key_type, ssh_key_bits)

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  else:
    # default to htools
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  # check if we have all the users we need
  try:
    runtime.GetEnts()
  except errors.ConfigurationError as err:
    raise errors.OpPrereqError("Required system user/group missing: %s" %
                               err, errors.ECODE_ENVIRON)

  candidate_certs = {}

  now = time.time()

  if compression_tools is not None:
    cluster.CheckCompressionTools(compression_tools)

  initial_dc_config = dict(active=True,
                           interval=int(constants.MOND_TIME_INTERVAL * 1e6))
  data_collectors = dict(
      (name, initial_dc_config.copy())
      for name in constants.DATA_COLLECTOR_NAMES)

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=rsa_sshkey,
    dsahostkeypub=dsa_sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    gluster_storage_dir=gluster_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    data_collectors=data_collectors,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    default_iallocator_params=default_iallocator_params,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    ipolicy=full_ipolicy,
    hv_state_static=hv_state,
    disk_state_static=disk_state,
    enabled_disk_templates=enabled_disk_templates,
    candidate_certs=candidate_certs,
    osparams={},
    osparams_private_cluster={},
    install_image=install_image,
    zeroing_image=zeroing_image,
    compression_tools=compression_tools,
    enabled_user_shutdown=enabled_user_shutdown,
    ssh_key_type=ssh_key_type,
    ssh_key_bits=ssh_key_bits,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )

  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  master_uuid = cfg.GetMasterNode()
  if modify_ssh_setup:
    ssh.InitPubKeyFile(master_uuid, ssh_key_type)
  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name, cfg)

  logging.debug("Starting daemons")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()
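One subtle difference from the earlier revision of this function: the unknown-diskparams test changed from "not (param_keys <= default_param_keys)" (fires whenever any unknown key is present) to "param_keys > default_param_keys" (fires only when the given keys are a proper superset of the defaults). A worked illustration, with key names assumed for the example; in the mixed case the subsequent VerifyDictOptions call is presumably what still rejects the unknown key:

defaults = {"metavg", "resync-rate"}  # assumed default diskparam keys
mixed = {"metavg", "bogus"}           # one known key, one unknown

assert not mixed <= defaults   # old check fires: "bogus" is unknown
assert not mixed > defaults    # new check does not fire for this input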