Example #1
0
    def setUp(self):
        """Builds a cluster object with hypervisor, OS and node parameters."""
        # Cluster-level parameters for the fake hypervisor.
        fake_hv_params = {
            "foo": "bar",
            "bar": "foo",
            "foobar": "barfoo",
        }
        hvparams = {constants.HT_FAKE: fake_hv_params}

        # Per-OS hypervisor parameter overrides; "ubuntu-hardy" has none.
        lenny_fake_params = {
            "foo": "baz",
            "foobar": "foobar",
            "blah": "blibb",
            "blubb": "blah",
        }
        lenny_xen_pvm_params = {
            "root_path": "/dev/sda5",
            "foo": "foobar",
        }
        os_hvp = {
            "lenny-image": {
                constants.HT_FAKE: lenny_fake_params,
                constants.HT_XEN_PVM: lenny_xen_pvm_params,
            },
            "ubuntu-hardy": {},
        }

        # Node-level parameters used by the tests.
        ndparams = {
            constants.ND_OOB_PROGRAM: "/bin/cluster-oob",
            constants.ND_SPINDLE_COUNT: 1,
            constants.ND_EXCLUSIVE_STORAGE: False,
        }

        self.fake_cl = objects.Cluster(hvparams=hvparams, os_hvp=os_hvp,
                                       ndparams=ndparams)
        self.fake_cl.UpgradeConfig()
    def _init_cluster(self, cfg):
        """Initializes the cfg object"""
        # NOTE(review): the "cfg" argument is never used in this body --
        # confirm whether callers rely on it or it can be dropped.
        me = netutils.Hostname()
        ip = constants.IP4_ADDRESS_LOCALHOST
        # master_ip must not conflict with the node ip address
        master_ip = "127.0.0.2"

        # Minimal cluster object; most values are placeholders that only
        # need to be syntactically valid for InitConfig to accept them.
        cluster_config = objects.Cluster(
            serial_no=1,
            rsahostkeypub="",
            dsahostkeypub="",
            highest_used_port=(constants.FIRST_DRBD_PORT - 1),
            mac_prefix="aa:00:00",
            volume_group_name="xenvg",
            drbd_usermode_helper="/bin/true",
            nicparams={constants.PP_DEFAULT: constants.NICC_DEFAULTS},
            ndparams=constants.NDC_DEFAULTS,
            tcpudp_port_pool=set(),
            enabled_hypervisors=[constants.HT_FAKE],
            master_node=me.name,
            master_ip=master_ip,
            master_netdev=constants.DEFAULT_BRIDGE,
            cluster_name="cluster.local",
            file_storage_dir="/tmp",
            uid_pool=[],
        )

        # The local host doubles as the single (master) node of the cluster.
        master_node_config = objects.Node(name=me.name,
                                          primary_ip=me.ip,
                                          secondary_ip=ip,
                                          serial_no=1,
                                          master_candidate=True)

        # Write the initial configuration into self.cfg_file.
        bootstrap.InitConfig(constants.CONFIG_VERSION, cluster_config,
                             master_node_config, self.cfg_file)
Example #3
0
 def testUpgradeEnabledDiskTemplates(self):
     """Checks the disk template upgrade of cluster and ipolicy settings."""
     cfg = objects.ConfigData()
     cfg.cluster = objects.Cluster()
     cfg.cluster.volume_group_name = "myvg"

     # Two instances: one diskless, one RBD-based.
     diskless_inst = objects.Instance()
     diskless_inst.disk_template = constants.DT_DISKLESS
     rbd_inst = objects.Instance()
     rbd_inst.disk_template = constants.DT_RBD
     cfg.instances = {"myinstance1": diskless_inst, "myinstance2": rbd_inst}

     # Node group ipolicy lists one used and one unused disk template.
     nodegroup = objects.NodeGroup()
     nodegroup.ipolicy = {
         constants.IPOLICY_DTS: [diskless_inst.disk_template,
                                 constants.DT_BLOCK],
     }

     # Cluster ipolicy likewise mixes a used and an unused template.
     cfg.cluster.ipolicy = {
         constants.IPOLICY_DTS: [constants.DT_EXT, constants.DT_DISKLESS],
     }
     cfg.nodegroups = {"mynodegroup": nodegroup}

     cfg._UpgradeEnabledDiskTemplates()

     expected = set([constants.DT_DRBD8, constants.DT_PLAIN,
                     diskless_inst.disk_template, rbd_inst.disk_template])
     self.assertEqual(expected, set(cfg.cluster.enabled_disk_templates))
     self.assertEqual(set([diskless_inst.disk_template]),
                      set(cfg.cluster.ipolicy[constants.IPOLICY_DTS]))
Example #4
0
    def _CreateConfig(self):
        """Builds the initial in-memory configuration for the mock.

        Creates an empty ConfigData container, a cluster object populated
        with placeholder values, a default node group and a master node.
        """
        self._config_data = objects.ConfigData(
            version=constants.CONFIG_VERSION,
            cluster=None,
            nodegroups={},
            nodes={},
            instances={},
            networks={},
            disks={})

        # The master node's UUID is needed before the node object itself is
        # created because the cluster object references it.
        master_node_uuid = self._GetUuid()

        self._cluster = objects.Cluster(
            serial_no=1,
            rsahostkeypub="",
            highest_used_port=(constants.FIRST_DRBD_PORT - 1),
            tcpudp_port_pool=set(),
            mac_prefix="aa:00:00",
            volume_group_name="xenvg",
            reserved_lvs=None,
            drbd_usermode_helper="/bin/true",
            master_node=master_node_uuid,
            master_ip="192.0.2.254",
            master_netdev=constants.DEFAULT_BRIDGE,
            master_netmask=None,
            use_external_mip_script=None,
            cluster_name="cluster.example.com",
            file_storage_dir="/tmp",
            shared_file_storage_dir=None,
            enabled_hypervisors=[
                constants.HT_XEN_HVM, constants.HT_XEN_PVM, constants.HT_KVM
            ],
            hvparams=constants.HVC_DEFAULTS.copy(),
            ipolicy=None,
            os_hvp={self.GetDefaultOs().name: constants.HVC_DEFAULTS.copy()},
            beparams=None,
            osparams=None,
            osparams_private_cluster=None,
            nicparams={constants.PP_DEFAULT: constants.NICC_DEFAULTS},
            ndparams=None,
            diskparams=None,
            candidate_pool_size=3,
            modify_etc_hosts=False,
            modify_ssh_setup=False,
            maintain_node_health=False,
            uid_pool=None,
            default_iallocator="mock_iallocator",
            hidden_os=None,
            blacklisted_os=None,
            primary_ip_family=None,
            prealloc_wipe_disks=None,
            enabled_disk_templates=list(constants.DISK_TEMPLATE_PREFERENCE),
        )
        # Set timestamps and run the same parameter upgrade a real cluster
        # initialisation would perform.
        self._cluster.ctime = self._cluster.mtime = time.time()
        self._cluster.UpgradeConfig()
        self._ConfigData().cluster = self._cluster

        self._default_group = self.AddNewNodeGroup(name="default")
        self._master_node = self.AddNewNode(uuid=master_node_uuid)
  def testUpdateCluster(self):
    """Test updates on the cluster object"""
    cfg = self._get_object()
    # construct a fake cluster object
    fake_cl = objects.Cluster()
    # fail if we didn't read the config
    # (assertRaises replaces failUnlessRaises, a deprecated alias that was
    # removed in Python 3.12)
    self.assertRaises(errors.ConfigurationError, cfg.Update, fake_cl, None)

    cl = cfg.GetClusterInfo()
    # first pass, must not fail
    cfg.Update(cl, None)
    # second pass, also must not fail (after the config has been written)
    cfg.Update(cl, None)
    # but the fake_cl update should still fail
    self.assertRaises(errors.ConfigurationError, cfg.Update, fake_cl, None)
 def setUp(self):
     """Prepares a cluster carrying public and private OS parameters."""
     self.fake_cl = objects.Cluster()
     self.fake_cl.UpgradeConfig()
     # Public OS parameters; "ERROR" values mark entries that the tests
     # expect to be overridden or filtered out.
     self.fake_cl.osparams = {
         "os": {"A": "ERROR", "D": "ERROR", "E": "E"},
         "os+a": {"D": "D"},
     }
     # Private OS parameters, following the same convention.
     self.fake_cl.osparams_private_cluster = {
         "os": {"B": "ERROR", "F": "ERROR", "G": "G"},
         "os+a": {"F": "F"},
     }
 # Assemble the cluster configuration object from the values prepared and
 # validated earlier (fragment: the variables referenced here are defined
 # in the enclosing scope, outside this view).
 cluster_config = objects.Cluster(
     serial_no=1,
     rsahostkeypub=rsa_sshkey,
     dsahostkeypub=dsa_sshkey,
     highest_used_port=(constants.FIRST_DRBD_PORT - 1),
     mac_prefix=mac_prefix,
     volume_group_name=vg_name,
     tcpudp_port_pool=set(),
     master_ip=clustername.ip,
     master_netmask=master_netmask,
     master_netdev=master_netdev,
     cluster_name=clustername.name,
     file_storage_dir=file_storage_dir,
     shared_file_storage_dir=shared_file_storage_dir,
     gluster_storage_dir=gluster_storage_dir,
     enabled_hypervisors=enabled_hypervisors,
     beparams={constants.PP_DEFAULT: beparams},
     nicparams={constants.PP_DEFAULT: nicparams},
     ndparams=ndparams,
     hvparams=hvparams,
     diskparams=diskparams,
     candidate_pool_size=candidate_pool_size,
     modify_etc_hosts=modify_etc_hosts,
     modify_ssh_setup=modify_ssh_setup,
     uid_pool=uid_pool,
     ctime=now,
     mtime=now,
     maintain_node_health=maintain_node_health,
     data_collectors=data_collectors,
     drbd_usermode_helper=drbd_helper,
     default_iallocator=default_iallocator,
     default_iallocator_params=default_iallocator_params,
     primary_ip_family=ipcls.family,
     prealloc_wipe_disks=prealloc_wipe_disks,
     use_external_mip_script=use_external_mip_script,
     ipolicy=full_ipolicy,
     hv_state_static=hv_state,
     disk_state_static=disk_state,
     enabled_disk_templates=enabled_disk_templates,
     candidate_certs=candidate_certs,
     osparams={},
     osparams_private_cluster={},
     install_image=install_image,
     zeroing_image=zeroing_image,
     compression_tools=compression_tools,
     enabled_user_shutdown=enabled_user_shutdown,
 )
 def __init__(self, be, excl_stor):
   """Stores a cluster stub with the given backend parameters."""
   self.excl_stor = excl_stor
   self.cluster = objects.Cluster(beparams={"default": be})
Example #9
0
 def testSerializingEmpty(self):
     """A default-constructed cluster serializes to just the port pool."""
     expected = {"tcpudp_port_pool": []}
     self.assertEqual(objects.Cluster().ToDict(), expected)
Example #10
0
 def testNewCluster(self):
     """A freshly created cluster has no TCP/UDP port pool yet."""
     # assertIsNone gives a clearer failure message than
     # assertTrue(x is None)
     self.assertIsNone(objects.Cluster().tcpudp_port_pool)
Example #11
0
 def testUpgradeConfig(self):
     """UpgradeConfig accepts defaults but rejects unknown ipolicy keys."""
     # FIXME: This test is incomplete
     default_cluster = objects.Cluster()
     default_cluster.UpgradeConfig()
     bad_cluster = objects.Cluster(ipolicy={"unknown_key": None})
     self.assertRaises(errors.ConfigurationError, bad_cluster.UpgradeConfig)
Example #12
0
    def testEncodeInstance(self):
        """Checks instance encoding for the various RPC encoding types.

        Exercises runner._encoder with ED_OBJECT_DICT, ED_OBJECT_DICT_LIST,
        ED_INST_DICT, ED_INST_DICT_OSP_DP and ED_INST_DICT_HVP_BEP_DP and
        verifies the parameter-filling behaviour of each.
        """
        cluster = objects.Cluster(
            hvparams={
                constants.HT_KVM: {
                    constants.HV_CDROM_IMAGE_PATH: "foo",
                },
            },
            beparams={
                constants.PP_DEFAULT: {
                    constants.BE_MAXMEM: 8192,
                },
            },
            os_hvp={},
            osparams={
                "linux": {
                    "role": "unknown",
                },
            })
        cluster.UpgradeConfig()

        inst = objects.Instance(
            name="inst1.example.com",
            hypervisor=constants.HT_KVM,
            os="linux",
            hvparams={
                constants.HV_CDROM_IMAGE_PATH: "bar",
                constants.HV_ROOT_PATH: "/tmp",
            },
            beparams={
                constants.BE_MINMEM: 128,
                constants.BE_MAXMEM: 256,
            },
            nics=[
                objects.NIC(nicparams={
                    constants.NIC_MODE: "mymode",
                }),
            ],
            disk_template=constants.DT_PLAIN,
            disks=["disk_uuid_1", "disk_uuid_2"])
        inst.UpgradeConfig()

        cfg = _FakeConfigForRpcRunner(cluster=cluster)
        runner = rpc.RpcRunner(cfg,
                               None,
                               _req_process_fn=NotImplemented,
                               _getents=mocks.FakeGetentResolver)

        def _CheckBasics(result):
            # Shared checks for the fields every encoding must preserve.
            self.assertEqual(result["name"], "inst1.example.com")
            self.assertEqual(result["os"], "linux")
            self.assertEqual(result["beparams"][constants.BE_MINMEM], 128)
            self.assertEqual(len(result["nics"]), 1)
            self.assertEqual(
                result["nics"][0]["nicparams"][constants.NIC_MODE], "mymode")

        # Generic object serialization
        result = runner._encoder(NotImplemented,
                                 (rpc_defs.ED_OBJECT_DICT, inst))
        _CheckBasics(result)
        self.assertEqual(len(result["hvparams"]), 2)

        result = runner._encoder(NotImplemented,
                                 (rpc_defs.ED_OBJECT_DICT_LIST, 5 * [inst]))
        # BUGFIX: "map" is lazy in Python 3, so the previous
        # map(_CheckBasics, result) calls never ran the assertions; iterate
        # explicitly so the checks actually execute.
        for encoded in result:
            _CheckBasics(encoded)
            self.assertEqual(len(encoded["hvparams"]), 2)

        # Just an instance
        result = runner._encoder(NotImplemented, (rpc_defs.ED_INST_DICT, inst))
        _CheckBasics(result)
        self.assertEqual(result["beparams"][constants.BE_MAXMEM], 256)
        self.assertEqual(result["hvparams"][constants.HV_CDROM_IMAGE_PATH],
                         "bar")
        self.assertEqual(result["hvparams"][constants.HV_ROOT_PATH], "/tmp")
        self.assertEqual(result["osparams"], {
            "role": "unknown",
        })
        # Instance hvparams must be filled with all cluster-level defaults.
        self.assertEqual(len(result["hvparams"]),
                         len(constants.HVC_DEFAULTS[constants.HT_KVM]))

        # Instance with OS parameters
        result = runner._encoder(NotImplemented, (rpc_defs.ED_INST_DICT_OSP_DP,
                                                  (inst, {
                                                      "role": "webserver",
                                                      "other": "field",
                                                  })))
        _CheckBasics(result)
        self.assertEqual(result["beparams"][constants.BE_MAXMEM], 256)
        self.assertEqual(result["hvparams"][constants.HV_CDROM_IMAGE_PATH],
                         "bar")
        self.assertEqual(result["hvparams"][constants.HV_ROOT_PATH], "/tmp")
        self.assertEqual(result["osparams"], {
            "role": "webserver",
            "other": "field",
        })

        # Instance with hypervisor and backend parameters
        result = runner._encoder(NotImplemented,
                                 (rpc_defs.ED_INST_DICT_HVP_BEP_DP,
                                  (inst, {
                                      constants.HT_KVM: {
                                          constants.HV_BOOT_ORDER: "xyz",
                                      },
                                  }, {
                                      constants.BE_VCPUS: 100,
                                      constants.BE_MAXMEM: 4096,
                                  })))
        _CheckBasics(result)
        self.assertEqual(result["beparams"][constants.BE_MAXMEM], 4096)
        self.assertEqual(result["beparams"][constants.BE_VCPUS], 100)
        self.assertEqual(result["hvparams"][constants.HT_KVM], {
            constants.HV_BOOT_ORDER: "xyz",
        })
        # Timestamps vary per run; drop them before comparing disk dicts.
        del result["disks_info"][0]["ctime"]
        del result["disks_info"][0]["mtime"]
        del result["disks_info"][1]["ctime"]
        del result["disks_info"][1]["mtime"]
        self.assertEqual(
            result["disks_info"],
            [{
                "dev_type": constants.DT_PLAIN,
                "dynamic_params": {},
                "size": 4096,
                "logical_id": ("vg", "disk6120"),
                "params": constants.DISK_DT_DEFAULTS[inst.disk_template],
                "serial_no": 1,
                "uuid": "disk_uuid_1",
            }, {
                "dev_type": constants.DT_PLAIN,
                "dynamic_params": {},
                "size": 1024,
                "logical_id": ("vg", "disk8508"),
                "params": constants.DISK_DT_DEFAULTS[inst.disk_template],
                "serial_no": 1,
                "uuid": "disk_uuid_2",
            }])

        # The encoder must not have modified the configuration's disk objects.
        inst_disks = cfg.GetInstanceDisks(inst.uuid)
        self.assertTrue(compat.all(disk.params == {} for disk in inst_disks),
                        msg="Configuration objects were modified")
Example #13
0
def InitCluster(
        cluster_name,
        mac_prefix,  # pylint: disable=R0913, R0914
        master_netmask,
        master_netdev,
        file_storage_dir,
        shared_file_storage_dir,
        gluster_storage_dir,
        candidate_pool_size,
        ssh_key_type,
        ssh_key_bits,
        secondary_ip=None,
        vg_name=None,
        beparams=None,
        nicparams=None,
        ndparams=None,
        hvparams=None,
        diskparams=None,
        enabled_hypervisors=None,
        modify_etc_hosts=True,
        modify_ssh_setup=True,
        maintain_node_health=False,
        drbd_helper=None,
        uid_pool=None,
        default_iallocator=None,
        default_iallocator_params=None,
        primary_ip_version=None,
        ipolicy=None,
        prealloc_wipe_disks=False,
        use_external_mip_script=False,
        hv_state=None,
        disk_state=None,
        enabled_disk_templates=None,
        install_image=None,
        zeroing_image=None,
        compression_tools=None,
        enabled_user_shutdown=False):
    """Initialise the cluster.

  Validates the environment and all given parameters, writes the initial
  cluster configuration and starts the node daemons.

  @type cluster_name: string
  @param cluster_name: name of the new cluster

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk_templates to be used in this
    cluster

  @type enabled_user_shutdown: bool
  @param enabled_user_shutdown: whether user shutdown is enabled cluster
                                wide

  """
    # TODO: complete the docstring
    if config.ConfigWriter.IsCluster():
        raise errors.OpPrereqError("Cluster is already initialised",
                                   errors.ECODE_STATE)

    # The data, queue and archive directories may only contain directories
    # at this point; anything else is a leftover from an old cluster.
    data_dir = vcluster.AddNodePrefix(pathutils.DATA_DIR)
    queue_dir = vcluster.AddNodePrefix(pathutils.QUEUE_DIR)
    archive_dir = vcluster.AddNodePrefix(pathutils.JOB_QUEUE_ARCHIVE_DIR)
    for ddir in [queue_dir, data_dir, archive_dir]:
        if os.path.isdir(ddir):
            for entry in os.listdir(ddir):
                if not os.path.isdir(os.path.join(ddir, entry)):
                    raise errors.OpPrereqError(
                        "%s contains non-directory entries like %s. Remove left-overs of an"
                        " old cluster before initialising a new one" %
                        (ddir, entry), errors.ECODE_STATE)

    if not enabled_hypervisors:
        raise errors.OpPrereqError(
            "Enabled hypervisors list must contain at"
            " least one member", errors.ECODE_INVAL)
    invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
    if invalid_hvs:
        raise errors.OpPrereqError(
            "Enabled hypervisors contains invalid"
            " entries: %s" % invalid_hvs, errors.ECODE_INVAL)

    _InitCheckEnabledDiskTemplates(enabled_disk_templates)

    # Determine the IP address class from the requested IP version.
    try:
        ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
    except errors.ProgrammerError:
        raise errors.OpPrereqError(
            "Invalid primary ip version: %d." % primary_ip_version,
            errors.ECODE_INVAL)

    # This host's resolved IP must be a valid, non-loopback address owned
    # by this host, since it becomes the initial master.
    hostname = netutils.GetHostname(family=ipcls.family)
    if not ipcls.IsValid(hostname.ip):
        raise errors.OpPrereqError(
            "This host's IP (%s) is not a valid IPv%d"
            " address." % (hostname.ip, primary_ip_version),
            errors.ECODE_INVAL)

    if ipcls.IsLoopback(hostname.ip):
        raise errors.OpPrereqError(
            "This host's IP (%s) resolves to a loopback"
            " address. Please fix DNS or %s." %
            (hostname.ip, pathutils.ETC_HOSTS), errors.ECODE_ENVIRON)

    if not ipcls.Own(hostname.ip):
        raise errors.OpPrereqError(
            "Inconsistency: this host's name resolves"
            " to %s,\nbut this ip address does not"
            " belong to this host" % hostname.ip, errors.ECODE_ENVIRON)

    clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

    # The cluster IP must not be in use yet.
    if netutils.TcpPing(clustername.ip,
                        constants.DEFAULT_NODED_PORT,
                        timeout=5):
        raise errors.OpPrereqError("Cluster IP already active",
                                   errors.ECODE_NOTUNIQUE)

    # The secondary IP is always IPv4; default it to the primary IP when
    # possible.
    if not secondary_ip:
        if primary_ip_version == constants.IP6_VERSION:
            raise errors.OpPrereqError(
                "When using a IPv6 primary address, a valid"
                " IPv4 address must be given as secondary", errors.ECODE_INVAL)
        secondary_ip = hostname.ip

    if not netutils.IP4Address.IsValid(secondary_ip):
        raise errors.OpPrereqError(
            "Secondary IP address (%s) has to be a valid"
            " IPv4 address." % secondary_ip, errors.ECODE_INVAL)

    if not netutils.IP4Address.Own(secondary_ip):
        raise errors.OpPrereqError(
            "You gave %s as secondary IP,"
            " but it does not belong to this host." % secondary_ip,
            errors.ECODE_ENVIRON)

    if master_netmask is not None:
        if not ipcls.ValidateNetmask(master_netmask):
            raise errors.OpPrereqError(
                "CIDR netmask (%s) not valid for IPv%s " %
                (master_netmask, primary_ip_version), errors.ECODE_INVAL)
    else:
        # Default to the full prefix length of the address family.
        master_netmask = ipcls.iplen

    if vg_name:
        # Check if volume group is valid
        vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(),
                                              vg_name, constants.MIN_VG_SIZE)
        if vgstatus:
            raise errors.OpPrereqError("Error: %s" % vgstatus,
                                       errors.ECODE_INVAL)

    drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
    _InitCheckDrbdHelper(drbd_helper, drbd_enabled)

    logging.debug("Stopping daemons (if any are running)")
    result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
    if result.failed:
        raise errors.OpExecError("Could not stop daemons, command %s"
                                 " had exitcode %s and error '%s'" %
                                 (result.cmd, result.exit_code, result.output))

    # Prepare the storage directories for the enabled file-based templates.
    file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
                                           file_storage_dir)
    shared_file_storage_dir = _PrepareSharedFileStorage(
        enabled_disk_templates, shared_file_storage_dir)
    gluster_storage_dir = _PrepareGlusterStorage(enabled_disk_templates,
                                                 gluster_storage_dir)

    # The MAC prefix must look like three colon-separated octets.
    # NOTE(review): the pattern accepts any lowercase letter, not only hex
    # digits -- confirm whether this is intentional.
    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
        raise errors.OpPrereqError(
            "Invalid mac prefix given '%s'" % mac_prefix, errors.ECODE_INVAL)

    if not nicparams.get('mode', None) == constants.NIC_MODE_OVS:
        # Do not do this check if mode=openvswitch, since the openvswitch is not
        # created yet
        result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
        if result.failed:
            raise errors.OpPrereqError(
                "Invalid master netdev given (%s): '%s'" %
                (master_netdev, result.output.strip()), errors.ECODE_INVAL)

    dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
    utils.EnsureDirs(dirs)

    # Normalize and type-check the various parameter dictionaries.
    objects.UpgradeBeParams(beparams)
    utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
    utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

    objects.NIC.CheckParameterSyntax(nicparams)

    full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
    _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy,
                                           enabled_disk_templates)

    if ndparams is not None:
        utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
    else:
        ndparams = dict(constants.NDC_DEFAULTS)

    # This is ugly, as we modify the dict itself
    # FIXME: Make utils.ForceDictType pure functional or write a wrapper
    # around it
    if hv_state:
        for hvname, hvs_data in hv_state.items():
            utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
            hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
    else:
        hv_state = dict((hvname, constants.HVST_DEFAULTS)
                        for hvname in enabled_hypervisors)

    # FIXME: disk_state has no default values yet
    if disk_state:
        for storage, ds_data in disk_state.items():
            if storage not in constants.DS_VALID_TYPES:
                raise errors.OpPrereqError(
                    "Invalid storage type in disk state: %s" % storage,
                    errors.ECODE_INVAL)
            for ds_name, state in ds_data.items():
                utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
                ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

    # hvparams is a mapping of hypervisor->hvparams dict
    for hv_name, hv_params in hvparams.items():
        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
        hv_class = hypervisor.GetHypervisor(hv_name)
        hv_class.CheckParameterSyntax(hv_params)

    # diskparams is a mapping of disk-template->diskparams dict
    for template, dt_params in diskparams.items():
        param_keys = set(dt_params.keys())
        default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
        if param_keys > default_param_keys:
            unknown_params = param_keys - default_param_keys
            raise errors.OpPrereqError(
                "Invalid parameters for disk template %s:"
                " %s" % (template, utils.CommaJoin(unknown_params)),
                errors.ECODE_INVAL)
        utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
        if template == constants.DT_DRBD8 and vg_name is not None:
            # The default METAVG value is equal to the VG name set at init time,
            # if provided
            dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

    try:
        utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
    except errors.OpPrereqError as err:
        raise errors.OpPrereqError("While verify diskparam options: %s" % err,
                                   errors.ECODE_INVAL)

    # set up ssh config and /etc/hosts
    rsa_sshkey = ""
    dsa_sshkey = ""
    if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
        sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
        rsa_sshkey = sshline.split(" ")[1]
    if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
        sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
        dsa_sshkey = sshline.split(" ")[1]
    if not rsa_sshkey and not dsa_sshkey:
        raise errors.OpPrereqError("Failed to find SSH public keys",
                                   errors.ECODE_ENVIRON)

    if modify_etc_hosts:
        utils.AddHostToEtcHosts(hostname.name, hostname.ip)

    if modify_ssh_setup:
        ssh.InitSSHSetup(ssh_key_type, ssh_key_bits)

    if default_iallocator is not None:
        alloc_script = utils.FindFile(default_iallocator,
                                      constants.IALLOCATOR_SEARCH_PATH,
                                      os.path.isfile)
        if alloc_script is None:
            raise errors.OpPrereqError(
                "Invalid default iallocator script '%s'"
                " specified" % default_iallocator, errors.ECODE_INVAL)
    else:
        # default to htools
        if utils.FindFile(constants.IALLOC_HAIL,
                          constants.IALLOCATOR_SEARCH_PATH, os.path.isfile):
            default_iallocator = constants.IALLOC_HAIL

    # check if we have all the users we need
    try:
        runtime.GetEnts()
    except errors.ConfigurationError as err:
        raise errors.OpPrereqError(
            "Required system user/group missing: %s" % err,
            errors.ECODE_ENVIRON)

    candidate_certs = {}

    now = time.time()

    # NOTE(review): "cluster" here is presumably a module imported at file
    # level that provides CheckCompressionTools -- confirm the import exists.
    if compression_tools is not None:
        cluster.CheckCompressionTools(compression_tools)

    # Every known data collector starts out active with the default interval
    # (stored in microseconds).
    initial_dc_config = dict(active=True,
                             interval=int(constants.MOND_TIME_INTERVAL * 1e6))
    data_collectors = dict((name, initial_dc_config.copy())
                           for name in constants.DATA_COLLECTOR_NAMES)

    # init of cluster config file
    cluster_config = objects.Cluster(
        serial_no=1,
        rsahostkeypub=rsa_sshkey,
        dsahostkeypub=dsa_sshkey,
        highest_used_port=(constants.FIRST_DRBD_PORT - 1),
        mac_prefix=mac_prefix,
        volume_group_name=vg_name,
        tcpudp_port_pool=set(),
        master_ip=clustername.ip,
        master_netmask=master_netmask,
        master_netdev=master_netdev,
        cluster_name=clustername.name,
        file_storage_dir=file_storage_dir,
        shared_file_storage_dir=shared_file_storage_dir,
        gluster_storage_dir=gluster_storage_dir,
        enabled_hypervisors=enabled_hypervisors,
        beparams={constants.PP_DEFAULT: beparams},
        nicparams={constants.PP_DEFAULT: nicparams},
        ndparams=ndparams,
        hvparams=hvparams,
        diskparams=diskparams,
        candidate_pool_size=candidate_pool_size,
        modify_etc_hosts=modify_etc_hosts,
        modify_ssh_setup=modify_ssh_setup,
        uid_pool=uid_pool,
        ctime=now,
        mtime=now,
        maintain_node_health=maintain_node_health,
        data_collectors=data_collectors,
        drbd_usermode_helper=drbd_helper,
        default_iallocator=default_iallocator,
        default_iallocator_params=default_iallocator_params,
        primary_ip_family=ipcls.family,
        prealloc_wipe_disks=prealloc_wipe_disks,
        use_external_mip_script=use_external_mip_script,
        ipolicy=full_ipolicy,
        hv_state_static=hv_state,
        disk_state_static=disk_state,
        enabled_disk_templates=enabled_disk_templates,
        candidate_certs=candidate_certs,
        osparams={},
        osparams_private_cluster={},
        install_image=install_image,
        zeroing_image=zeroing_image,
        compression_tools=compression_tools,
        enabled_user_shutdown=enabled_user_shutdown,
        ssh_key_type=ssh_key_type,
        ssh_key_bits=ssh_key_bits,
    )
    master_node_config = objects.Node(
        name=hostname.name,
        primary_ip=hostname.ip,
        secondary_ip=secondary_ip,
        serial_no=1,
        master_candidate=True,
        offline=False,
        drained=False,
        ctime=now,
        mtime=now,
    )
    # Write the configuration and the derived ssconf files to disk.
    InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
    cfg = config.ConfigWriter(offline=True)
    ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
    cfg.Update(cfg.GetClusterInfo(), logging.error)
    ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

    master_uuid = cfg.GetMasterNode()
    if modify_ssh_setup:
        ssh.InitPubKeyFile(master_uuid, ssh_key_type)
    # set up the inter-node password and certificate
    _InitGanetiServerSetup(hostname.name, cfg)

    logging.debug("Starting daemons")
    result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
    if result.failed:
        raise errors.OpExecError("Could not start daemons, command %s"
                                 " had exitcode %s and error %s" %
                                 (result.cmd, result.exit_code, result.output))

    _WaitForMasterDaemon()