def testUpgradeEnabledDiskTemplates(self):
  cfg = objects.ConfigData()
  cfg.cluster = objects.Cluster()
  cfg.cluster.volume_group_name = "myvg"
  instance1 = objects.Instance()
  instance1.disk_template = constants.DT_DISKLESS
  instance2 = objects.Instance()
  instance2.disk_template = constants.DT_RBD
  cfg.instances = {"myinstance1": instance1, "myinstance2": instance2}
  nodegroup = objects.NodeGroup()
  nodegroup.ipolicy = {}
  nodegroup.ipolicy[constants.IPOLICY_DTS] = [instance1.disk_template,
                                              constants.DT_BLOCK]
  cfg.cluster.ipolicy = {}
  cfg.cluster.ipolicy[constants.IPOLICY_DTS] = \
    [constants.DT_EXT, constants.DT_DISKLESS]
  cfg.nodegroups = {"mynodegroup": nodegroup}
  cfg._UpgradeEnabledDiskTemplates()

  expected_disk_templates = [
    constants.DT_DRBD8,
    constants.DT_PLAIN,
    instance1.disk_template,
    instance2.disk_template,
    ]
  self.assertEqual(set(expected_disk_templates),
                   set(cfg.cluster.enabled_disk_templates))
  self.assertEqual(set([instance1.disk_template]),
                   set(cfg.cluster.ipolicy[constants.IPOLICY_DTS]))

def test(self):
  instance = objects.Instance(name="fake.example.com")
  node = objects.Node(name="fakenode.example.com", ndparams={})
  group = objects.NodeGroup(name="default", ndparams={})
  cons = hv_fake.FakeHypervisor.GetInstanceConsole(instance, node, group,
                                                   {}, {})
  self.assertTrue(cons.Validate())
  self.assertEqual(cons.kind, constants.CONS_MESSAGE)

def testAddGroupDoesNotPreserveFields(self):
  cfg = self._get_object()
  group = objects.NodeGroup(name="test", members=[],
                            serial_no=17, ctime=123, mtime=456)
  cfg.AddNodeGroup(group, "my-job")
  self.assertEqual(1, group.serial_no)
  self.assert_(group.ctime > 1200000000)
  self.assert_(group.mtime > 1200000000)

def testAddGroupPreservesFields(self):
  cfg = self._get_object()
  group = objects.NodeGroup(
    name="test", members=[],
    alloc_policy=constants.ALLOC_POLICY_LAST_RESORT)
  cfg.AddNodeGroup(group, "my-job")
  self.assertEqual(constants.ALLOC_POLICY_LAST_RESORT, group.alloc_policy)

def testAddGroupCanSkipUUIDCheck(self):
  cfg = self._get_object()
  uuid = cfg.GenerateUniqueID("my-job")
  group = objects.NodeGroup(name="test", members=[], uuid=uuid,
                            serial_no=17, ctime=123, mtime=456)

  self.assertRaises(errors.ConfigurationError,
                    cfg.AddNodeGroup, group, "my-job")

  cfg.AddNodeGroup(group, "my-job", check_uuid=False)  # Does not raise.
  self.assertEqual(uuid, group.uuid)

def InitConfig(version, cluster_config, master_node_config,
               cfg_file=pathutils.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master node,
  and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  cluster_config.master_node = master_node_config.uuid
  nodes = {
    master_node_config.uuid: master_node_config,
    }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.uuid],
    diskparams={},
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  maintenance = objects.Maintenance(serial_no=1, ctime=now, mtime=now)
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   networks={},
                                   disks={},
                                   filters={},
                                   maintenance=maintenance,
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0o600)

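# Hedged usage sketch for InitConfig above: the cluster/node field values and
# the temporary output path are illustrative assumptions, not what
# bootstrap.InitCluster would normally compute; only constants.CONFIG_VERSION
# and the objects.* slot names are taken from Ganeti itself.
cluster = objects.Cluster(serial_no=1,
                          volume_group_name="xenvg",
                          enabled_disk_templates=[constants.DT_PLAIN])
master_node = objects.Node(name="node1.example.com",
                           primary_ip="192.0.2.10",
                           secondary_ip="192.0.2.10",
                           ndparams={})
InitConfig(constants.CONFIG_VERSION, cluster, master_node,
           cfg_file="/tmp/config.data")
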
def test(self):
  instance = objects.Instance(name="lxc.example.com",
                              primary_node="node199-uuid")
  node = objects.Node(name="node199", uuid="node199-uuid", ndparams={})
  group = objects.NodeGroup(name="group991", ndparams={})
  cons = hv_lxc.LXCHypervisor.GetInstanceConsole(instance, node, group,
                                                 {}, {})
  self.assertEqual(cons.Validate(), None)
  self.assertEqual(cons.kind, constants.CONS_SSH)
  self.assertEqual(cons.host, node.name)
  self.assertEqual(cons.command[-1], instance.name)

def test(self):
  instance = objects.Instance(name="fake.example.com",
                              primary_node="node837-uuid")
  node = objects.Node(name="node837", uuid="node837-uuid", ndparams={})
  group = objects.NodeGroup(name="group164", ndparams={})
  cons = hv_chroot.ChrootManager.GetInstanceConsole(instance, node, group,
                                                    {}, {},
                                                    root_dir=self.tmpdir)
  self.assertEqual(cons.Validate(), None)
  self.assertEqual(cons.kind, constants.CONS_SSH)
  self.assertEqual(cons.host, node.name)

def test(self):
  hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL}
  for cls in [hv_xen.XenPvmHypervisor(), hv_xen.XenHvmHypervisor()]:
    instance = objects.Instance(name="xen.example.com",
                                primary_node="node24828-uuid")
    node = objects.Node(name="node24828", uuid="node24828-uuid", ndparams={})
    group = objects.NodeGroup(name="group52341", ndparams={})
    cons = cls.GetInstanceConsole(instance, node, group, hvparams, {})
    self.assertEqual(cons.Validate(), None)
    self.assertEqual(cons.kind, constants.CONS_SSH)
    self.assertEqual(cons.host, node.name)
    self.assertEqual(cons.command[-1], instance.name)

def testNoConsole(self):
  instance = objects.Instance(name="kvm.example.com",
                              primary_node="node24325",
                              network_port=0)
  node = objects.Node(name="node24325", uuid="node24325-uuid", ndparams={})
  group = objects.NodeGroup(name="group9184", ndparams={})
  hvparams = {
    constants.HV_SERIAL_CONSOLE: False,
    constants.HV_VNC_BIND_ADDRESS: None,
    constants.HV_KVM_SPICE_BIND: None,
    }
  cons = self.MakeConsole(instance, node, group, hvparams)
  self.assertEqual(cons.kind, constants.CONS_MESSAGE)

def testSerial(self):
  instance = objects.Instance(name="kvm.example.com",
                              primary_node="node6017-uuid")
  node = objects.Node(name="node6017", uuid="node6017-uuid", ndparams={})
  group = objects.NodeGroup(name="group6134", ndparams={})
  hvparams = {
    constants.HV_SERIAL_CONSOLE: True,
    constants.HV_VNC_BIND_ADDRESS: None,
    constants.HV_KVM_SPICE_BIND: None,
    }
  cons = self._Test(instance, node, group, hvparams)
  self.assertEqual(cons.kind, constants.CONS_SSH)
  self.assertEqual(cons.host, node.name)
  self.assertEqual(cons.command[0], pathutils.KVM_CONSOLE_WRAPPER)
  self.assertEqual(cons.command[1], constants.SOCAT_PATH)

def testSpice(self):
  instance = objects.Instance(name="kvm.example.com",
                              primary_node="node7235",
                              network_port=11000)
  node = objects.Node(name="node7235", uuid="node7235-uuid", ndparams={})
  group = objects.NodeGroup(name="group0132", ndparams={})
  hvparams = {
    constants.HV_SERIAL_CONSOLE: False,
    constants.HV_VNC_BIND_ADDRESS: None,
    constants.HV_KVM_SPICE_BIND: "192.0.2.1",
    }
  cons = self._Test(instance, node, group, hvparams)
  self.assertEqual(cons.kind, constants.CONS_SPICE)
  self.assertEqual(cons.host, "192.0.2.1")
  self.assertEqual(cons.port, 11000)

def testFillNdParamsNodeGroup(self):
  fake_node = objects.Node(name="test", ndparams={}, group="testgroup")
  group_ndparams = {
    constants.ND_OOB_PROGRAM: "/bin/group-oob",
    constants.ND_SPINDLE_COUNT: 10,
    constants.ND_EXCLUSIVE_STORAGE: True,
    constants.ND_OVS: True,
    constants.ND_OVS_LINK: "eth2",
    constants.ND_OVS_NAME: "openvswitch",
    constants.ND_SSH_PORT: 122,
    }
  fake_group = objects.NodeGroup(name="testgroup", ndparams=group_ndparams)
  self.assertEqual(group_ndparams,
                   self.fake_cl.FillND(fake_node, fake_group))

def testVnc(self):
  instance = objects.Instance(name="kvm.example.com",
                              primary_node="node7235-uuid",
                              network_port=constants.VNC_BASE_PORT + 10)
  node = objects.Node(name="node7235", uuid="node7235-uuid", ndparams={})
  group = objects.NodeGroup(name="group3632", ndparams={})
  hvparams = {
    constants.HV_SERIAL_CONSOLE: False,
    constants.HV_VNC_BIND_ADDRESS: "192.0.2.1",
    constants.HV_KVM_SPICE_BIND: None,
    }
  cons = self.MakeConsole(instance, node, group, hvparams)
  self.assertEqual(cons.kind, constants.CONS_VNC)
  self.assertEqual(cons.host, "192.0.2.1")
  self.assertEqual(cons.port, constants.VNC_BASE_PORT + 10)
  self.assertEqual(cons.display, 10)

def testFillNdParamsNode(self):
  node_ndparams = {
    constants.ND_OOB_PROGRAM: "/bin/node-oob",
    constants.ND_SPINDLE_COUNT: 2,
    constants.ND_EXCLUSIVE_STORAGE: True,
    constants.ND_OVS: True,
    constants.ND_OVS_LINK: "eth2",
    constants.ND_OVS_NAME: "openvswitch",
    constants.ND_SSH_PORT: 222,
    constants.ND_CPU_SPEED: 1.1,
    }
  fake_node = objects.Node(name="test", ndparams=node_ndparams,
                           group="testgroup")
  fake_group = objects.NodeGroup(name="testgroup", ndparams={})
  self.assertEqual(node_ndparams,
                   self.fake_cl.FillND(fake_node, fake_group))

def Exec(self, feedback_fn):
  """Add the node group to the cluster.

  """
  group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
                                uuid=self.group_uuid,
                                alloc_policy=self.op.alloc_policy,
                                ndparams=self.op.ndparams,
                                diskparams=self.new_diskparams,
                                ipolicy=self.op.ipolicy,
                                hv_state_static=self.new_hv_state,
                                disk_state_static=self.new_disk_state)

  self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)

  network_name = self.cfg.GetClusterInfo().instance_communication_network
  if network_name:
    return self._ConnectInstanceCommunicationNetwork(self.cfg,
                                                     self.group_uuid,
                                                     network_name)

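# Hedged sketch of how the Exec() above is normally reached: "gnt-group add"
# submits an OpGroupAdd opcode whose fields end up on self.op; the opcode name
# and fields below are assumed from the Ganeti opcode definitions, and the
# group name is purely illustrative.
op = opcodes.OpGroupAdd(group_name="rack2",
                        alloc_policy=constants.ALLOC_POLICY_PREFERRED,
                        ndparams={})
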
def AddNewNodeGroup(self, uuid=None, name=None, ndparams=None,
                    diskparams=None, ipolicy=None, hv_state_static=None,
                    disk_state_static=None, alloc_policy=None,
                    networks=None):
  """Add a new L{objects.NodeGroup} to the cluster configuration.

  See L{objects.NodeGroup} for parameter documentation.

  @rtype: L{objects.NodeGroup}
  @return: the newly added node group

  """
  group_id = self._cur_group_id
  self._cur_group_id += 1

  if uuid is None:
    uuid = self._GetUuid()
  if name is None:
    name = "mock_group_%d" % group_id
  if networks is None:
    networks = {}

  group = objects.NodeGroup(uuid=uuid,
                            name=name,
                            ndparams=ndparams,
                            diskparams=diskparams,
                            ipolicy=ipolicy,
                            hv_state_static=hv_state_static,
                            disk_state_static=disk_state_static,
                            alloc_policy=alloc_policy,
                            networks=networks,
                            members=[])

  self._UnlockedAddNodeGroup(group, None, True)
  return group

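# Hypothetical use of the mock helper above in a unit test; assumes the
# enclosing mock configuration object (here "cfg") also exposes the regular
# ConfigWriter query methods such as GetNodeGroup.
group = cfg.AddNewNodeGroup(name="testgroup",
                            alloc_policy=constants.ALLOC_POLICY_PREFERRED)
assert cfg.GetNodeGroup(group.uuid).name == "testgroup"
assert group.members == []
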
def testAssignGroupNodes(self):
  me = netutils.Hostname()
  cfg = self._get_object()

  # Create two groups
  grp1 = objects.NodeGroup(name="grp1", members=[],
                           uuid="2f2fadf7-2a70-4a23-9ab5-2568c252032c")
  grp1_serial = 1
  cfg.AddNodeGroup(grp1, "job")

  grp2 = objects.NodeGroup(name="grp2", members=[],
                           uuid="798d0de3-680f-4a0e-b29a-0f54f693b3f1")
  grp2_serial = 1
  cfg.AddNodeGroup(grp2, "job")
  self.assertEqual(set(map(operator.attrgetter("name"),
                           cfg.GetAllNodeGroupsInfo().values())),
                   set(["grp1", "grp2", constants.INITIAL_NODE_GROUP_NAME]))

  # No-op
  cluster_serial = cfg.GetClusterInfo().serial_no
  cfg.AssignGroupNodes([])
  cluster_serial += 1

  # Create two nodes
  node1 = objects.Node(name="node1", group=grp1.uuid, ndparams={},
                       uuid="node1-uuid")
  node1_serial = 1
  node2 = objects.Node(name="node2", group=grp2.uuid, ndparams={},
                       uuid="node2-uuid")
  node2_serial = 1
  cfg.AddNode(node1, "job")
  cfg.AddNode(node2, "job")
  cluster_serial += 2
  self.assertEqual(set(cfg.GetNodeList()),
                   set(["node1-uuid", "node2-uuid",
                        cfg.GetNodeInfoByName(me.name).uuid]))

  (grp1, grp2) = [cfg.GetNodeGroup(grp.uuid) for grp in (grp1, grp2)]

  def _VerifySerials():
    self.assertEqual(cfg.GetClusterInfo().serial_no, cluster_serial)
    self.assertEqual(node1.serial_no, node1_serial)
    self.assertEqual(node2.serial_no, node2_serial)
    self.assertEqual(grp1.serial_no, grp1_serial)
    self.assertEqual(grp2.serial_no, grp2_serial)

  _VerifySerials()

  self.assertEqual(set(grp1.members), set(["node1-uuid"]))
  self.assertEqual(set(grp2.members), set(["node2-uuid"]))

  # Check invalid nodes and groups
  self.assertRaises(errors.ConfigurationError, cfg.AssignGroupNodes, [
    ("unknown.node.example.com", grp2.uuid),
    ])
  self.assertRaises(errors.ConfigurationError, cfg.AssignGroupNodes, [
    (node1.name, "unknown-uuid"),
    ])

  self.assertEqual(node1.group, grp1.uuid)
  self.assertEqual(node2.group, grp2.uuid)
  self.assertEqual(set(grp1.members), set(["node1-uuid"]))
  self.assertEqual(set(grp2.members), set(["node2-uuid"]))

  # Another no-op
  cfg.AssignGroupNodes([])
  cluster_serial += 1
  _VerifySerials()

  # Assign to the same group (should be a no-op)
  self.assertEqual(node2.group, grp2.uuid)
  cfg.AssignGroupNodes([
    (node2.uuid, grp2.uuid),
    ])
  cluster_serial += 1
  self.assertEqual(node2.group, grp2.uuid)
  _VerifySerials()
  self.assertEqual(set(grp1.members), set(["node1-uuid"]))
  self.assertEqual(set(grp2.members), set(["node2-uuid"]))

  # Assign node 2 to group 1
  self.assertEqual(node2.group, grp2.uuid)
  cfg.AssignGroupNodes([
    (node2.uuid, grp1.uuid),
    ])
  (grp1, grp2) = [cfg.GetNodeGroup(grp.uuid) for grp in (grp1, grp2)]
  (node1, node2) = [cfg.GetNodeInfo(node.uuid) for node in (node1, node2)]
  cluster_serial += 1
  node2_serial += 1
  grp1_serial += 1
  grp2_serial += 1
  self.assertEqual(node2.group, grp1.uuid)
  _VerifySerials()
  self.assertEqual(set(grp1.members), set(["node1-uuid", "node2-uuid"]))
  self.assertFalse(grp2.members)

  # And assign both nodes to group 2
  self.assertEqual(node1.group, grp1.uuid)
  self.assertEqual(node2.group, grp1.uuid)
  self.assertNotEqual(grp1.uuid, grp2.uuid)
  cfg.AssignGroupNodes([
    (node1.uuid, grp2.uuid),
    (node2.uuid, grp2.uuid),
    ])
  (grp1, grp2) = [cfg.GetNodeGroup(grp.uuid) for grp in (grp1, grp2)]
  (node1, node2) = [cfg.GetNodeInfo(node.uuid) for node in (node1, node2)]
  cluster_serial += 1
  node1_serial += 1
  node2_serial += 1
  grp1_serial += 1
  grp2_serial += 1
  self.assertEqual(node1.group, grp2.uuid)
  self.assertEqual(node2.group, grp2.uuid)
  _VerifySerials()
  self.assertFalse(grp1.members)
  self.assertEqual(set(grp2.members), set(["node1-uuid", "node2-uuid"]))

def testAddGroupFillsFieldsIfMissing(self):
  cfg = self._get_object()
  group = objects.NodeGroup(name="test", members=[])
  cfg.AddNodeGroup(group, "my-job")
  self.assert_(utils.UUID_RE.match(group.uuid))
  self.assertEqual(constants.ALLOC_POLICY_PREFERRED, group.alloc_policy)

def testFillNdParamsCluster(self):
  fake_node = objects.Node(name="test", ndparams={}, group="testgroup")
  fake_group = objects.NodeGroup(name="testgroup", ndparams={})
  self.assertEqual(self.fake_cl.ndparams,
                   self.fake_cl.FillND(fake_node, fake_group))