Code example #1
    def testSimple(self):
        self.maxDiff = None
        node1 = objects.Node(name="node1",
                             primary_ip="192.0.2.1",
                             secondary_ip="192.0.2.2",
                             offline=False,
                             drained=False,
                             master_candidate=True,
                             master_capable=True,
                             group="11112222",
                             vm_capable=False)

        node2 = objects.Node(name="node2",
                             primary_ip="192.0.2.3",
                             secondary_ip="192.0.2.4",
                             offline=True,
                             drained=False,
                             master_candidate=False,
                             master_capable=False,
                             group="11112222",
                             vm_capable=True)

        assert node1 != node2

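        # The mapping keys are deliberately unused; results are keyed by node name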
        ninfo = {
            "#unused-1#": node1,
            "#unused-2#": node2,
        }

        self.assertEqual(
            self.fn(ninfo), {
                "node1": {
                    "tags": [],
                    "primary_ip": "192.0.2.1",
                    "secondary_ip": "192.0.2.2",
                    "offline": False,
                    "drained": False,
                    "master_candidate": True,
                    "group": "11112222",
                    "master_capable": True,
                    "vm_capable": False,
                    "ndparams": None,
                    "hv_state": None,
                },
                "node2": {
                    "tags": [],
                    "primary_ip": "192.0.2.3",
                    "secondary_ip": "192.0.2.4",
                    "offline": True,
                    "drained": False,
                    "master_candidate": False,
                    "group": "11112222",
                    "master_capable": False,
                    "vm_capable": True,
                    "ndparams": None,
                    "hv_state": None,
                },
            })
Code example #2
    def _init_cluster(self, cfg):
        """Initializes the cfg object"""
        me = netutils.Hostname()
        ip = constants.IP4_ADDRESS_LOCALHOST
        # master_ip must not conflict with the node ip address
        master_ip = "127.0.0.2"

        cluster_config = objects.Cluster(
            serial_no=1,
            rsahostkeypub="",
            dsahostkeypub="",
            highest_used_port=(constants.FIRST_DRBD_PORT - 1),
            mac_prefix="aa:00:00",
            volume_group_name="xenvg",
            drbd_usermode_helper="/bin/true",
            nicparams={constants.PP_DEFAULT: constants.NICC_DEFAULTS},
            ndparams=constants.NDC_DEFAULTS,
            tcpudp_port_pool=set(),
            enabled_hypervisors=[constants.HT_FAKE],
            master_node=me.name,
            master_ip=master_ip,
            master_netdev=constants.DEFAULT_BRIDGE,
            cluster_name="cluster.local",
            file_storage_dir="/tmp",
            uid_pool=[],
        )

        master_node_config = objects.Node(name=me.name,
                                          primary_ip=me.ip,
                                          secondary_ip=ip,
                                          serial_no=1,
                                          master_candidate=True)

        bootstrap.InitConfig(constants.CONFIG_VERSION, cluster_config,
                             master_node_config, self.cfg_file)
Code example #3
 def testOfflineNode(self):
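    # Offline nodes must be reported offline no matter what the whitelist says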
   for whitelist in [None, [], set(), ["node1"], ["node2"]]:
     result = self.fn({
       "node1": objects.Node(name="node1", offline=True)
       }, whitelist)
     self.assertEqual(len(result), 1)
     self.assertTrue(result["node1"]["offline"])
Code example #4
    def testMultiSomeOffline(self):
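        # Build 254 nodes; every third one (i % 3 == 0) is marked offline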
        nodes = dict(("node%s-uuid" % i,
                      objects.Node(name="node%s.example.com" % i,
                                   offline=((i % 3) == 0),
                                   primary_ip="192.0.2.%s" % i,
                                   uuid="node%s-uuid" % i))
                     for i in range(1, 255))

        # Resolve no names
        self.assertEqual(
            rpc._NodeConfigResolver(NotImplemented, lambda: nodes, [], None),
            [])

        # Offline, online and unknown hosts
        self.assertEqual(
            rpc._NodeConfigResolver(NotImplemented, lambda: nodes, [
                "node3-uuid",
                "node92-uuid",
                "node54-uuid",
                "unknown.example.com",
            ], None), [
                ("node3.example.com", rpc._OFFLINE, "node3-uuid"),
                ("node92.example.com", "192.0.2.92", "node92-uuid"),
                ("node54.example.com", rpc._OFFLINE, "node54-uuid"),
                ("unknown.example.com", "unknown.example.com",
                 "unknown.example.com"),
            ])
Code example #5
    def testDiskState(self):
        node = objects.Node(name="node32087.example.com",
                            disk_state={
                                constants.DT_PLAIN: {
                                    "lv32352":
                                    objects.NodeDiskState(total=128),
                                    "lv2082": objects.NodeDiskState(total=512),
                                },
                            })

        node2 = objects.Node.FromDict(node.ToDict())

        # Make sure nothing can reference it anymore
        del node

        self.assertEqual(node2.name, "node32087.example.com")
        self.assertEqual(frozenset(node2.disk_state),
                         frozenset([
                             constants.DT_PLAIN,
                         ]))
        self.assertEqual(frozenset(node2.disk_state[constants.DT_PLAIN]),
                         frozenset(["lv32352", "lv2082"]))
        self.assertEqual(node2.disk_state[constants.DT_PLAIN]["lv2082"].total,
                         512)
        self.assertEqual(node2.disk_state[constants.DT_PLAIN]["lv32352"].total,
                         128)
Code example #6
 def test(self):
     instance = objects.Instance(name="fake.example.com")
     node = objects.Node(name="fakenode.example.com", ndparams={})
     group = objects.NodeGroup(name="default", ndparams={})
     cons = hv_fake.FakeHypervisor.GetInstanceConsole(
         instance, node, group, {}, {})
     self.assertTrue(cons.Validate())
     self.assertEqual(cons.kind, constants.CONS_MESSAGE)
Code example #7
 def testFilterEsNdp(self):
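      # UpgradeConfig() should drop exclusive_storage from ndparams while
      # leaving other node parameters such as spindle_count untouched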
     node1 = objects.Node(name="node11673.example.com",
                          ndparams={
                              constants.ND_EXCLUSIVE_STORAGE: True,
                          })
     node2 = objects.Node(name="node11674.example.com",
                          ndparams={
                              constants.ND_SPINDLE_COUNT: 3,
                              constants.ND_EXCLUSIVE_STORAGE: False,
                          })
     self.assertTrue(constants.ND_EXCLUSIVE_STORAGE in node1.ndparams)
     node1.UpgradeConfig()
     self.assertFalse(constants.ND_EXCLUSIVE_STORAGE in node1.ndparams)
     self.assertTrue(constants.ND_EXCLUSIVE_STORAGE in node2.ndparams)
     self.assertTrue(constants.ND_SPINDLE_COUNT in node2.ndparams)
     node2.UpgradeConfig()
     self.assertFalse(constants.ND_EXCLUSIVE_STORAGE in node2.ndparams)
     self.assertTrue(constants.ND_SPINDLE_COUNT in node2.ndparams)
Code example #8
 def test(self):
   instance = objects.Instance(name="fake.example.com",
                               primary_node="node837-uuid")
   node = objects.Node(name="node837", uuid="node837-uuid", ndparams={})
   group = objects.NodeGroup(name="group164", ndparams={})
   cons = hv_chroot.ChrootManager.GetInstanceConsole(instance, node, group,
                                                     {}, {},
                                                     root_dir=self.tmpdir)
   self.assertEqual(cons.Validate(), None)
   self.assertEqual(cons.kind, constants.CONS_SSH)
   self.assertEqual(cons.host, node.name)
Code example #9
 def test(self):
     instance = objects.Instance(name="lxc.example.com",
                                 primary_node="node199-uuid")
     node = objects.Node(name="node199", uuid="node199-uuid", ndparams={})
     group = objects.NodeGroup(name="group991", ndparams={})
     cons = hv_lxc.LXCHypervisor.GetInstanceConsole(instance, node, group,
                                                    {}, {})
     self.assertEqual(cons.Validate(), None)
     self.assertEqual(cons.kind, constants.CONS_SSH)
     self.assertEqual(cons.host, node.name)
     self.assertEqual(cons.command[-1], instance.name)
Code example #10
  def testWhitelist(self):
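    # With no whitelist (None) the real offline flag is kept; otherwise nodes
    # missing from the whitelist are reported as offline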
    for whitelist in [None, [], set(), ["node1"], ["node2"]]:
      result = self.fn({
        "node1": objects.Node(name="node1", offline=False)
        }, whitelist)
      self.assertEqual(len(result), 1)

      if whitelist is None or "node1" in whitelist:
        self.assertFalse(result["node1"]["offline"])
      else:
        self.assertTrue(result["node1"]["offline"])
Code example #11
 def test(self):
   hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL}
   for cls in [hv_xen.XenPvmHypervisor(), hv_xen.XenHvmHypervisor()]:
     instance = objects.Instance(name="xen.example.com",
                                 primary_node="node24828-uuid")
     node = objects.Node(name="node24828", uuid="node24828-uuid",
                         ndparams={})
     group = objects.NodeGroup(name="group52341", ndparams={})
     cons = cls.GetInstanceConsole(instance, node, group, hvparams, {})
     self.assertEqual(cons.Validate(), None)
     self.assertEqual(cons.kind, constants.CONS_SSH)
     self.assertEqual(cons.host, node.name)
     self.assertEqual(cons.command[-1], instance.name)
Code example #12
 def testNoConsole(self):
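    # With serial console, VNC and SPICE all disabled, only a message console
    # can be returned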
   instance = objects.Instance(name="kvm.example.com",
                               primary_node="node24325",
                               network_port=0)
   node = objects.Node(name="node24325", uuid="node24325-uuid",
                       ndparams={})
   group = objects.NodeGroup(name="group9184", ndparams={})
   hvparams = {
     constants.HV_SERIAL_CONSOLE: False,
     constants.HV_VNC_BIND_ADDRESS: None,
     constants.HV_KVM_SPICE_BIND: None,
     }
   cons = self.MakeConsole(instance, node, group, hvparams)
   self.assertEqual(cons.kind, constants.CONS_MESSAGE)
Code example #13
 def testSerial(self):
     instance = objects.Instance(name="kvm.example.com",
                                 primary_node="node6017-uuid")
     node = objects.Node(name="node6017", uuid="node6017-uuid", ndparams={})
     group = objects.NodeGroup(name="group6134", ndparams={})
     hvparams = {
         constants.HV_SERIAL_CONSOLE: True,
         constants.HV_VNC_BIND_ADDRESS: None,
         constants.HV_KVM_SPICE_BIND: None,
     }
     cons = self._Test(instance, node, group, hvparams)
     self.assertEqual(cons.kind, constants.CONS_SSH)
     self.assertEqual(cons.host, node.name)
     self.assertEqual(cons.command[0], pathutils.KVM_CONSOLE_WRAPPER)
     self.assertEqual(cons.command[1], constants.SOCAT_PATH)
Code example #14
 def testFillNdParamsNodeGroup(self):
     fake_node = objects.Node(name="test", ndparams={}, group="testgroup")
     group_ndparams = {
         constants.ND_OOB_PROGRAM: "/bin/group-oob",
         constants.ND_SPINDLE_COUNT: 10,
         constants.ND_EXCLUSIVE_STORAGE: True,
         constants.ND_OVS: True,
         constants.ND_OVS_LINK: "eth2",
         constants.ND_OVS_NAME: "openvswitch",
         constants.ND_SSH_PORT: 122,
     }
     fake_group = objects.NodeGroup(name="testgroup",
                                    ndparams=group_ndparams)
     self.assertEqual(group_ndparams,
                      self.fake_cl.FillND(fake_node, fake_group))
Code example #15
 def testSpice(self):
     instance = objects.Instance(name="kvm.example.com",
                                 primary_node="node7235",
                                 network_port=11000)
     node = objects.Node(name="node7235", uuid="node7235-uuid", ndparams={})
     group = objects.NodeGroup(name="group0132", ndparams={})
     hvparams = {
         constants.HV_SERIAL_CONSOLE: False,
         constants.HV_VNC_BIND_ADDRESS: None,
         constants.HV_KVM_SPICE_BIND: "192.0.2.1",
     }
     cons = self._Test(instance, node, group, hvparams)
     self.assertEqual(cons.kind, constants.CONS_SPICE)
     self.assertEqual(cons.host, "192.0.2.1")
     self.assertEqual(cons.port, 11000)
Code example #16
 def testVnc(self):
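      # The VNC display number is the network port's offset from VNC_BASE_PORT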
     instance = objects.Instance(name="kvm.example.com",
                                 primary_node="node7235-uuid",
                                 network_port=constants.VNC_BASE_PORT + 10)
     node = objects.Node(name="node7235", uuid="node7235-uuid", ndparams={})
     group = objects.NodeGroup(name="group3632", ndparams={})
     hvparams = {
         constants.HV_SERIAL_CONSOLE: False,
         constants.HV_VNC_BIND_ADDRESS: "192.0.2.1",
         constants.HV_KVM_SPICE_BIND: None,
     }
     cons = self.MakeConsole(instance, node, group, hvparams)
     self.assertEqual(cons.kind, constants.CONS_VNC)
     self.assertEqual(cons.host, "192.0.2.1")
     self.assertEqual(cons.port, constants.VNC_BASE_PORT + 10)
     self.assertEqual(cons.display, 10)
Code example #17
 def testFillNdParamsNode(self):
     node_ndparams = {
         constants.ND_OOB_PROGRAM: "/bin/node-oob",
         constants.ND_SPINDLE_COUNT: 2,
         constants.ND_EXCLUSIVE_STORAGE: True,
         constants.ND_OVS: True,
         constants.ND_OVS_LINK: "eth2",
         constants.ND_OVS_NAME: "openvswitch",
         constants.ND_SSH_PORT: 222,
         constants.ND_CPU_SPEED: 1.1,
     }
     fake_node = objects.Node(name="test",
                              ndparams=node_ndparams,
                              group="testgroup")
     fake_group = objects.NodeGroup(name="testgroup", ndparams={})
     self.assertEqual(node_ndparams,
                      self.fake_cl.FillND(fake_node, fake_group))
Code example #18
    def testUpdateNode(self):
        """Test updates on one node object"""
        cfg = self._get_object()
        # construct a fake node
        fake_node = objects.Node()
        # fail if we didn't read the config
        self.assertRaises(errors.ConfigurationError, cfg.Update, fake_node,
                          None)

        node = cfg.GetNodeInfo(cfg.GetNodeList()[0])
        # first pass, must not fail
        cfg.Update(node, None)
        # second pass, also must not fail (after the config has been written)
        cfg.Update(node, None)
        # but the fake_node update should still fail
        self.assertRaises(errors.ConfigurationError, cfg.Update, fake_node,
                          None)
Code example #19
    def testInstNodesDrbdDisks(self):
        # construct a second node
        cfg = self._get_object_mock()
        node_group = cfg.LookupNodeGroup(None)
        master_uuid = cfg.GetMasterNode()
        node2 = objects.Node(name="node2.example.com",
                             group=node_group,
                             ndparams={},
                             uuid="node2-uuid")
        cfg.AddNode(node2, "my-job")

        # construct instance
        inst = self._create_instance(cfg)
        disks = [
            objects.Disk(dev_type=constants.DT_DRBD8,
                         size=786432,
                         logical_id=(master_uuid, node2.uuid, 12300, 0, 0,
                                     "secret"),
                         children=[
                             objects.Disk(dev_type=constants.DT_PLAIN,
                                          size=786432,
                                          logical_id=("myxenvg", "disk0"),
                                          uuid="data0"),
                             objects.Disk(dev_type=constants.DT_PLAIN,
                                          size=128,
                                          logical_id=("myxenvg", "meta0"),
                                          uuid="meta0")
                         ],
                         iv_name="disk/0",
                         uuid="disk0")
        ]
        cfg.AddInstance(inst, "my-job")
        for disk in disks:
            cfg.AddInstanceDisk(inst.uuid, disk)

        # Drbd Disks
        all_nodes = cfg.GetInstanceNodes(inst.uuid)
        secondary_nodes = cfg.GetInstanceSecondaryNodes(inst.uuid)
        self._GenericNodesCheck(inst, all_nodes, secondary_nodes)
        self.assertEqual(set(secondary_nodes), set([node2.uuid]))
        self.assertEqual(set(all_nodes), set([inst.primary_node, node2.uuid]))
        self.assertEqual(
            cfg.GetInstanceLVsByNode(inst.uuid), {
                master_uuid: ["myxenvg/disk0", "myxenvg/meta0"],
                node2.uuid: ["myxenvg/disk0", "myxenvg/meta0"],
            })
Code example #20
File: node_unittest.py  Project: yiannist/ganeti
    def setUp(self):
        super(TestLUNodeAdd, self).setUp()

        # One node for testing readding:
        self.node_readd = self.cfg.AddNewNode()
        self.op_readd = opcodes.OpNodeAdd(
            node_name=self.node_readd.name,
            readd=True,
            primary_ip=self.node_readd.primary_ip,
            secondary_ip=self.node_readd.secondary_ip)

        # One node for testing adding:
        # don't add to configuration now!
        self.node_add = objects.Node(name="node_add",
                                     primary_ip="192.0.2.200",
                                     secondary_ip="203.0.113.200")

        self.op_add = opcodes.OpNodeAdd(
            node_name=self.node_add.name,
            primary_ip=self.node_add.primary_ip,
            secondary_ip=self.node_add.secondary_ip)

        self.netutils_mod.TcpPing.return_value = True

        self.mocked_dns_rpc = self.rpc_mod.DnsOnlyRunner.return_value

        self.mocked_dns_rpc.call_version.return_value = \
          self.RpcResultsBuilder(use_node_names=True) \
            .AddSuccessfulNode(self.node_add, constants.CONFIG_VERSION) \
            .AddSuccessfulNode(self.node_readd, constants.CONFIG_VERSION) \
            .Build()

        node_verify_result = \
          self.RpcResultsBuilder() \
            .CreateSuccessfulNodeResult(self.node_add, {constants.NV_NODELIST: []})
        # we can't know the node's UUID in advance, so use defaultdict here
        self.rpc.call_node_verify.return_value = \
          defaultdict(lambda: node_verify_result, {})
        self.rpc.call_node_crypto_tokens.return_value = \
          self.RpcResultsBuilder() \
            .CreateSuccessfulNodeResult(self.node_add,
                [(constants.CRYPTO_TYPE_SSL_DIGEST, "IA:MA:FA:KE:DI:GE:ST")])
Code example #21
    def testHvState(self):
        node = objects.Node(name="node18157.example.com",
                            hv_state={
                                constants.HT_XEN_HVM:
                                objects.NodeHvState(cpu_total=64),
                                constants.HT_KVM:
                                objects.NodeHvState(cpu_node=1),
                            })

        node2 = objects.Node.FromDict(node.ToDict())

        # Make sure nothing can reference it anymore
        del node

        self.assertEqual(node2.name, "node18157.example.com")
        self.assertEqual(frozenset(node2.hv_state),
                         frozenset([
                             constants.HT_XEN_HVM,
                             constants.HT_KVM,
                         ]))
        self.assertEqual(node2.hv_state[constants.HT_KVM].cpu_node, 1)
        self.assertEqual(node2.hv_state[constants.HT_XEN_HVM].cpu_total, 64)
Code example #22
    def testAssignGroupNodes(self):
        me = netutils.Hostname()
        cfg = self._get_object()

        # Create two groups
        grp1 = objects.NodeGroup(name="grp1",
                                 members=[],
                                 uuid="2f2fadf7-2a70-4a23-9ab5-2568c252032c")
        grp1_serial = 1
        cfg.AddNodeGroup(grp1, "job")

        grp2 = objects.NodeGroup(name="grp2",
                                 members=[],
                                 uuid="798d0de3-680f-4a0e-b29a-0f54f693b3f1")
        grp2_serial = 1
        cfg.AddNodeGroup(grp2, "job")
        self.assertEqual(
            set(
                map(operator.attrgetter("name"),
                    cfg.GetAllNodeGroupsInfo().values())),
            set(["grp1", "grp2", constants.INITIAL_NODE_GROUP_NAME]))

        # No-op
        cluster_serial = cfg.GetClusterInfo().serial_no
        cfg.AssignGroupNodes([])
        cluster_serial += 1

        # Create two nodes
        node1 = objects.Node(name="node1",
                             group=grp1.uuid,
                             ndparams={},
                             uuid="node1-uuid")
        node1_serial = 1
        node2 = objects.Node(name="node2",
                             group=grp2.uuid,
                             ndparams={},
                             uuid="node2-uuid")
        node2_serial = 1
        cfg.AddNode(node1, "job")
        cfg.AddNode(node2, "job")
        cluster_serial += 2
        self.assertEqual(
            set(cfg.GetNodeList()),
            set([
                "node1-uuid", "node2-uuid",
                cfg.GetNodeInfoByName(me.name).uuid
            ]))

        (grp1, grp2) = [cfg.GetNodeGroup(grp.uuid) for grp in (grp1, grp2)]

        def _VerifySerials():
            self.assertEqual(cfg.GetClusterInfo().serial_no, cluster_serial)
            self.assertEqual(node1.serial_no, node1_serial)
            self.assertEqual(node2.serial_no, node2_serial)
            self.assertEqual(grp1.serial_no, grp1_serial)
            self.assertEqual(grp2.serial_no, grp2_serial)

        _VerifySerials()

        self.assertEqual(set(grp1.members), set(["node1-uuid"]))
        self.assertEqual(set(grp2.members), set(["node2-uuid"]))

        # Check invalid nodes and groups
        self.assertRaises(errors.ConfigurationError, cfg.AssignGroupNodes, [
            ("unknown.node.example.com", grp2.uuid),
        ])
        self.assertRaises(errors.ConfigurationError, cfg.AssignGroupNodes, [
            (node1.name, "unknown-uuid"),
        ])

        self.assertEqual(node1.group, grp1.uuid)
        self.assertEqual(node2.group, grp2.uuid)
        self.assertEqual(set(grp1.members), set(["node1-uuid"]))
        self.assertEqual(set(grp2.members), set(["node2-uuid"]))

        # Another no-op
        cfg.AssignGroupNodes([])
        cluster_serial += 1
        _VerifySerials()

        # Assign to the same group (should be a no-op)
        self.assertEqual(node2.group, grp2.uuid)
        cfg.AssignGroupNodes([
            (node2.uuid, grp2.uuid),
        ])
        cluster_serial += 1
        self.assertEqual(node2.group, grp2.uuid)
        _VerifySerials()
        self.assertEqual(set(grp1.members), set(["node1-uuid"]))
        self.assertEqual(set(grp2.members), set(["node2-uuid"]))

        # Assign node 2 to group 1
        self.assertEqual(node2.group, grp2.uuid)
        cfg.AssignGroupNodes([
            (node2.uuid, grp1.uuid),
        ])
        (grp1, grp2) = [cfg.GetNodeGroup(grp.uuid) for grp in (grp1, grp2)]
        (node1,
         node2) = [cfg.GetNodeInfo(node.uuid) for node in (node1, node2)]
        cluster_serial += 1
        node2_serial += 1
        grp1_serial += 1
        grp2_serial += 1
        self.assertEqual(node2.group, grp1.uuid)
        _VerifySerials()
        self.assertEqual(set(grp1.members), set(["node1-uuid", "node2-uuid"]))
        self.assertFalse(grp2.members)

        # And assign both nodes to group 2
        self.assertEqual(node1.group, grp1.uuid)
        self.assertEqual(node2.group, grp1.uuid)
        self.assertNotEqual(grp1.uuid, grp2.uuid)
        cfg.AssignGroupNodes([
            (node1.uuid, grp2.uuid),
            (node2.uuid, grp2.uuid),
        ])
        (grp1, grp2) = [cfg.GetNodeGroup(grp.uuid) for grp in (grp1, grp2)]
        (node1,
         node2) = [cfg.GetNodeInfo(node.uuid) for node in (node1, node2)]
        cluster_serial += 1
        node1_serial += 1
        node2_serial += 1
        grp1_serial += 1
        grp2_serial += 1
        self.assertEqual(node1.group, grp2.uuid)
        self.assertEqual(node2.group, grp2.uuid)
        _VerifySerials()
        self.assertFalse(grp1.members)
        self.assertEqual(set(grp2.members), set(["node1-uuid", "node2-uuid"]))
Code example #23
File: bootstrap.py  Project: vali-um/ganeti
def InitCluster(
        cluster_name,
        mac_prefix,  # pylint: disable=R0913, R0914
        master_netmask,
        master_netdev,
        file_storage_dir,
        shared_file_storage_dir,
        gluster_storage_dir,
        candidate_pool_size,
        ssh_key_type,
        ssh_key_bits,
        secondary_ip=None,
        vg_name=None,
        beparams=None,
        nicparams=None,
        ndparams=None,
        hvparams=None,
        diskparams=None,
        enabled_hypervisors=None,
        modify_etc_hosts=True,
        modify_ssh_setup=True,
        maintain_node_health=False,
        drbd_helper=None,
        uid_pool=None,
        default_iallocator=None,
        default_iallocator_params=None,
        primary_ip_version=None,
        ipolicy=None,
        prealloc_wipe_disks=False,
        use_external_mip_script=False,
        hv_state=None,
        disk_state=None,
        enabled_disk_templates=None,
        install_image=None,
        zeroing_image=None,
        compression_tools=None,
        enabled_user_shutdown=False):
    """Initialise the cluster.

    @type candidate_pool_size: int
    @param candidate_pool_size: master candidate pool size

    @type enabled_disk_templates: list of string
    @param enabled_disk_templates: list of disk templates to be used in this
      cluster

    @type enabled_user_shutdown: bool
    @param enabled_user_shutdown: whether user shutdown is enabled cluster-wide

    """
    # TODO: complete the docstring
    if config.ConfigWriter.IsCluster():
        raise errors.OpPrereqError("Cluster is already initialised",
                                   errors.ECODE_STATE)

    data_dir = vcluster.AddNodePrefix(pathutils.DATA_DIR)
    queue_dir = vcluster.AddNodePrefix(pathutils.QUEUE_DIR)
    archive_dir = vcluster.AddNodePrefix(pathutils.JOB_QUEUE_ARCHIVE_DIR)
    for ddir in [queue_dir, data_dir, archive_dir]:
        if os.path.isdir(ddir):
            for entry in os.listdir(ddir):
                if not os.path.isdir(os.path.join(ddir, entry)):
                    raise errors.OpPrereqError(
                        "%s contains non-directory entries like %s. Remove left-overs of an"
                        " old cluster before initialising a new one" %
                        (ddir, entry), errors.ECODE_STATE)

    if not enabled_hypervisors:
        raise errors.OpPrereqError(
            "Enabled hypervisors list must contain at"
            " least one member", errors.ECODE_INVAL)
    invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
    if invalid_hvs:
        raise errors.OpPrereqError(
            "Enabled hypervisors contains invalid"
            " entries: %s" % invalid_hvs, errors.ECODE_INVAL)

    _InitCheckEnabledDiskTemplates(enabled_disk_templates)

    try:
        ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
    except errors.ProgrammerError:
        raise errors.OpPrereqError(
            "Invalid primary ip version: %d." % primary_ip_version,
            errors.ECODE_INVAL)

    hostname = netutils.GetHostname(family=ipcls.family)
    if not ipcls.IsValid(hostname.ip):
        raise errors.OpPrereqError(
            "This host's IP (%s) is not a valid IPv%d"
            " address." % (hostname.ip, primary_ip_version),
            errors.ECODE_INVAL)

    if ipcls.IsLoopback(hostname.ip):
        raise errors.OpPrereqError(
            "This host's IP (%s) resolves to a loopback"
            " address. Please fix DNS or %s." %
            (hostname.ip, pathutils.ETC_HOSTS), errors.ECODE_ENVIRON)

    if not ipcls.Own(hostname.ip):
        raise errors.OpPrereqError(
            "Inconsistency: this host's name resolves"
            " to %s,\nbut this ip address does not"
            " belong to this host" % hostname.ip, errors.ECODE_ENVIRON)

    clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

    if netutils.TcpPing(clustername.ip,
                        constants.DEFAULT_NODED_PORT,
                        timeout=5):
        raise errors.OpPrereqError("Cluster IP already active",
                                   errors.ECODE_NOTUNIQUE)

    if not secondary_ip:
        if primary_ip_version == constants.IP6_VERSION:
            raise errors.OpPrereqError(
                "When using a IPv6 primary address, a valid"
                " IPv4 address must be given as secondary", errors.ECODE_INVAL)
        secondary_ip = hostname.ip

    if not netutils.IP4Address.IsValid(secondary_ip):
        raise errors.OpPrereqError(
            "Secondary IP address (%s) has to be a valid"
            " IPv4 address." % secondary_ip, errors.ECODE_INVAL)

    if not netutils.IP4Address.Own(secondary_ip):
        raise errors.OpPrereqError(
            "You gave %s as secondary IP,"
            " but it does not belong to this host." % secondary_ip,
            errors.ECODE_ENVIRON)

    if master_netmask is not None:
        if not ipcls.ValidateNetmask(master_netmask):
            raise errors.OpPrereqError(
                "CIDR netmask (%s) not valid for IPv%s " %
                (master_netmask, primary_ip_version), errors.ECODE_INVAL)
    else:
        master_netmask = ipcls.iplen

    if vg_name:
        # Check if volume group is valid
        vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(),
                                              vg_name, constants.MIN_VG_SIZE)
        if vgstatus:
            raise errors.OpPrereqError("Error: %s" % vgstatus,
                                       errors.ECODE_INVAL)

    drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
    _InitCheckDrbdHelper(drbd_helper, drbd_enabled)

    logging.debug("Stopping daemons (if any are running)")
    result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
    if result.failed:
        raise errors.OpExecError("Could not stop daemons, command %s"
                                 " had exitcode %s and error '%s'" %
                                 (result.cmd, result.exit_code, result.output))

    file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
                                           file_storage_dir)
    shared_file_storage_dir = _PrepareSharedFileStorage(
        enabled_disk_templates, shared_file_storage_dir)
    gluster_storage_dir = _PrepareGlusterStorage(enabled_disk_templates,
                                                 gluster_storage_dir)

    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
        raise errors.OpPrereqError(
            "Invalid mac prefix given '%s'" % mac_prefix, errors.ECODE_INVAL)

    if nicparams.get("mode", None) != constants.NIC_MODE_OVS:
        # Do not do this check if mode=openvswitch, since the openvswitch is not
        # created yet
        result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
        if result.failed:
            raise errors.OpPrereqError(
                "Invalid master netdev given (%s): '%s'" %
                (master_netdev, result.output.strip()), errors.ECODE_INVAL)

    dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
    utils.EnsureDirs(dirs)

    objects.UpgradeBeParams(beparams)
    utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
    utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

    objects.NIC.CheckParameterSyntax(nicparams)

    full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
    _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy,
                                           enabled_disk_templates)

    if ndparams is not None:
        utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
    else:
        ndparams = dict(constants.NDC_DEFAULTS)

    # This is ugly, as we modify the dict itself
    # FIXME: Make utils.ForceDictType pure functional or write a wrapper
    # around it
    if hv_state:
        for hvname, hvs_data in hv_state.items():
            utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
            hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
    else:
        hv_state = dict((hvname, constants.HVST_DEFAULTS)
                        for hvname in enabled_hypervisors)

    # FIXME: disk_state has no default values yet
    if disk_state:
        for storage, ds_data in disk_state.items():
            if storage not in constants.DS_VALID_TYPES:
                raise errors.OpPrereqError(
                    "Invalid storage type in disk state: %s" % storage,
                    errors.ECODE_INVAL)
            for ds_name, state in ds_data.items():
                utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
                ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

    # hvparams is a mapping of hypervisor->hvparams dict
    for hv_name, hv_params in hvparams.items():
        utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
        hv_class = hypervisor.GetHypervisor(hv_name)
        hv_class.CheckParameterSyntax(hv_params)

    # diskparams is a mapping of disk-template->diskparams dict
    for template, dt_params in diskparams.items():
        param_keys = set(dt_params.keys())
        default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
        if param_keys > default_param_keys:
            unknown_params = param_keys - default_param_keys
            raise errors.OpPrereqError(
                "Invalid parameters for disk template %s:"
                " %s" % (template, utils.CommaJoin(unknown_params)),
                errors.ECODE_INVAL)
        utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
        if template == constants.DT_DRBD8 and vg_name is not None:
            # The default METAVG value is equal to the VG name set at init time,
            # if provided
            dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

    try:
        utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
    except errors.OpPrereqError as err:
        raise errors.OpPrereqError("While verify diskparam options: %s" % err,
                                   errors.ECODE_INVAL)

    # set up ssh config and /etc/hosts
    rsa_sshkey = ""
    dsa_sshkey = ""
    if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
        sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
        rsa_sshkey = sshline.split(" ")[1]
    if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
        sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
        dsa_sshkey = sshline.split(" ")[1]
    if not rsa_sshkey and not dsa_sshkey:
        raise errors.OpPrereqError("Failed to find SSH public keys",
                                   errors.ECODE_ENVIRON)

    if modify_etc_hosts:
        utils.AddHostToEtcHosts(hostname.name, hostname.ip)

    if modify_ssh_setup:
        ssh.InitSSHSetup(ssh_key_type, ssh_key_bits)

    if default_iallocator is not None:
        alloc_script = utils.FindFile(default_iallocator,
                                      constants.IALLOCATOR_SEARCH_PATH,
                                      os.path.isfile)
        if alloc_script is None:
            raise errors.OpPrereqError(
                "Invalid default iallocator script '%s'"
                " specified" % default_iallocator, errors.ECODE_INVAL)
    else:
        # default to htools
        if utils.FindFile(constants.IALLOC_HAIL,
                          constants.IALLOCATOR_SEARCH_PATH, os.path.isfile):
            default_iallocator = constants.IALLOC_HAIL

    # check if we have all the users we need
    try:
        runtime.GetEnts()
    except errors.ConfigurationError as err:
        raise errors.OpPrereqError(
            "Required system user/group missing: %s" % err,
            errors.ECODE_ENVIRON)

    candidate_certs = {}

    now = time.time()

    if compression_tools is not None:
        cluster.CheckCompressionTools(compression_tools)

    initial_dc_config = dict(active=True,
                             interval=int(constants.MOND_TIME_INTERVAL * 1e6))
    data_collectors = dict((name, initial_dc_config.copy())
                           for name in constants.DATA_COLLECTOR_NAMES)

    # init of cluster config file
    cluster_config = objects.Cluster(
        serial_no=1,
        rsahostkeypub=rsa_sshkey,
        dsahostkeypub=dsa_sshkey,
        highest_used_port=(constants.FIRST_DRBD_PORT - 1),
        mac_prefix=mac_prefix,
        volume_group_name=vg_name,
        tcpudp_port_pool=set(),
        master_ip=clustername.ip,
        master_netmask=master_netmask,
        master_netdev=master_netdev,
        cluster_name=clustername.name,
        file_storage_dir=file_storage_dir,
        shared_file_storage_dir=shared_file_storage_dir,
        gluster_storage_dir=gluster_storage_dir,
        enabled_hypervisors=enabled_hypervisors,
        beparams={constants.PP_DEFAULT: beparams},
        nicparams={constants.PP_DEFAULT: nicparams},
        ndparams=ndparams,
        hvparams=hvparams,
        diskparams=diskparams,
        candidate_pool_size=candidate_pool_size,
        modify_etc_hosts=modify_etc_hosts,
        modify_ssh_setup=modify_ssh_setup,
        uid_pool=uid_pool,
        ctime=now,
        mtime=now,
        maintain_node_health=maintain_node_health,
        data_collectors=data_collectors,
        drbd_usermode_helper=drbd_helper,
        default_iallocator=default_iallocator,
        default_iallocator_params=default_iallocator_params,
        primary_ip_family=ipcls.family,
        prealloc_wipe_disks=prealloc_wipe_disks,
        use_external_mip_script=use_external_mip_script,
        ipolicy=full_ipolicy,
        hv_state_static=hv_state,
        disk_state_static=disk_state,
        enabled_disk_templates=enabled_disk_templates,
        candidate_certs=candidate_certs,
        osparams={},
        osparams_private_cluster={},
        install_image=install_image,
        zeroing_image=zeroing_image,
        compression_tools=compression_tools,
        enabled_user_shutdown=enabled_user_shutdown,
        ssh_key_type=ssh_key_type,
        ssh_key_bits=ssh_key_bits,
    )
    master_node_config = objects.Node(
        name=hostname.name,
        primary_ip=hostname.ip,
        secondary_ip=secondary_ip,
        serial_no=1,
        master_candidate=True,
        offline=False,
        drained=False,
        ctime=now,
        mtime=now,
    )
    InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
    cfg = config.ConfigWriter(offline=True)
    ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
    cfg.Update(cfg.GetClusterInfo(), logging.error)
    ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

    master_uuid = cfg.GetMasterNode()
    if modify_ssh_setup:
        ssh.InitPubKeyFile(master_uuid, ssh_key_type)
    # set up the inter-node password and certificate
    _InitGanetiServerSetup(hostname.name, cfg)

    logging.debug("Starting daemons")
    result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
    if result.failed:
        raise errors.OpExecError("Could not start daemons, command %s"
                                 " had exitcode %s and error %s" %
                                 (result.cmd, result.exit_code, result.output))

    _WaitForMasterDaemon()
Code example #24
 def _GetSingleOfflineNode(uuid):
     assert uuid == "node100-uuid"
     return objects.Node(name="node100.example.com",
                         uuid=uuid,
                         offline=True,
                         primary_ip="192.0.2.100")
Code example #25
 def _GetSingleOnlineNode(uuid):
     assert uuid == "node90-uuid"
     return objects.Node(name="node90.example.com",
                         uuid=uuid,
                         offline=False,
                         primary_ip="192.0.2.90")
Code example #26
 def GetNodeInfo(self, name):
     return objects.Node(name=name)
Code example #27
        disk_state_static=disk_state,
        enabled_disk_templates=enabled_disk_templates,
        candidate_certs=candidate_certs,
        osparams={},
        osparams_private_cluster={},
        install_image=install_image,
        zeroing_image=zeroing_image,
        compression_tools=compression_tools,
        enabled_user_shutdown=enabled_user_shutdown,
    )
    master_node_config = objects.Node(
        name=hostname.name,
        primary_ip=hostname.ip,
        secondary_ip=secondary_ip,
        serial_no=1,
        master_candidate=True,
        offline=False,
        drained=False,
        ctime=now,
        mtime=now,
    )
    InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
    cfg = config.ConfigWriter(offline=True)
    ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
    cfg.Update(cfg.GetClusterInfo(), logging.error)
    ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

    master_uuid = cfg.GetMasterNode()
    if modify_ssh_setup:
        ssh.InitPubKeyFile(master_uuid)
    # set up the inter-node password and certificate
Code example #28
 def testFillNdParamsCluster(self):
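      # With empty node and group ndparams, FillND falls back to the
      # cluster-level defaults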
     fake_node = objects.Node(name="test", ndparams={}, group="testgroup")
     fake_group = objects.NodeGroup(name="testgroup", ndparams={})
     self.assertEqual(self.fake_cl.ndparams,
                      self.fake_cl.FillND(fake_node, fake_group))
Code example #29
 def testEmpty(self):
     self.assertEqual(objects.Node().ToDict(), {})
     self.assertTrue(isinstance(objects.Node.FromDict({}), objects.Node))
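
The empty round-trip above generalizes to populated nodes. A minimal sketch, assuming the same ganeti.objects API exercised throughout these examples:

    from ganeti import objects

    node = objects.Node(name="node1.example.com", primary_ip="192.0.2.1")
    data = node.ToDict()  # a plain dict, as the empty case above demonstrates
    restored = objects.Node.FromDict(data)
    assert restored.name == node.name
    assert restored.primary_ip == node.primary_ip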
Code example #30
File: config_mock.py  Project: dimara/ganeti
    def AddNewNode(self,
                   uuid=None,
                   name=None,
                   primary_ip=None,
                   secondary_ip=None,
                   master_candidate=True,
                   offline=False,
                   drained=False,
                   group=None,
                   master_capable=True,
                   vm_capable=True,
                   ndparams=None,
                   powered=True,
                   hv_state=None,
                   hv_state_static=None,
                   disk_state=None,
                   disk_state_static=None):
        """Add a new L{objects.Node} to the cluster configuration

        See L{objects.Node} for parameter documentation.

        @rtype: L{objects.Node}
        @return: the newly added node

        """
        node_id = self._cur_node_id
        self._cur_node_id += 1

        if uuid is None:
            uuid = self._GetUuid()
        if name is None:
            name = "mock_node_%d.example.com" % node_id
        if primary_ip is None:
            primary_ip = "192.0.2.%d" % node_id
        if secondary_ip is None:
            secondary_ip = "203.0.113.%d" % node_id
        if group is None:
            group = self._default_group.uuid
        group = self._GetObjUuid(group)
        if ndparams is None:
            ndparams = {}

        node = objects.Node(uuid=uuid,
                            name=name,
                            primary_ip=primary_ip,
                            secondary_ip=secondary_ip,
                            master_candidate=master_candidate,
                            offline=offline,
                            drained=drained,
                            group=group,
                            master_capable=master_capable,
                            vm_capable=vm_capable,
                            ndparams=ndparams,
                            powered=powered,
                            hv_state=hv_state,
                            hv_state_static=hv_state_static,
                            disk_state=disk_state,
                            disk_state_static=disk_state_static)

        self._UnlockedAddNode(node, None)
        return node
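
A hypothetical usage sketch of the mock above; the ConfigMock class name and its no-argument constructor are assumptions, since only AddNewNode appears in the example:

    cfg = ConfigMock()  # assumed entry point from config_mock.py
    node = cfg.AddNewNode(name="n1.example.com", offline=True)
    assert node.offline
    assert node.primary_ip.startswith("192.0.2.")  # auto-assigned default shown above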