def testFindDisk(self):
  """FindDisk must return the disk at a valid index and reject bad ones."""
  drbd_children = [
    objects.Disk(dev_type=constants.DT_PLAIN, size=786432,
                 logical_id=("myxenvg", "disk0")),
    objects.Disk(dev_type=constants.DT_PLAIN, size=128,
                 logical_id=("myxenvg", "meta0")),
    ]
  drbd_disk = objects.Disk(dev_type=constants.DT_DRBD8, size=786432,
                           logical_id=("node20.example.com",
                                       "node15.example.com",
                                       12300, 0, 0, "secret"),
                           children=drbd_children, iv_name="disk/0")
  inst = objects.Instance(name="fakeinstdrbd.example.com",
                          primary_node="node20.example.com",
                          disks=[drbd_disk])
  self.assertEqual(inst.FindDisk(0), inst.disks[0])
  # Non-integer index, out-of-range index, and index past the only disk
  for bad_index in ("hello", 100, 1):
    self.assertRaises(errors.OpPrereqError, inst.FindDisk, bad_index)
def testInstNodesPlainDisks(self):
  """Node and LV lookups for an instance with two plain disks."""
  # construct instance
  cfg = self._get_object_mock()
  inst = self._create_instance(cfg)
  cfg.AddInstance(inst, "my-job")
  disk_specs = [
    (128, "disk25494", "disk0"),
    (512, "disk29071", "disk1"),
    ]
  for (lv_size, lv_name, disk_uuid) in disk_specs:
    cfg.AddInstanceDisk(inst.uuid,
                        objects.Disk(dev_type=constants.DT_PLAIN,
                                     size=lv_size,
                                     logical_id=("myxenvg", lv_name),
                                     uuid=disk_uuid))

  # Plain disks
  all_nodes = cfg.GetInstanceNodes(inst.uuid)
  secondary_nodes = cfg.GetInstanceSecondaryNodes(inst.uuid)
  self._GenericNodesCheck(inst, all_nodes, secondary_nodes)
  # Plain disks live on the primary node only
  self.assertEqual(len(secondary_nodes), 0)
  self.assertEqual(set(all_nodes), set([inst.primary_node]))
  self.assertEqual(cfg.GetInstanceLVsByNode(inst.uuid), {
    inst.primary_node: ["myxenvg/disk25494", "myxenvg/disk29071"],
    })
def _MakeInstance(self):
  """Build a test instance with two plain disks.

  @rtype: tuple
  @return: (instance, disks) where disks is a list of
    (disk object, path, NotImplemented) tuples

  """
  # Copy default parameters
  bep = objects.FillDict(constants.BEC_DEFAULTS, {})
  hvp = objects.FillDict(constants.HVC_DEFAULTS[self.HVNAME], {})

  # Override default VNC password file path
  if constants.HV_VNC_PASSWORD_FILE in hvp:
    hvp[constants.HV_VNC_PASSWORD_FILE] = self.vncpw_path

  disks = [
    (objects.Disk(dev_type=constants.DT_PLAIN, mode=constants.DISK_RDWR),
     utils.PathJoin(self.tmpdir, "disk0"),
     NotImplemented),
    (objects.Disk(dev_type=constants.DT_PLAIN, mode=constants.DISK_RDONLY),
     utils.PathJoin(self.tmpdir, "disk1"),
     NotImplemented),
    ]

  # List comprehension instead of map(): under Python 3 map() returns a
  # lazy iterator, which would leave inst.disks non-indexable and
  # consumable only once
  inst = objects.Instance(name="server01.example.com",
                          hvparams=hvp, beparams=bep, osparams={},
                          nics=[], os="deb1",
                          disks=[compat.fst(entry) for entry in disks])
  inst.UpgradeConfig()
  return (inst, disks)
def setUp(self):
  """Set up input data"""
  disk0 = objects.Disk(dev_type=constants.DT_PLAIN, size=1024,
                       logical_id=("ganeti", "disk01234"), name="disk-0",
                       mode="rw", params={}, children=[], uuid="disk0")
  disk1 = objects.Disk(dev_type=constants.DT_PLAIN, size=2048,
                       logical_id=("ganeti", "disk56789"), name="disk-1",
                       mode="ro", params={}, children=[], uuid="disk1")
  self.disks = [disk0, disk1]
  self.ext_params = {
    "provider": "pvdr",
    "param1": "value1",
    "param2": "value2",
    }
  self.default_vg = "ganeti-vg"
def testFileDisks(self):
  """File-backed disks must map to the expected Xen config strings."""
  disk_specs = [
    (constants.DISK_RDWR, constants.FD_LOOP, "/tmp/diskFirst"),
    (constants.DISK_RDONLY, constants.FD_BLKTAP, "/tmp/diskTwo"),
    (constants.DISK_RDWR, constants.FD_LOOP, "/tmp/diskThree"),
    (constants.DISK_RDONLY, constants.FD_BLKTAP2, "/tmp/diskFour"),
    (constants.DISK_RDWR, constants.FD_BLKTAP, "/tmp/diskLast"),
    ]
  disks = [(objects.Disk(dev_type=constants.DT_FILE, mode=mode,
                         logical_id=[driver]),
            path, NotImplemented)
           for (mode, driver, path) in disk_specs]
  result = hv_xen._GetConfigFileDiskData(disks, "sd")
  self.assertEqual(result, [
    "'file:/tmp/diskFirst,sda,w'",
    "'tap:aio:/tmp/diskTwo,sdb,r'",
    "'file:/tmp/diskThree,sdc,w'",
    "'tap2:tapdisk:aio:/tmp/diskFour,sdd,r'",
    "'tap:aio:/tmp/diskLast,sde,w'",
    ])
def __init__(self, cluster=NotImplemented):
  """Initialize the fake config with a cluster stub and two plain disks."""
  self._cluster = cluster
  first_disk = objects.Disk(dev_type=constants.DT_PLAIN, size=4096,
                            logical_id=("vg", "disk6120"),
                            uuid="disk_uuid_1")
  second_disk = objects.Disk(dev_type=constants.DT_PLAIN, size=1024,
                             logical_id=("vg", "disk8508"),
                             uuid="disk_uuid_2")
  self._disks = [first_disk, second_disk]
  # Bring the objects up to the current config format
  for disk in self._disks:
    disk.UpgradeConfig()
def testTwoLvDisksWithMode(self):
  """LV disks must produce 'phy:' entries with the right mode flags."""
  rw_disk = objects.Disk(dev_type=constants.DT_PLAIN,
                         mode=constants.DISK_RDWR)
  ro_disk = objects.Disk(dev_type=constants.DT_PLAIN,
                         mode=constants.DISK_RDONLY)
  disks = [
    (rw_disk, "/tmp/diskFirst", NotImplemented),
    (ro_disk, "/tmp/diskLast", NotImplemented),
    ]
  result = hv_xen._GetConfigFileDiskData(disks, "hd")
  expected = [
    "'phy:/tmp/diskFirst,hda,w'",
    "'phy:/tmp/diskLast,hdb,r'",
    ]
  self.assertEqual(result, expected)
def testAttachDetachDisks(self):
  """Test if the attach/detach wrappers work properly.

  This test checks if the configuration remains in a consistent state
  after a series of detach/attach ops

  """
  # construct instance
  cfg = self._get_object_mock()
  inst = self._create_instance(cfg)
  plain_disk = objects.Disk(dev_type=constants.DT_PLAIN, size=128,
                            logical_id=("myxenvg", "disk25494"),
                            uuid="disk0")
  cfg.AddInstance(inst, "my-job")
  cfg.AddInstanceDisk(inst.uuid, plain_disk)

  # Detach disk from non-existent instance
  self.assertRaises(errors.ConfigurationError, cfg.DetachInstanceDisk,
                    "1134", "disk0")

  # Detach non-existent disk
  self.assertRaises(errors.ConfigurationError, cfg.DetachInstanceDisk,
                    "test-uuid", "disk1")

  # Detach disk
  cfg.DetachInstanceDisk("test-uuid", "disk0")
  self.assertEqual(cfg.GetInstanceDisks("test-uuid"), [])

  # Detach disk again
  self.assertRaises(errors.ProgrammerError, cfg.DetachInstanceDisk,
                    "test-uuid", "disk0")

  # Attach disk
  cfg.AttachInstanceDisk("test-uuid", "disk0")
  self.assertEqual(cfg.GetInstanceDisks("test-uuid"), [plain_disk])
def GetInstanceDisks(self, _):
  """Return a fixed single plain disk, ignoring the instance argument."""
  stub_disk = objects.Disk(size=512, spindles=13, uuid="disk_uuid",
                           dev_type=constants.DT_PLAIN)
  return [stub_disk]
def testUpgradeEnabledDiskTemplates(self):
  """Upgrade must gather templates from instances, disks and policies."""
  cfg = objects.ConfigData()
  cfg.cluster = objects.Cluster()
  cfg.cluster.volume_group_name = "myvg"
  instance1 = objects.Instance()
  instance1.disk_template = constants.DT_DISKLESS
  instance2 = objects.Instance()
  instance2.disk_template = constants.DT_RBD
  cfg.instances = {"myinstance1": instance1, "myinstance2": instance2}
  disk2 = objects.Disk(dev_type=constants.DT_RBD)
  cfg.disks = {"pinkbunnydisk": disk2}
  nodegroup = objects.NodeGroup()
  nodegroup.ipolicy = {
    constants.IPOLICY_DTS: [instance1.disk_template, constants.DT_BLOCK],
    }
  cfg.cluster.ipolicy = {
    constants.IPOLICY_DTS: [constants.DT_EXT, constants.DT_DISKLESS],
    }
  cfg.nodegroups = {"mynodegroup": nodegroup}

  cfg._UpgradeEnabledDiskTemplates()

  expected_disk_templates = [
    constants.DT_DRBD8,
    constants.DT_PLAIN,
    instance1.disk_template,
    instance2.disk_template,
    ]
  self.assertEqual(set(expected_disk_templates),
                   set(cfg.cluster.enabled_disk_templates))
  self.assertEqual(set([instance1.disk_template]),
                   set(cfg.cluster.ipolicy[constants.IPOLICY_DTS]))
def testInstNodesDrbdDisks(self):
  """Node and LV lookups for an instance with one DRBD disk."""
  # construct a second node
  cfg = self._get_object_mock()
  node_group = cfg.LookupNodeGroup(None)
  master_uuid = cfg.GetMasterNode()
  node2 = objects.Node(name="node2.example.com", group=node_group,
                       ndparams={}, uuid="node2-uuid")
  cfg.AddNode(node2, "my-job")

  # construct instance
  inst = self._create_instance(cfg)
  data_child = objects.Disk(dev_type=constants.DT_PLAIN, size=786432,
                            logical_id=("myxenvg", "disk0"), uuid="data0")
  meta_child = objects.Disk(dev_type=constants.DT_PLAIN, size=128,
                            logical_id=("myxenvg", "meta0"), uuid="meta0")
  drbd_disk = objects.Disk(dev_type=constants.DT_DRBD8, size=786432,
                           logical_id=(master_uuid, node2.uuid,
                                       12300, 0, 0, "secret"),
                           children=[data_child, meta_child],
                           iv_name="disk/0", uuid="disk0")
  cfg.AddInstance(inst, "my-job")
  cfg.AddInstanceDisk(inst.uuid, drbd_disk)

  # Drbd Disks
  all_nodes = cfg.GetInstanceNodes(inst.uuid)
  secondary_nodes = cfg.GetInstanceSecondaryNodes(inst.uuid)
  self._GenericNodesCheck(inst, all_nodes, secondary_nodes)
  self.assertEqual(set(secondary_nodes), set([node2.uuid]))
  self.assertEqual(set(all_nodes), set([inst.primary_node, node2.uuid]))
  # Both nodes carry the data and meta LVs of the DRBD device
  self.assertEqual(cfg.GetInstanceLVsByNode(inst.uuid), {
    master_uuid: ["myxenvg/disk0", "myxenvg/meta0"],
    node2.uuid: ["myxenvg/disk0", "myxenvg/meta0"],
    })
def testUpgradeConfigDevTypeLegacy(self):
  """Legacy dev_type names must be mapped to the current constants."""
  legacy_pairs = (
    ("drbd8", constants.DT_DRBD8),
    ("lvm", constants.DT_PLAIN),
    )
  for (old, new) in legacy_pairs:
    disk = objects.Disk()
    disk.dev_type = old
    self.addChild(disk)
    disk.UpgradeConfig()
    # Both the disk and its child must be converted
    self.assertEqual(new, disk.dev_type)
    self.assertEqual(new, disk.children[0].dev_type)
def testInvalidFileDisk(self):
  """An unknown file driver must raise KeyError when building the data."""
  bad_disk = objects.Disk(dev_type=constants.DT_FILE,
                          mode=constants.DISK_RDWR,
                          logical_id=["#unknown#"])
  disks = [(bad_disk, "/tmp/diskinvalid", NotImplemented)]
  self.assertRaises(KeyError, hv_xen._GetConfigFileDiskData, disks, "sd")
def testDisk(self):
  """Looking up a disk device in the runtime must match by UUID."""
  target = constants.HOTPLUG_TARGET_DISK
  device = objects.Disk()
  runtime = self._GetRuntime()
  # Without a UUID the device cannot be located
  self._fail(target, device, runtime)
  device.uuid = "9f5c5bd4-6f60-480b-acdc-9bb1a4b7df79"
  devinfo = hv_kvm._GetExistingDeviceInfo(target, device, runtime)[0]
  self.assertTrue(devinfo.hvinfo["addr"] == "0xa")
def _CreateInstanceDisk(self, cfg):
  """Create an instance and attach a single plain disk to it.

  @return: (instance, disk) tuple

  """
  inst = self._create_instance(cfg)
  cfg.AddInstance(inst, "my-job")
  disk = objects.Disk(dev_type=constants.DT_PLAIN, size=128,
                      logical_id=("myxenvg", "disk25494"),
                      uuid="disk0", name="name0")
  cfg.AddInstanceDisk(inst.uuid, disk)
  return inst, disk
def testNodesPlainDisks(self):
  """An instance with only plain disks has no secondary nodes."""
  plain_disks = [
    objects.Disk(dev_type=constants.DT_PLAIN, size=128,
                 logical_id=("myxenvg", "disk25494")),
    objects.Disk(dev_type=constants.DT_PLAIN, size=512,
                 logical_id=("myxenvg", "disk29071")),
    ]
  inst = objects.Instance(name="fakeinstplain.example.com",
                          primary_node="node3.example.com",
                          disks=plain_disks)
  self._GenericCheck(inst)
  self.assertEqual(len(inst.secondary_nodes), 0)
  self.assertEqual(set(inst.all_nodes), set([inst.primary_node]))
  self.assertEqual(inst.MapLVsByNode(), {
    inst.primary_node: ["myxenvg/disk25494", "myxenvg/disk29071"],
    })
def testUpgradeConfigDevTypeLegacyUnchanged(self):
  """Already-current dev_types must survive UpgradeConfig untouched."""
  for dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE,
                   constants.DT_BLOCK, constants.DT_EXT,
                   constants.DT_RBD, constants.DT_GLUSTER):
    disk = objects.Disk()
    disk.dev_type = dev_type
    self.addChild(disk)
    disk.UpgradeConfig()
    # Neither the disk nor its child may change
    self.assertEqual(dev_type, disk.dev_type)
    self.assertEqual(dev_type, disk.children[0].dev_type)
def testNodesDrbdDisks(self):
  """DRBD instance: secondary nodes, LV map and FindDisk behaviour."""
  drbd_children = [
    objects.Disk(dev_type=constants.DT_PLAIN, size=786432,
                 logical_id=("myxenvg", "disk0")),
    objects.Disk(dev_type=constants.DT_PLAIN, size=128,
                 logical_id=("myxenvg", "meta0")),
    ]
  drbd_disk = objects.Disk(dev_type=constants.DT_DRBD8, size=786432,
                           logical_id=("node10.example.com",
                                       "node15.example.com",
                                       12300, 0, 0, "secret"),
                           children=drbd_children, iv_name="disk/0")
  inst = objects.Instance(name="fakeinstdrbd.example.com",
                          primary_node="node10.example.com",
                          disks=[drbd_disk])
  self._GenericCheck(inst)
  self.assertEqual(set(inst.secondary_nodes), set(["node15.example.com"]))
  self.assertEqual(set(inst.all_nodes),
                   set([inst.primary_node, "node15.example.com"]))
  # Both nodes carry the data and meta LVs of the DRBD device
  self.assertEqual(inst.MapLVsByNode(), {
    inst.primary_node: ["myxenvg/disk0", "myxenvg/meta0"],
    "node15.example.com": ["myxenvg/disk0", "myxenvg/meta0"],
    })
  self.assertEqual(inst.FindDisk(0), inst.disks[0])
  # Non-integer index, out-of-range index, and index past the only disk
  for bad_index in ("hello", 100, 1):
    self.assertRaises(errors.OpPrereqError, inst.FindDisk, bad_index)
def testDiskInfoDuplicateName(self):
  """Assert that GetDiskInfo raises exception on duplicate names."""
  # Create mock config writer
  cfg = self._get_object_mock()

  # Create an instance and attach a disk to it
  inst, _ = self._CreateInstanceDisk(cfg)

  # Attach a second disk reusing the name "name0"
  dup_disk = objects.Disk(dev_type=constants.DT_PLAIN, size=128,
                          logical_id=("myxenvg", "disk25494"),
                          uuid="disk1", name="name0")
  cfg.AddInstanceDisk(inst.uuid, dup_disk)

  self.assertRaises(errors.ConfigurationError, cfg.GetDiskInfoByName,
                    "name0")
def CreateSnapshots(self):
  """Creates a snapshot for every disk of the instance.

  Currently support drbd, plain and ext disk templates.

  """
  # Must only be called once per export operation
  assert not self._snap_disks

  instance = self._instance
  src_node = instance.primary_node
  src_node_name = self._lu.cfg.GetNodeName(src_node)

  inst_disks = self._lu.cfg.GetInstanceDisks(instance.uuid)
  for idx, disk in enumerate(inst_disks):
    self._feedback_fn("Creating a snapshot of disk/%s on node %s" %
                      (idx, src_node_name))

    # result.payload will be a snapshot of an lvm leaf of the one we
    # passed
    result = self._lu.rpc.call_blockdev_snapshot(src_node,
                                                 (disk, instance),
                                                 None, None)
    # False is the failure placeholder: it is still appended below, so
    # _snap_disks always keeps one entry per instance disk
    new_dev = False
    msg = result.fail_msg
    if msg:
      # RPC-level failure; log and keep the placeholder
      self._lu.LogWarning("Could not snapshot disk/%s on node %s: %s",
                          idx, src_node_name, msg)
    elif (not isinstance(result.payload, (tuple, list)) or
          len(result.payload) != 2):
      # Malformed payload: a valid snapshot id is a 2-element tuple/list
      self._lu.LogWarning("Could not snapshot disk/%s on node %s: invalid"
                          " result '%s'", idx, src_node_name,
                          result.payload)
    else:
      disk_id = tuple(result.payload)
      # Snapshot is currently supported for ExtStorage and LogicalVolume.
      # In case disk is of type drbd the snapshot will be of type plain.
      if disk.dev_type == constants.DT_EXT:
        dev_type = constants.DT_EXT
      else:
        dev_type = constants.DT_PLAIN
      disk_params = constants.DISK_LD_DEFAULTS[dev_type].copy()
      new_dev = objects.Disk(dev_type=dev_type, size=disk.size,
                             logical_id=disk_id, iv_name=disk.iv_name,
                             params=disk_params)
    self._snap_disks.append(new_dev)

  assert len(self._snap_disks) == len(instance.disks)
  assert len(self._removed_snaps) == len(instance.disks)
def testManyDisks(self):
  """More disks than drive letters must raise HypervisorError.

  Checks the exact boundary (offset 0 succeeds) and overflows of one and
  ten disks past the available letters.

  """
  for offset in [0, 1, 10]:
    disks = [(objects.Disk(dev_type=constants.DT_PLAIN),
              "/tmp/disk/%s" % idx,
              NotImplemented)
             for idx in range(len(hv_xen._DISK_LETTERS) + offset)]

    if offset == 0:
      result = hv_xen._GetConfigFileDiskData(disks, "hd")
      self.assertEqual(result, [
        "'phy:/tmp/disk/%s,hd%s,r'" % (idx, string.ascii_lowercase[idx])
        for idx in range(len(hv_xen._DISK_LETTERS) + offset)
        ])
    else:
      try:
        hv_xen._GetConfigFileDiskData(disks, "hd")
      # "except X, err" was removed in Python 3; "as" works on 2.6+
      except errors.HypervisorError as err:
        self.assertEqual(str(err), "Too many disks")
      else:
        self.fail("Exception was not raised")
def test(self):
  """IPolicy violation check must pass with and without exclusive storage."""
  beparams = {
    constants.BE_MAXMEM: 2048,
    constants.BE_VCPUS: 2,
    constants.BE_SPINDLE_USE: 4,
    }
  disks = [objects.Disk(size=512, spindles=13)]
  instance = objects.Instance(beparams=beparams, disks=disks, nics=[],
                              disk_template=constants.DT_PLAIN)
  instance2 = objects.Instance(beparams={}, disks=disks, nics=[],
                               disk_template=constants.DT_PLAIN)

  # Without exclusive storage the stub expects spindle use 4 (from
  # beparams)
  cfg = _FakeConfigForComputeIPolicyInstanceViolation(beparams, False)
  stub = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 4,
                                          constants.DT_PLAIN)
  for inst in (instance, instance2):
    ret = common.ComputeIPolicyInstanceViolation(NotImplemented, inst,
                                                 cfg, _compute_fn=stub)
    self.assertEqual(ret, [])

  # With exclusive storage the stub expects spindles 13 (from the disk)
  cfg_es = _FakeConfigForComputeIPolicyInstanceViolation(beparams, True)
  stub_es = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 13,
                                             constants.DT_PLAIN)
  for inst in (instance, instance2):
    ret = common.ComputeIPolicyInstanceViolation(NotImplemented, inst,
                                                 cfg_es,
                                                 _compute_fn=stub_es)
    self.assertEqual(ret, [])
def _MakeDisks(names):
  """Build one bare Disk object per interface name."""
  return [objects.Disk(iv_name=iface_name) for iface_name in names]
def testEncodeInstance(self):
  """Instance encoding for RPC must merge cluster defaults correctly.

  Exercises all encoder modes: plain object dict, object dict list,
  instance dict, instance dict with OS params, and instance dict with
  hypervisor/backend parameter overrides.

  """
  cluster = objects.Cluster(hvparams={
    constants.HT_KVM: {
      constants.HV_CDROM_IMAGE_PATH: "foo",
      },
    },
    beparams={
      constants.PP_DEFAULT: {
        constants.BE_MAXMEM: 8192,
        },
      },
    os_hvp={},
    osparams={
      "linux": {
        "role": "unknown",
        },
      })
  cluster.UpgradeConfig()

  inst = objects.Instance(name="inst1.example.com",
    hypervisor=constants.HT_KVM,
    os="linux",
    hvparams={
      constants.HV_CDROM_IMAGE_PATH: "bar",
      constants.HV_ROOT_PATH: "/tmp",
      },
    beparams={
      constants.BE_MINMEM: 128,
      constants.BE_MAXMEM: 256,
      },
    nics=[
      objects.NIC(nicparams={
        constants.NIC_MODE: "mymode",
        }),
      ],
    disk_template=constants.DT_PLAIN,
    disks=[
      objects.Disk(dev_type=constants.DT_PLAIN, size=4096,
                   logical_id=("vg", "disk6120")),
      objects.Disk(dev_type=constants.DT_PLAIN, size=1024,
                   logical_id=("vg", "disk8508")),
      ])
  inst.UpgradeConfig()

  cfg = _FakeConfigForRpcRunner(cluster=cluster)
  runner = rpc.RpcRunner(cfg, None, _req_process_fn=NotImplemented,
                         _getents=mocks.FakeGetentResolver)

  def _CheckBasics(result):
    self.assertEqual(result["name"], "inst1.example.com")
    self.assertEqual(result["os"], "linux")
    self.assertEqual(result["beparams"][constants.BE_MINMEM], 128)
    self.assertEqual(len(result["nics"]), 1)
    self.assertEqual(result["nics"][0]["nicparams"][constants.NIC_MODE],
                     "mymode")

  # Generic object serialization
  result = runner._encoder(NotImplemented, (rpc_defs.ED_OBJECT_DICT, inst))
  _CheckBasics(result)
  self.assertEqual(len(result["hvparams"]), 2)

  result = runner._encoder(NotImplemented,
                           (rpc_defs.ED_OBJECT_DICT_LIST, 5 * [inst]))
  # Explicit loop instead of map() for side effects: under Python 3
  # map() is lazy and these assertions would silently never run
  for res in result:
    _CheckBasics(res)
    self.assertEqual(len(res["hvparams"]), 2)

  # Just an instance
  result = runner._encoder(NotImplemented, (rpc_defs.ED_INST_DICT, inst))
  _CheckBasics(result)
  self.assertEqual(result["beparams"][constants.BE_MAXMEM], 256)
  self.assertEqual(result["hvparams"][constants.HV_CDROM_IMAGE_PATH],
                   "bar")
  self.assertEqual(result["hvparams"][constants.HV_ROOT_PATH], "/tmp")
  self.assertEqual(result["osparams"], {
    "role": "unknown",
    })
  self.assertEqual(len(result["hvparams"]),
                   len(constants.HVC_DEFAULTS[constants.HT_KVM]))

  # Instance with OS parameters
  result = runner._encoder(NotImplemented,
                           (rpc_defs.ED_INST_DICT_OSP_DP, (inst, {
    "role": "webserver",
    "other": "field",
    })))
  _CheckBasics(result)
  self.assertEqual(result["beparams"][constants.BE_MAXMEM], 256)
  self.assertEqual(result["hvparams"][constants.HV_CDROM_IMAGE_PATH],
                   "bar")
  self.assertEqual(result["hvparams"][constants.HV_ROOT_PATH], "/tmp")
  self.assertEqual(result["osparams"], {
    "role": "webserver",
    "other": "field",
    })

  # Instance with hypervisor and backend parameters
  result = runner._encoder(NotImplemented,
                           (rpc_defs.ED_INST_DICT_HVP_BEP_DP, (inst, {
    constants.HT_KVM: {
      constants.HV_BOOT_ORDER: "xyz",
      },
    }, {
    constants.BE_VCPUS: 100,
    constants.BE_MAXMEM: 4096,
    })))
  _CheckBasics(result)
  self.assertEqual(result["beparams"][constants.BE_MAXMEM], 4096)
  self.assertEqual(result["beparams"][constants.BE_VCPUS], 100)
  self.assertEqual(result["hvparams"][constants.HT_KVM], {
    constants.HV_BOOT_ORDER: "xyz",
    })
  self.assertEqual(result["disks"], [{
    "dev_type": constants.DT_PLAIN,
    "dynamic_params": {},
    "size": 4096,
    "logical_id": ("vg", "disk6120"),
    "params": constants.DISK_DT_DEFAULTS[inst.disk_template],
    }, {
    "dev_type": constants.DT_PLAIN,
    "dynamic_params": {},
    "size": 1024,
    "logical_id": ("vg", "disk8508"),
    "params": constants.DISK_DT_DEFAULTS[inst.disk_template],
    }])
  self.assertTrue(compat.all(disk.params == {} for disk in inst.disks),
                  msg="Configuration objects were modified")
def CreateDisk(self, uuid=None, name=None, dev_type=constants.DT_PLAIN,
               logical_id=None, children=None, nodes=None, iv_name=None,
               size=1024, mode=constants.DISK_RDWR, params=None,
               spindles=None, primary_node=None, secondary_node=None,
               create_nodes=False, instance_disk_index=0):
  """Create a new L{objects.Disk} object

  Unspecified fields are filled in with defaults that depend on
  C{dev_type}; for DRBD disks child data/meta disks and (optionally)
  nodes are created as well.

  @rtype: L{objects.Disk}
  @return: the newly create disk object

  """
  # Monotonically increasing id used to generate unique default names,
  # logical ids and DRBD ports
  disk_id = self._cur_disk_id
  self._cur_disk_id += 1

  if uuid is None:
    uuid = self._GetUuid()
  if name is None:
    name = "mock_disk_%d" % disk_id
  if params is None:
    params = {}

  if dev_type == constants.DT_DRBD8:
    pnode_uuid = self._GetObjUuid(primary_node)
    snode_uuid = self._GetObjUuid(secondary_node)
    # An explicit logical_id overrides the primary/secondary arguments
    if logical_id is not None:
      pnode_uuid = logical_id[0]
      snode_uuid = logical_id[1]
    # Optionally create missing nodes on the fly
    if pnode_uuid is None and create_nodes:
      pnode_uuid = self.AddNewNode().uuid
    if snode_uuid is None and create_nodes:
      snode_uuid = self.AddNewNode().uuid
    if pnode_uuid is None or snode_uuid is None:
      raise AssertionError("Trying to create DRBD disk without nodes!")
    if logical_id is None:
      logical_id = (pnode_uuid, snode_uuid,
                    constants.FIRST_DRBD_PORT + disk_id,
                    disk_id, disk_id, "mock_secret")
    if children is None:
      # DRBD needs a plain data disk plus a meta disk
      data_child = self.CreateDisk(dev_type=constants.DT_PLAIN,
                                   size=size)
      meta_child = self.CreateDisk(dev_type=constants.DT_PLAIN,
                                   size=constants.DRBD_META_SIZE)
      children = [data_child, meta_child]
    if nodes is None:
      nodes = [pnode_uuid, snode_uuid]
  elif dev_type == constants.DT_PLAIN:
    if logical_id is None:
      logical_id = ("mockvg", "mock_disk_%d" % disk_id)
    if nodes is None and primary_node is not None:
      nodes = [primary_node]
  elif dev_type in constants.DTS_FILEBASED:
    if logical_id is None:
      logical_id = (constants.FD_LOOP, "/file/storage/disk%d" % disk_id)
    # Only plain DT_FILE disks get a node; shared file storage does not
    if (nodes is None and primary_node is not None and
        dev_type == constants.DT_FILE):
      nodes = [primary_node]
  elif dev_type == constants.DT_BLOCK:
    if logical_id is None:
      logical_id = (constants.BLOCKDEV_DRIVER_MANUAL,
                    "/dev/disk/disk%d" % disk_id)
  elif dev_type == constants.DT_EXT:
    if logical_id is None:
      # ExtStorage disks have no sensible default provider
      provider = params.get(constants.IDISK_PROVIDER, None)
      if provider is None:
        raise AssertionError("You must specify a 'provider' for 'ext'"
                             " disks")
      logical_id = (provider, "mock_disk_%d" % disk_id)
  elif logical_id is None:
    # Unknown dev_type without an explicit logical_id is unsupported
    raise NotImplementedError

  if children is None:
    children = []
  if nodes is None:
    nodes = []
  if iv_name is None:
    iv_name = "disk/%d" % instance_disk_index

  return objects.Disk(uuid=uuid, name=name, dev_type=dev_type,
                      logical_id=logical_id, children=children,
                      nodes=nodes, iv_name=iv_name, size=size,
                      mode=mode, params=params, spindles=spindles)
def addChild(self, disk):
  """Adds a child of the same device type as the parent."""
  child = objects.Disk()
  child.dev_type = disk.dev_type
  disk.children = [child]
def GetInstanceDisks(self, _):
  """Return a single fixed disk stub, regardless of the instance."""
  stub_disk = objects.Disk(size=512, spindles=13, uuid="disk_uuid")
  return [stub_disk]