def test_constraint_place_engine_conflicting_constraints(self):
    """ constraint place engine should fail if multiple constraints conflict """
    # Create constraint place engine
    image_datastore = "ds1"
    image_datastores = [{"name": image_datastore, "used_for_vms": True}]
    option = PlacementOption(1, 1, image_datastores)
    ds_map = {
        image_datastore: DatastoreInfo(10, 0),
        "ds2": DatastoreInfo(20, 0)
    }
    # Pass the datastore name (not the image_datastores list of dicts),
    # consistent with the other constraint-place-engine tests here.
    ds_mgr = self.create_datastore_manager(ds_map, image_datastore)
    engine = ConstraintDiskPlaceEngine(ds_mgr, option)
    ds = engine.placeable_datastores()
    selector = DatastoreSelector.init_datastore_selector(ds_mgr, ds)

    # Try to place one disk under two mutually-exclusive DATASTORE
    # constraints: no datastore can satisfy both.
    constraints = [
        ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds1"]),
        ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds2"])
    ]
    disk = Disk(disk_id="ds1", capacity_gb=1)
    place_result = engine.place(DisksPlacement([disk], selector),
                                constraints)

    # Verify place result
    assert_that(place_result.result,
                equal_to(PlaceResultCode.NO_SUCH_RESOURCE))
def test_constraint_place_engine_no_constraints(self):
    """ constraint place engine should not handle placement with no constraints """
    # Build a constraint place engine over three datastores, ds1 being
    # the image datastore.
    image_datastore = "ds1"
    image_datastores = [{"name": image_datastore, "used_for_vms": True}]
    option = PlacementOption(1, 1, image_datastores)
    ds_map = {
        image_datastore: DatastoreInfo(10, 0),
        "ds2": DatastoreInfo(20, 0),
        "ds3": DatastoreInfo(30, 0)
    }
    ds_mgr = self.create_datastore_manager(ds_map, image_datastore)
    engine = ConstraintDiskPlaceEngine(ds_mgr, option)
    selector = DatastoreSelector.init_datastore_selector(
        ds_mgr, engine.placeable_datastores())

    # Place three disks with an empty constraint list.
    disks = [
        Disk(disk_id="disk1", capacity_gb=1),
        Disk(disk_id="disk2", capacity_gb=1),
        Disk(capacity_gb=1),
    ]
    place_result = engine.place(DisksPlacement(disks, selector), [])

    # The engine reports OK but places nothing: the placement list
    # stays empty and all three disks remain unplaced.
    assert_that(place_result.result, equal_to(PlaceResultCode.OK))
    result_placement = place_result.disks_placement
    assert_that(result_placement.placement_list, has_length(0))
    assert_that(result_placement.disks, has_length(3))
def test_place_image_disk_best_effort(self):
    ds_map = {
        "datastore_id_1": (DatastoreInfo(1 * 1024, 0), set([])),
        "datastore_id_2": (DatastoreInfo(2 * 1024, 0), set([]))
    }
    manager = PMBuilder(ds_map=ds_map).build()

    image = DiskImage("disk_image1", DiskImage.COPY_ON_WRITE)
    # disk1 is linked-cloned from the image, so its size must not be
    # counted in the placement calculation.
    disk1 = Disk(new_id(), DISK_FLAVOR, False, True, 512, image)
    disk2 = Disk(new_id(), DISK_FLAVOR, False, True, 2048)
    disk3 = Disk(new_id(), DISK_FLAVOR, False, True, 512)
    vm = Vm(new_id(), VM_FLAVOR, VmPowerState.STOPPED, None, None,
            [disk1, disk2, disk3])

    score, placement_list = manager.place(vm, None)

    # Placement is not optimal, so the utilization score is divided by
    # the non-optimal penalty, leaving 1.
    assert_that(score.utilization, equal_to(1))
    assert_that(score.transfer, equal_to(100))

    # vm + 3 disks; verify each entry in order.
    expected = [
        ("datastore_id_2", vm.id),
        ("datastore_id_2", disk2.id),
        ("datastore_id_1", disk3.id),
        ("datastore_id_1", disk1.id),
    ]
    assert_that(placement_list, has_length(len(expected)))
    for placement, (container, resource) in zip(placement_list, expected):
        assert_that(placement.container_id, equal_to(container))
        assert_that(placement.resource_id, equal_to(resource))
def test_place_with_conflicting_constraints(self):
    ds_map = {
        "datastore_id_1": (DatastoreInfo(8 * 1024, 0),
                           set(["tag1", "tag2"])),
        "datastore_id_2": (DatastoreInfo(16 * 1024, 0),
                           set(["tag3", "tag2"])),
        "datastore_id_3": (DatastoreInfo(16 * 1024, 0), set([]))
    }
    ds_with_image = ["datastore_id_1", "datastore_id_2"]
    total_storage = sum(t[0].total for t in ds_map.values())
    manager = PMBuilder(ds_map=ds_map, ds_with_image=ds_with_image).build()
    image = DiskImage("disk_image", DiskImage.COPY_ON_WRITE)

    # These two constraints conflict: tag1 only matches datastore_id_1
    # while the DATASTORE constraint demands datastore_id_2.
    tag_constraint = ResourceConstraint(
        ResourceConstraintType.DATASTORE_TAG, ["tag1"])
    ds_constraint = ResourceConstraint(
        ResourceConstraintType.DATASTORE, ["datastore_id_2"])
    disk = Disk(new_id(), DISK_FLAVOR, False, True, total_storage / 100,
                image, constraints=[tag_constraint, ds_constraint])
    manager.place(None, [disk])
def test_place_with_disks_tagging_constraints(self, tags, expected):
    ds_map = {
        "datastore_id_1": (DatastoreInfo(8 * 1024, 0),
                           set(["tag1", "tag2"])),
        "datastore_id_2": (DatastoreInfo(16 * 1024, 0),
                           set(["tag3", "tag2"])),
        "datastore_id_3": (DatastoreInfo(16 * 1024, 0), set([]))
    }
    ds_with_image = ["datastore_id_1", "datastore_id_2"]
    total_storage = sum(t[0].total for t in ds_map.values())
    manager = PMBuilder(ds_map=ds_map, ds_with_image=ds_with_image).build()
    # Disable freespace based admission control.
    manager.FREESPACE_THRESHOLD = 0

    image = DiskImage("disk_image", DiskImage.COPY_ON_WRITE)
    # One DATASTORE_TAG constraint per requested tag.
    tag_constraints = [
        ResourceConstraint(ResourceConstraintType.DATASTORE_TAG, [tag])
        for tag in tags
    ]
    disk = Disk(new_id(), DISK_FLAVOR, False, True, total_storage / 100,
                image, constraints=tag_constraints)

    score, placement_list = manager.place(None, [disk])

    # The disk must land on the datastore matching all requested tags.
    assert_that(placement_list[0].resource_id, is_(disk.id))
    assert_that(placement_list[0].container_id, is_(expected))
def test_place_vm_existing_image_two_matching_datastores(self):
    ds_map = {
        "datastore_id_1": (DatastoreInfo(8 * 1024, 0), set([])),
        "datastore_id_2": (DatastoreInfo(16 * 1024, 0), set([])),
        "datastore_id_3": (DatastoreInfo(15 * 1024, 0), set([]))
    }
    ds_with_image = ["datastore_id_1", "datastore_id_2"]
    manager = PMBuilder(ds_map=ds_map, ds_with_image=ds_with_image).build()
    total_storage = sum(t[0].total for t in ds_map.values())

    image = DiskImage("disk_image", DiskImage.COPY_ON_WRITE)
    # disk1 and disk2 each take roughly 1/100 of aggregate space.
    disk1 = Disk(new_id(), DISK_FLAVOR, False, True, total_storage / 100,
                 image)
    disk2 = Disk(new_id(), DISK_FLAVOR, False, True, total_storage / 100,
                 image)
    vm = Vm(new_id(), self._vm_flavor_mb(1), VmPowerState.STOPPED, None,
            None, [disk1, disk2])

    score, placement_list = manager.place(vm, None)

    # Two disks of approx 1% of available space each => score 98.
    assert_that(score, is_(AgentPlacementScore(98, 100)))

    # vm first, then both disks, all on datastore_id_2.
    expected = [(vm.id, "datastore_id_2"),
                (disk1.id, "datastore_id_2"),
                (disk2.id, "datastore_id_2")]
    for placement, (resource, container) in zip(placement_list, expected):
        assert_that(placement.resource_id, is_(resource))
        assert_that(placement.container_id, is_(container))
def test_placement_vm_on_image_datastore(self, use_image_ds, use_vm):
    ds_map = {
        "datastore_id_1": (DatastoreInfo(7 * 1024, 0), set([])),
        "datastore_id_2": (DatastoreInfo(8 * 1024, 0), set([])),
        "image_datastore": (DatastoreInfo(16 * 1024, 0), set([]))
    }
    manager = PMBuilder(im_ds_for_vm=use_image_ds,
                        image_ds="image_datastore",
                        ds_map=ds_map).build()

    image = DiskImage("disk_image", DiskImage.FULL_COPY)
    # Four full-copy 7T disks; they only fit when the image datastore
    # may be used for VMs.
    disk_list = [Disk(new_id(), DISK_FLAVOR, False, True, 7 * 1024, image)
                 for _ in range(4)]

    if use_vm:
        vm = Vm(new_id(), VM_FLAVOR, State.STOPPED, None, None, disk_list)
        disks = None
    else:
        vm = None
        disks = disk_list

    if not use_image_ds:
        # Without the image datastore the capacity is insufficient.
        self.assertRaises(NotEnoughDatastoreCapacityException,
                          manager.place, vm, disks)
        return

    score, placement_list = manager.place(vm, disks)
    container_ids = [item.container_id for item in placement_list]
    resource_ids = [item.resource_id for item in placement_list]

    # Free space before each disk placement:
    #   image_datastore: 16T -> 9T -> 2T; datastore_id_2: 8T -> 1T;
    #   datastore_id_1: 7T
    expected_containers = [
        "image_datastore",
        "image_datastore",
        "datastore_id_2",
        "datastore_id_1",
    ]
    expected_resources = [d.id for d in disk_list]
    if use_vm:
        # The vm itself is placed first, on the image datastore.
        expected_containers.insert(0, "image_datastore")
        expected_resources.insert(0, vm.id)
    assert_that(container_ids, contains(*expected_containers))
    assert_that(resource_ids, contains(*expected_resources))
def test_constraint_place_engine(self):
    # Build a constraint place engine over ds1 (image datastore), ds2
    # and ds3.
    image_datastore = "ds1"
    image_datastores = [{"name": image_datastore, "used_for_vms": True}]
    option = PlacementOption(1, 1, image_datastores)
    ds_map = {
        image_datastore: DatastoreInfo(10, 0),
        "ds2": DatastoreInfo(20, 0),
        "ds3": DatastoreInfo(30, 0)
    }
    ds_mgr = self.create_datastore_manager(ds_map, image_datastore)
    engine = ConstraintDiskPlaceEngine(ds_mgr, option)
    selector = DatastoreSelector.init_datastore_selector(
        ds_mgr, engine.placeable_datastores())

    # Three disks pinned to a specific datastore each, plus two
    # unconstrained disks the engine must leave unplaced.
    def _pinned(ds_name):
        return Disk(disk_id="disk-in-" + ds_name, capacity_gb=5,
                    constraints=[ResourceConstraint(
                        ResourceConstraintType.DATASTORE, [ds_name])])

    disks = [
        Disk(disk_id="disk", capacity_gb=5, constraints=[]),
        _pinned("ds1"),
        _pinned("ds2"),
        _pinned("ds3"),
        Disk(capacity_gb=5, constraints=[]),
    ]
    place_result = engine.place(DisksPlacement(disks, selector))

    assert_that(place_result.result, equal_to(PlaceResultCode.OK))

    # The three constrained disks were placed on their datastores; the
    # two unconstrained ones remain.
    result_placement = place_result.disks_placement
    assert_that(result_placement.placement_list, has_length(3))
    assert_that(result_placement.disks, has_length(2))
    assert_that([p.resource_id for p in result_placement.placement_list],
                contains_inanyorder("disk-in-ds1", "disk-in-ds2",
                                    "disk-in-ds3"))
    for placement in result_placement.placement_list:
        assert_that(placement.type, equal_to(AgentResourcePlacement.DISK))
        assert_that("disk-in-" + placement.container_id,
                    equal_to(placement.resource_id))
def test_place_large_disks(self, use_vm):
    ds_map = {
        "datastore_id_1": (DatastoreInfo(1 * 1024, 0), set([])),
        "datastore_id_2": (DatastoreInfo(2 * 1024, 0), set([])),
        "datastore_id_3": (DatastoreInfo(3 * 1024, 0), set([]))
    }
    ds_with_image = ["datastore_id_3"]
    total_storage = sum(t[0].total for t in ds_map.values())
    manager = PMBuilder(ds_map=ds_map, ds_with_image=ds_with_image).build()

    image = DiskImage("disk_image", DiskImage.FULL_COPY)
    disk_list = [Disk(new_id(), DISK_FLAVOR, False, True, 1024, image)
                 for _ in range(4)]
    used_storage = sum(disk.capacity_gb for disk in disk_list)

    if use_vm:
        vm = Vm(new_id(), VM_FLAVOR, State.STOPPED, None, None, disk_list)
        disks = None
    else:
        vm = None
        disks = disk_list

    score, placement_list = manager.place(vm, disks)

    # Optimal placement (all disks on one datastore) cannot be
    # achieved, so the expected score is divided by
    # NOT_OPTIMAL_DIVIDE_FACTOR.
    expected_score = 100 * (total_storage - used_storage) / \
        total_storage / PlacementManager.NOT_OPTIMAL_DIVIDE_FACTOR
    assert_that(score, is_(AgentPlacementScore(expected_score, 100)))

    base_index = 0
    if vm:
        assert_that(placement_list[base_index].resource_id, is_(vm.id))
        assert_that(placement_list[base_index].container_id,
                    is_("datastore_id_3"))
        base_index += 1

    # The first three disks fit on datastore_id_3; the fourth spills
    # over to datastore_id_2.
    expected_containers = ["datastore_id_3", "datastore_id_3",
                           "datastore_id_3", "datastore_id_2"]
    for offset, (disk, container) in enumerate(zip(disk_list,
                                                   expected_containers)):
        assert_that(placement_list[base_index + offset].resource_id,
                    is_(disk.id))
        assert_that(placement_list[base_index + offset].container_id,
                    is_(container))
def test_place_with_disks_constraints(self):
    ds_map = {
        "datastore_id_1": (DatastoreInfo(8 * 1024, 0), set([])),
        "datastore_id_2": (DatastoreInfo(16 * 1024, 0), set([])),
        "datastore_id_3": (DatastoreInfo(16 * 1024, 0), set([]))
    }
    ds_name_id_map = {
        "ds1": "datastore_id_1",
        "ds2": "datastore_id_2",
        "ds3": "datastore_id_3"
    }
    ds_with_image = ["datastore_id_1", "datastore_id_2"]
    total_storage = sum(t[0].total for t in ds_map.values())
    manager = PMBuilder(ds_map=ds_map, ds_with_image=ds_with_image,
                        ds_name_id_map=ds_name_id_map).build()
    # Disable freespace based admission control.
    manager.FREESPACE_THRESHOLD = 0

    image = DiskImage("disk_image", DiskImage.COPY_ON_WRITE)
    # disk1 is constrained by datastore id, disk2 by datastore name;
    # both forms must be honored.
    constraint1 = ResourceConstraint(ResourceConstraintType.DATASTORE,
                                     ["datastore_id_1"])
    constraint2 = ResourceConstraint(ResourceConstraintType.DATASTORE,
                                     ["ds2"])
    disk1 = Disk(new_id(), DISK_FLAVOR, False, True, total_storage / 100,
                 image, constraints=[constraint1])
    disk2 = Disk(new_id(), DISK_FLAVOR, False, True, total_storage / 100,
                 image, constraints=[constraint2])

    score, placement_list = manager.place(None, [disk1, disk2])

    # Image (linked-clone) disks do not count toward utilization.
    assert_that(score, is_(AgentPlacementScore(100, 100)))
    assert_that(placement_list[0].resource_id, is_(disk1.id))
    assert_that(placement_list[1].resource_id, is_(disk2.id))
    assert_that(placement_list[0].container_id, is_("datastore_id_1"))
    assert_that(placement_list[1].container_id, is_("datastore_id_2"))
def total_datastore_info(self):
    """Aggregate capacity and usage across all known datastores."""
    totals = DatastoreInfo(0, 0)
    for datastore_id in self.datastore_manager.get_datastore_ids():
        info = self.system.datastore_info(datastore_id)
        totals.total += info.total
        totals.used += info.used
    return totals
def test_place_large_disks_image_datastore(self):
    ds_map = {"datastore_id_1": (DatastoreInfo(3 * 1024, 0), set([]))}
    ds_with_image = ["datastore_id_1", "datastore_id_2"]
    total_storage = sum(t[0].total for t in ds_map.values())
    manager = PMBuilder(ds_map=ds_map, ds_with_image=ds_with_image).build()
    # Disable freespace based admission control.
    manager.FREESPACE_THRESHOLD = 0

    disk1 = Disk(new_id(), DISK_FLAVOR, False, True, 1024)
    disk2 = Disk(new_id(), DISK_FLAVOR, False, True, 1024)
    disk_list = [disk1, disk2]
    used_storage = sum(d.capacity_gb for d in disk_list)
    vm = Vm(new_id(), VM_FLAVOR, VmPowerState.STOPPED, None, None,
            disk_list)

    score, placement_list = manager.place(vm, None)

    # Everything fits on the single datastore: plain utilization score.
    expected_score = 100 * (total_storage - used_storage) / total_storage
    assert_that(score, is_(AgentPlacementScore(expected_score, 100)))

    # The vm placement comes first, followed by both disks.
    assert_that(placement_list[0].resource_id, is_(vm.id))
    assert_that(placement_list[0].container_id, is_("datastore_id_1"))
    assert_that(placement_list[1].resource_id, is_(disk1.id))
    assert_that(placement_list[2].resource_id, is_(disk2.id))
    assert_that(placement_list[1].container_id, is_("datastore_id_1"))
    assert_that(placement_list[2].container_id, is_("datastore_id_1"))
def test_constraint_place_engine_constraint_violated(self):
    # Create constraint place engine
    image_datastore = "ds1"
    image_datastores = [{"name": image_datastore, "used_for_vms": True}]
    option = PlacementOption(1, 1, image_datastores)
    ds_map = {image_datastore: DatastoreInfo(5, 0)}
    # Pass the datastore name (not the image_datastores list of dicts),
    # consistent with the other constraint-place-engine tests here.
    ds_mgr = self.create_datastore_manager(ds_map, image_datastore)
    engine = ConstraintDiskPlaceEngine(ds_mgr, option)
    ds = engine.placeable_datastores()
    selector = DatastoreSelector.init_datastore_selector(ds_mgr, ds)

    # Try to place a disk pinned to ds2, which is not connected.
    disks = [
        Disk(disk_id="ds1", capacity_gb=1, constraints=[
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds2"])
        ]),
    ]
    place_result = engine.place(DisksPlacement(disks, selector))

    # The violated constraint must fail the placement.
    assert_that(place_result.result,
                equal_to(PlaceResultCode.NO_SUCH_RESOURCE))
def test_place_vm_fail_disk_too_large(self):
    # Aggregate capacity (1T + 2T + 3T) cannot hold four 2T disks; the
    # place() call is expected to fail (presumably asserted by a test
    # decorator outside this view).
    ds_map = {
        "datastore_id_1": (DatastoreInfo(1 * 1024, 0), set([])),
        "datastore_id_2": (DatastoreInfo(2 * 1024, 0), set([])),
        "datastore_id_3": (DatastoreInfo(3 * 1024, 0), set([]))
    }
    ds_with_image = ["datastore_id_1", "datastore_id_2"]
    manager = PMBuilder(ds_map=ds_map, ds_with_image=ds_with_image).build()

    disk_list = [Disk(new_id(), DISK_FLAVOR, False, True, 2 * 1024)
                 for _ in range(4)]
    vm = Vm(new_id(), VM_FLAVOR, VmPowerState.STOPPED, None, None,
            disk_list)

    manager.place(vm, None)
def test_place_storage_constraint(self):
    # A 1T disk against a datastore with only 1T free (8T total, 7T
    # used); place() is expected to fail (presumably asserted by a test
    # decorator outside this view).
    disk = Disk(new_id(), DISK_FLAVOR, True, True, 1024)
    vm = Vm(new_id(), VM_FLAVOR, VmPowerState.STARTED, None)
    vm.disks = [disk]
    ds_map = {
        "datastore_id_1": (DatastoreInfo(8 * 1024, 7 * 1024), set([]))
    }
    manager = PMBuilder(ds_map=ds_map).build()
    manager.place(vm, None)
def datastore_info(self, datastore_id):
    """Return DatastoreInfo(capacity, used) for a connected datastore.

    Raises Exception if the datastore is not connected.
    """
    if datastore_id not in self.datastores:
        self._logger.warning("Datastore (%s) not connected" % datastore_id)
        # Keep the generic exception type for existing callers, but
        # carry a message so failures are diagnosable.
        raise Exception("Datastore (%s) not connected" % datastore_id)
    capacity = self.datastores[datastore_id]
    used_storage = self._disk_manager.used_storage(datastore_id)
    self._logger.debug("used_storage: %d" % used_storage)
    return DatastoreInfo(capacity, used_storage)
def _fetch_datastore_info(self, datastore_id):
    # XXX: datastore_id is a misnomer, the parameter is expected
    # to be a datastore name
    self._logger.debug("Fetching fresh datastore info: %s" % datastore_id)
    summary = self._vim_client.get_datastore(datastore_id).summary
    gib = 1024 ** 3
    total = float(summary.capacity) / gib
    free = float(summary.freeSpace) / gib
    info = DatastoreInfo(total, total - free)
    with self._lock:
        # Publish the fresh entry and clear the pending marker under
        # the same lock acquisition.
        self._datastore_info_cache[datastore_id] = CacheEntry(info)
        del self._pending_datastore_updates[datastore_id]
    return info
def test_place_vm_in_no_image_datastore(self):
    ds_map = {
        "datastore_id_1": (DatastoreInfo(8 * 1024, 0), set([])),
        "datastore_id_2": (DatastoreInfo(8 * 1024, 0), set([])),
        "datastore_id_3": (DatastoreInfo(16 * 1024, 0), set([]))
    }
    ds_with_image = ["datastore_id_1", "datastore_id_2"]
    manager = PMBuilder(ds_map=ds_map, ds_with_image=ds_with_image,
                        image_size=100 * 1024 * 1024).build()
    total_storage = sum(t[0].total for t in ds_map.values())

    image = DiskImage("disk_image", DiskImage.COPY_ON_WRITE)
    # disk1 and disk2 each take roughly 1/100 of aggregate space.
    disk1 = Disk(new_id(), DISK_FLAVOR, False, True, total_storage / 100,
                 image)
    disk2 = Disk(new_id(), DISK_FLAVOR, False, True, total_storage / 100)
    vm = Vm(new_id(), self._vm_flavor_mb(1), VmPowerState.STOPPED, None,
            None, [disk1, disk2])

    score, placement_list = manager.place(vm, None)

    # Two 1% disks => utilization 98; the 100MB image is 10% of 1GB,
    # so the transfer score is 90.
    assert_that(score, is_(AgentPlacementScore(98, 90)))
    assert_that(placement_list, has_length(3))
    for placement in placement_list:
        assert_that(placement.container_id, is_("datastore_id_3"))

    # With only the image datastore available and
    # use_image_datastore_for_vms disabled, placement must fail.
    ds_map = {"image_datastore": (DatastoreInfo(8 * 1024, 0), set())}
    manager = PMBuilder(ds_map=ds_map, im_ds_for_vm=False).build()
    # vm with disks
    self.assertRaises(NoSuchResourceException, manager.place, vm, None)
    # vm without disks
    vm = Vm(new_id(), VM_FLAVOR, VmPowerState.STOPPED)
    self.assertRaises(NoSuchResourceException, manager.place, vm, None)
def test_optimal_place_engine(self, result, ratio, disk_sizes, use_image_ds):
    # Build an optimal place engine over three datastores; whether the
    # image datastore accepts VMs is parameterized.
    image_datastore = "datastore_id_1"
    image_datastores = [{
        "name": image_datastore,
        "used_for_vms": use_image_ds
    }]
    option = PlacementOption(1, 1, image_datastores)
    ds_map = {
        "datastore_id_1": DatastoreInfo(1 * 1024, 0),
        "datastore_id_2": DatastoreInfo(2 * 1024, 0),
        "datastore_id_3": DatastoreInfo(3 * 1024, 0)
    }
    ds_mgr = self.create_datastore_manager(ds_map, image_datastore)
    engine = OptimalPlaceEngine(ds_mgr, option)
    selector = DatastoreSelector.init_datastore_selector(
        ds_mgr, engine.placeable_datastores())

    place_result = engine.place(
        DisksPlacement(self.create_disks(disk_sizes), selector))

    # Verify the result code and resulting utilization ratio.
    assert_that(place_result.result, equal_to(result))
    assert_that(place_result.disks_placement.selector.ratio(),
                equal_to(ratio))

    # On success everything lands on the largest datastore as a single
    # DISK placement entry.
    if disk_sizes and place_result.result == PlaceResultCode.OK:
        placement_list = place_result.disks_placement.placement_list
        assert_that(placement_list, has_length(1))
        assert_that(placement_list[0].type,
                    equal_to(AgentResourcePlacement.DISK))
        assert_that(placement_list[0].container_id,
                    equal_to("datastore_id_3"))
def test_best_effort_place_engine(self, result, ratio, disk_sizes, places, use_image_ds):
    # Create best effort place engine
    image_datastore = "ds1"
    image_datastores = [{
        "name": image_datastore,
        "used_for_vms": use_image_ds
    }]
    option = PlacementOption(1, 1, image_datastores)
    ds_map = {
        image_datastore: DatastoreInfo(10, 0),
        "ds2": DatastoreInfo(20, 0),
        "ds3": DatastoreInfo(30, 0)
    }
    ds_mgr = self.create_datastore_manager(ds_map, image_datastore)
    engine = BestEffortPlaceEngine(ds_mgr, option)
    ds = engine.placeable_datastores()
    selector = DatastoreSelector.init_datastore_selector(ds_mgr, ds)
    disks_placement = DisksPlacement(self.create_disks(disk_sizes),
                                     selector)

    # Try place
    place_result = engine.place(disks_placement)

    # Verify place result
    assert_that(place_result.result, equal_to(result))

    # Verify each placement lands on the expected datastore.
    if disk_sizes and place_result.result == PlaceResultCode.OK:
        assert_that(place_result.disks_placement.selector.ratio(),
                    equal_to(ratio))
        placement_list = place_result.disks_placement.placement_list
        for i, place in enumerate(places):
            # Fix: check entry i's type — the original re-checked
            # placement_list[0].type on every iteration, so only the
            # first entry's type was ever verified.
            assert_that(placement_list[i].type,
                        equal_to(AgentResourcePlacement.DISK))
            assert_that(placement_list[i].container_id, equal_to(place))
def test_place_disks_with_threshold(self, threshold, expected):
    # Single 1T datastore; the freespace threshold parameter drives the
    # expected utilization score.
    ds_map = {"datastore_id_1": (DatastoreInfo(1 * 1024, 0), set([]))}
    manager = PMBuilder(ds_map=ds_map).build()
    manager.FREESPACE_THRESHOLD = threshold

    boot_disk = Disk(new_id(), DISK_FLAVOR, False, True, 512, None)
    vm = Vm(new_id(), VM_FLAVOR, VmPowerState.STOPPED, None, None,
            [boot_disk])

    score, placement_list = manager.place(vm, None)
    assert_that(score, is_(AgentPlacementScore(expected, 100)))
def test_constraint_place_engine_cannot_fit(self):
    # Constraint place engine over a single 5GB datastore.
    image_datastore = "ds1"
    image_datastores = [{"name": image_datastore, "used_for_vms": True}]
    option = PlacementOption(1, 1, image_datastores)
    ds_map = {image_datastore: DatastoreInfo(5, 0)}
    ds_mgr = self.create_datastore_manager(ds_map, image_datastore)
    engine = ConstraintDiskPlaceEngine(ds_mgr, option)
    selector = DatastoreSelector.init_datastore_selector(
        ds_mgr, engine.placeable_datastores())

    # A 6GB disk pinned to the 5GB datastore cannot fit.
    constraint = ResourceConstraint(ResourceConstraintType.DATASTORE,
                                    ["ds1"])
    disk = Disk(disk_id="ds1", capacity_gb=6)
    place_result = engine.place(DisksPlacement([disk], selector),
                                [constraint])

    assert_that(
        place_result.result,
        equal_to(PlaceResultCode.NOT_ENOUGH_DATASTORE_CAPACITY))
def test_place_image_disk_not_included(self):
    ds_map = {"datastore_id_1": (DatastoreInfo(1 * 1024, 0), set([]))}
    manager = PMBuilder(ds_map=ds_map).build()
    image = DiskImage("disk_image1", DiskImage.COPY_ON_WRITE)

    # A disk linked-cloned from an image must not have its size counted
    # in the placement calculation. Run the same placement twice:
    # unconstrained, then pinned to the only datastore — the outcome
    # must be identical.
    constraint = ResourceConstraint(ResourceConstraintType.DATASTORE,
                                    ["datastore_id_1"])
    for constraints in (None, [constraint]):
        if constraints is None:
            disk = Disk(new_id(), DISK_FLAVOR, False, True, 2048, image)
        else:
            disk = Disk(new_id(), DISK_FLAVOR, False, True, 2048, image,
                        constraints=constraints)
        vm = Vm(new_id(), VM_FLAVOR, State.STOPPED, None, None, [disk])

        score, placement_list = manager.place(vm, None)

        assert_that(score.utilization, equal_to(97))
        assert_that(score.transfer, equal_to(100))
        assert_that(placement_list, has_length(2))
        assert_that(placement_list[0].container_id,
                    equal_to("datastore_id_1"))
        assert_that(placement_list[0].resource_id, equal_to(vm.id))
        assert_that(placement_list[1].container_id,
                    equal_to("datastore_id_1"))
        assert_that(placement_list[1].resource_id, equal_to(disk.id))
class PMBuilder(object):
    """Builds a PlacementManager wired to a fully mocked hypervisor."""

    DEFAULT_DS_MAP = {"datastore_id_1": (DatastoreInfo(8 * 1024, 0),
                                         set([]))}

    def __init__(self, total_mem=64 * 1024, image_id='image_id',
                 image_ds=None, mem_overcommit=1.0,
                 ds_map=None, ds_with_image=None, cpu_overcommit=None,
                 im_ds_for_vm=False, image_size=100 * 1024 * 1024,
                 ds_name_id_map=None, vm_networks=None,
                 host_version="version1"):
        # NOTE(review): the mutable default arguments
        # (image_ds=['image_datastore'], vm_networks=[]) were replaced
        # with None sentinels; the effective defaults are unchanged,
        # but instances no longer share (and can no longer mutate) a
        # single default list.
        self._logger = logging.getLogger(__name__)
        self.total_mem = total_mem
        self.host_version = host_version
        self.image_id = image_id
        self.image_ds = (image_ds if image_ds is not None
                         else ['image_datastore'])
        self.mem_overcommit = mem_overcommit
        self.cpu_overcommit = cpu_overcommit
        if not self.cpu_overcommit:
            # Large number to prevent tests that do not specify overcommit,
            # from failing.
            self.cpu_overcommit = 100.0
        self.ds_map = ds_map or self.DEFAULT_DS_MAP
        if ds_with_image is None:
            self.ds_with_image = self.ds_map.keys()
        else:
            self.ds_with_image = ds_with_image
        self.image_size = image_size
        self._ds_name_id_map = ds_name_id_map
        self.vm_networks = vm_networks if vm_networks is not None else []
        self.image_datastores = [{
            "name": ds,
            "used_for_vms": im_ds_for_vm
        } for ds in self.image_ds]

    def normalize(self, ds_name_or_id):
        """Map a datastore name to its id (identity when no map is set).

        Raises DatastoreNotFoundException for unknown names/ids.
        """
        # if test does not set ds_name_id_map, simply return name as id
        if not self._ds_name_id_map:
            return ds_name_or_id
        if ds_name_or_id in self._ds_name_id_map:
            return self._ds_name_id_map[ds_name_or_id]
        if ds_name_or_id in self._ds_name_id_map.values():
            return ds_name_or_id
        raise DatastoreNotFoundException("%s not found" % ds_name_or_id)

    def build(self):
        """Wire all hypervisor mocks and return a PlacementManager."""
        hypervisor = MagicMock()
        hypervisor.datastore_manager = MagicMock()
        hypervisor.datastore_manager.vm_datastores.return_value = \
            [ds for ds in self.ds_map.keys() if ds not in self.image_ds]
        hypervisor.datastore_manager.get_datastore_ids.return_value = \
            self.ds_map.keys()
        hypervisor.datastore_manager.datastore_info = self.datastore_info
        hypervisor.datastore_manager.normalize.side_effect = self.normalize
        hypervisor.datastore_manager.get_datastores.return_value = \
            [Datastore(id=ds_id, tags=self.ds_map[ds_id][1])
             for ds_id in self.ds_map.keys()]
        hypervisor.network_manager.get_vm_networks.return_value = \
            self.vm_networks
        hypervisor.system = MagicMock()
        hypervisor.system.total_vmusable_memory_mb.return_value = \
            self.total_mem
        hypervisor.system.host_version.return_value = \
            self.host_version
        hypervisor.system.num_physical_cpus.return_value = 1
        hypervisor.image_manager = MagicMock()
        hypervisor.image_manager.get_image_id_from_disks.return_value = \
            self.image_id
        hypervisor.image_manager.check_image = self.check_image
        hypervisor.image_manager.image_size.return_value = self.image_size
        hypervisor.vm_manager = MagicMock()
        hypervisor.vm_manager.get_used_memory_mb.return_value = 0
        placement_option = PlacementOption(self.mem_overcommit,
                                           self.cpu_overcommit,
                                           self.image_datastores)
        return PlacementManager(hypervisor, placement_option)

    def datastore_info(self, datastore_id):
        """Return the mocked DatastoreInfo for a configured datastore.

        Raises Exception for unknown datastore ids.
        """
        if datastore_id not in self.ds_map:
            self._logger.warning("Datastore (%s) not connected"
                                 % datastore_id)
            raise Exception("Datastore (%s) not connected" % datastore_id)
        return self.ds_map[datastore_id][0]

    def check_image(self, image_id, datastore_id):
        """Report whether the image is present on the given datastore."""
        return datastore_id in self.ds_with_image
def test_get_inactive_images(self): handler = HostHandler(MagicMock()) image_monitor = MagicMock() image_monitor.get_image_scanner = MagicMock() handler._hypervisor.image_monitor = image_monitor image_scanner = MagicMock() # Mock datastore manager and datastore_info() datastore_manager = MagicMock() datastore_manager.datastore_info = MagicMock() datastore_manager.\ datastore_info.return_value = DatastoreInfo(10.2, 6.1) handler._hypervisor.datastore_manager = datastore_manager # Mock image manager and get_timestamp_mod_time_from_dir() image_manager = MagicMock() image_manager.get_timestamp_mod_time_from_dir = MagicMock() image_manager.get_timestamp_mod_time_from_dir.side_effect = \ self._local_get_mod_time handler._hypervisor.image_manager = image_manager # Setup request request = GetInactiveImagesRequest() request.datastore_id = "DS_ID_1" # Test success image_scanner.get_state = MagicMock() image_scanner.\ get_state.return_value = DatastoreImageScanner.State.IDLE image_scanner.get_unused_images = MagicMock() image_scanner.\ get_unused_images.side_effect = self._local_get_unused_images image_monitor.get_image_scanner.return_value = image_scanner response = handler.get_inactive_images(request) assert_that(response.result is GetMonitoredImagesResultCode.OK) assert_that(response.totalMB == 10L) assert_that(response.usedMB == 6L) image_descriptors = response.image_descs assert_that(image_descriptors[0].image_id is "Image_Id_1") assert_that(image_descriptors[0].timestamp == 10001) assert_that(image_descriptors[1].image_id is "Image_Id_2") assert_that(image_descriptors[1].timestamp == 10002) assert_that(len(image_descriptors) is 2) # Test exception from get_timestamp_mod_time_from_dir image_manager.get_timestamp_mod_time_from_dir.side_effect = \ OSError response = handler.get_inactive_images(request) assert_that(response.result is GetMonitoredImagesResultCode.OK) assert_that(response.totalMB == 10L) assert_that(response.usedMB == 6L) image_descriptors = 
response.image_descs assert_that(image_descriptors[0].image_id is "Image_Id_1") assert_that(image_descriptors[0].timestamp == 0) assert_that(image_descriptors[1].image_id is "Image_Id_2") assert_that(image_descriptors[1].timestamp == 0) assert_that(len(image_descriptors) is 2) # Test operation in progress image_scanner.\ get_state.return_value = DatastoreImageSweeper.State.IMAGE_SWEEP response = handler.get_inactive_images(request) assert_that(response.result is GetMonitoredImagesResultCode.OPERATION_IN_PROGRESS) # Test invalid datastore image_monitor.\ get_image_scanner.side_effect = DatastoreNotFoundException response = handler.get_inactive_images(request) assert_that( response.result is GetMonitoredImagesResultCode.DATASTORE_NOT_FOUND )