def test_constraint_place_engine_conflicting_constraints(self):
    """Constraint place engine should fail if multiple constraints conflict.

    Two DATASTORE constraints naming different datastores can never be
    satisfied by a single disk, so placement must report NO_SUCH_RESOURCE.
    """
    # Create constraint place engine
    image_datastore = "ds1"
    image_datastores = [{"name": image_datastore, "used_for_vms": True}]
    option = PlacementOption(1, 1, image_datastores)
    ds_map = {
        image_datastore: DatastoreInfo(10, 0),
        "ds2": DatastoreInfo(20, 0)
    }
    # NOTE(review): siblings pass the datastore name string here, not the
    # image_datastores dict list — made consistent with the other tests.
    ds_mgr = self.create_datastore_manager(ds_map, image_datastore)
    engine = ConstraintDiskPlaceEngine(ds_mgr, option)
    ds = engine.placeable_datastores()
    selector = DatastoreSelector.init_datastore_selector(ds_mgr, ds)

    # Try place with two mutually-exclusive datastore constraints
    constraints = [
        ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds1"]),
        ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds2"])
    ]
    disk = Disk(disk_id="ds1", capacity_gb=1)
    place_result = engine.place(DisksPlacement([disk], selector),
                                constraints)

    # Verify place result
    assert_that(place_result.result,
                equal_to(PlaceResultCode.NO_SUCH_RESOURCE))
def __init__(self, agent_config): self.logger = logging.getLogger(__name__) # If VimClient's housekeeping thread failed to update its own cache, # call errback to commit suicide. Watchdog will bring up the agent # again. self.host_client = Hypervisor.create_host_client( errback=lambda: suicide()) self.host_client.connect_local() atexit.register(lambda client: client.disconnect(), self.host_client) self.datastore_manager = DatastoreManager( self, agent_config.datastores, agent_config.image_datastores) # datastore manager needs to update the cache when there is a change. self.host_client.add_update_listener(self.datastore_manager) self.vm_manager = VmManager(self.host_client, self.datastore_manager) self.disk_manager = DiskManager(self.host_client, self.datastore_manager) self.image_manager = ImageManager(self.host_client, self.datastore_manager) self.network_manager = NetworkManager(self.host_client) self.system = System(self.host_client) options = PlacementOption(agent_config.memory_overcommit, agent_config.cpu_overcommit, agent_config.image_datastores) self.placement_manager = PlacementManager(self, options) self.image_monitor = ImageMonitor(self.datastore_manager, self.image_manager, self.vm_manager) self.image_manager.monitor_for_cleanup() self.image_transferer = NfcImageTransferer(self.host_client) atexit.register(self.image_manager.cleanup)
def test_constraint_place_engine_no_constraints(self):
    """Constraint place engine should not handle placement with no
    constraints: it returns OK but leaves every disk unplaced.
    """
    # Build the engine over three datastores, ds1 being the image one.
    image_ds_name = "ds1"
    image_ds_config = [{"name": image_ds_name, "used_for_vms": True}]
    placement_option = PlacementOption(1, 1, image_ds_config)
    datastore_map = {
        image_ds_name: DatastoreInfo(10, 0),
        "ds2": DatastoreInfo(20, 0),
        "ds3": DatastoreInfo(30, 0)
    }
    datastore_mgr = self.create_datastore_manager(datastore_map,
                                                  image_ds_name)
    engine = ConstraintDiskPlaceEngine(datastore_mgr, placement_option)
    candidates = engine.placeable_datastores()
    selector = DatastoreSelector.init_datastore_selector(datastore_mgr,
                                                         candidates)

    # Attempt placement of three disks with an empty constraint list.
    request = DisksPlacement(
        [
            Disk(disk_id="disk1", capacity_gb=1),
            Disk(disk_id="disk2", capacity_gb=1),
            Disk(capacity_gb=1),
        ],
        selector)
    place_result = engine.place(request, [])

    # The engine reports success but places nothing ...
    assert_that(place_result.result, equal_to(PlaceResultCode.OK))

    # ... so the placement list is empty and all disks remain pending.
    outcome = place_result.disks_placement
    assert_that(outcome.placement_list, has_length(0))
    assert_that(outcome.disks, has_length(3))
def test_constraint_place_engine_cannot_fit(self):
    """A constrained disk larger than its datastore must be rejected
    with NOT_ENOUGH_DATASTORE_CAPACITY.
    """
    # Single 5GB datastore, which doubles as the image datastore.
    image_ds_name = "ds1"
    image_ds_config = [{"name": image_ds_name, "used_for_vms": True}]
    placement_option = PlacementOption(1, 1, image_ds_config)
    datastore_mgr = self.create_datastore_manager(
        {image_ds_name: DatastoreInfo(5, 0)}, image_ds_name)
    engine = ConstraintDiskPlaceEngine(datastore_mgr, placement_option)
    candidates = engine.placeable_datastores()
    selector = DatastoreSelector.init_datastore_selector(datastore_mgr,
                                                         candidates)

    # A 6GB disk pinned to the 5GB datastore cannot fit.
    pin_to_ds1 = ResourceConstraint(ResourceConstraintType.DATASTORE,
                                    ["ds1"])
    oversized_disk = Disk(disk_id="ds1", capacity_gb=6)
    place_result = engine.place(
        DisksPlacement([oversized_disk], selector), [pin_to_ds1])

    # Placement must fail on capacity.
    assert_that(place_result.result,
                equal_to(PlaceResultCode.NOT_ENOUGH_DATASTORE_CAPACITY))
def test_constraint_place_engine(self, ds_constraint):
    """All disks constrained to one datastore land on that datastore.

    :param ds_constraint: datastore id (or tag normalizing to one) that
        every disk is constrained to.
    """
    # Three datastores, ds1 being the image datastore.
    image_ds_name = "ds1"
    image_ds_config = [{"name": image_ds_name, "used_for_vms": True}]
    placement_option = PlacementOption(1, 1, image_ds_config)
    datastore_map = {
        image_ds_name: DatastoreInfo(10, 0),
        "ds2": DatastoreInfo(20, 0),
        "ds3": DatastoreInfo(30, 0)
    }
    datastore_mgr = self.create_datastore_manager(datastore_map,
                                                  image_ds_name)
    engine = ConstraintDiskPlaceEngine(datastore_mgr, placement_option)
    candidates = engine.placeable_datastores()
    selector = DatastoreSelector.init_datastore_selector(datastore_mgr,
                                                         candidates)

    # Place three disks, all pinned by the parameterized constraint.
    constraint = ResourceConstraint(ResourceConstraintType.DATASTORE,
                                    [ds_constraint])
    request = DisksPlacement(
        [
            Disk(disk_id="disk1", capacity_gb=1),
            Disk(disk_id="disk2", capacity_gb=1),
            Disk(capacity_gb=1),
        ],
        selector)
    place_result = engine.place(request, [constraint])

    # Placement succeeds ...
    assert_that(place_result.result, equal_to(PlaceResultCode.OK))

    # ... every disk got a placement and none are left over.
    outcome = place_result.disks_placement
    assert_that(outcome.placement_list, has_length(3))
    assert_that(outcome.disks, has_length(0))

    # Disks with explicit ids keep them in the placements.
    placed_ids = [p.resource_id
                  for p in outcome.placement_list
                  if p.resource_id]
    assert_that(placed_ids, contains_inanyorder("disk1", "disk2"))

    # Every placement is a DISK placement on the constrained datastore.
    for placement in outcome.placement_list:
        assert_that(placement.type,
                    equal_to(AgentResourcePlacement.DISK))
        assert_that(placement.container_id,
                    equal_to(constraint.values[0]))
def build(self):
    """Assemble a PlacementManager backed by a fully mocked hypervisor.

    Every manager on the mock is driven by this builder's configured
    fields (ds_map, image_ds, vm_networks, total_mem, image size/id,
    overcommit ratios).
    """
    hypervisor = MagicMock()

    # Datastore manager: ids, infos and tags all come from ds_map;
    # vm_datastores excludes the image datastores.
    ds_mgr = MagicMock()
    hypervisor.datastore_manager = ds_mgr
    ds_mgr.vm_datastores.return_value = [
        name for name in self.ds_map.keys()
        if name not in self.image_ds]
    ds_mgr.get_datastore_ids.return_value = self.ds_map.keys()
    ds_mgr.datastore_info = self.datastore_info
    ds_mgr.normalize.side_effect = self.normalize
    ds_mgr.get_datastores.return_value = [
        Datastore(id=name, tags=info[1])
        for name, info in self.ds_map.items()]

    # Networks visible to VMs.
    hypervisor.network_manager.get_vm_networks.return_value = \
        self.vm_networks

    # System facts: memory, host version, one physical cpu.
    system = MagicMock()
    hypervisor.system = system
    system.total_vmusable_memory_mb.return_value = self.total_mem
    system.host_version.return_value = self.host_version
    system.num_physical_cpus.return_value = 1

    # Image manager: fixed image id/size and configurable check_image.
    image_mgr = MagicMock()
    hypervisor.image_manager = image_mgr
    image_mgr.get_image_id_from_disks.return_value = self.image_id
    image_mgr.check_image = self.check_image
    image_mgr.image_size.return_value = self.image_size

    # VM manager reports no memory in use.
    vm_mgr = MagicMock()
    hypervisor.vm_manager = vm_mgr
    vm_mgr.get_used_memory_mb.return_value = 0

    placement_option = PlacementOption(self.mem_overcommit,
                                       self.cpu_overcommit,
                                       self.image_datastores)
    return PlacementManager(hypervisor, placement_option)
def test_optimal_place_engine(self, result, ratio, disk_sizes,
                              use_image_ds):
    """Optimal place engine puts all disks on the largest datastore.

    :param result: expected PlaceResultCode
    :param ratio: expected selector utilization ratio after placement
    :param disk_sizes: capacities (GB) of the disks to place
    :param use_image_ds: whether the image datastore may host VMs
    """
    # Three datastores of 1/2/3 TB; datastore_id_1 holds images.
    image_ds_name = "datastore_id_1"
    image_ds_config = [{
        "name": image_ds_name,
        "used_for_vms": use_image_ds
    }]
    placement_option = PlacementOption(1, 1, image_ds_config)
    datastore_map = {
        "datastore_id_1": DatastoreInfo(1 * 1024, 0),
        "datastore_id_2": DatastoreInfo(2 * 1024, 0),
        "datastore_id_3": DatastoreInfo(3 * 1024, 0)
    }
    datastore_mgr = self.create_datastore_manager(datastore_map,
                                                  image_ds_name)
    engine = OptimalPlaceEngine(datastore_mgr, placement_option)
    candidates = engine.placeable_datastores()
    selector = DatastoreSelector.init_datastore_selector(datastore_mgr,
                                                         candidates)
    request = DisksPlacement(self.create_disks(disk_sizes), selector)

    # Place and check the expected outcome and utilization ratio.
    place_result = engine.place(request, [])
    assert_that(place_result.result, equal_to(result))
    assert_that(place_result.disks_placement.selector.ratio(),
                equal_to(ratio))

    # On success with actual disks, everything lands on the biggest
    # datastore as a single DISK placement.
    if disk_sizes and place_result.result == PlaceResultCode.OK:
        placements = place_result.disks_placement.placement_list
        assert_that(placements, has_length(1))
        assert_that(placements[0].type,
                    equal_to(AgentResourcePlacement.DISK))
        assert_that(placements[0].container_id,
                    equal_to("datastore_id_3"))
def test_best_effort_place_engine(self, result, ratio, disk_sizes,
                                  places, use_image_ds):
    """Best effort place engine spreads disks across datastores.

    :param result: expected PlaceResultCode
    :param ratio: expected selector utilization ratio on success
    :param disk_sizes: capacities (GB) of the disks to place
    :param places: expected container datastore id per placement, in
        placement-list order
    :param use_image_ds: whether the image datastore may host VMs
    """
    # Create best effort place engine over three datastores (10/20/30GB).
    image_datastore = "ds1"
    image_datastores = [{
        "name": image_datastore,
        "used_for_vms": use_image_ds
    }]
    option = PlacementOption(1, 1, image_datastores)
    ds_map = {
        image_datastore: DatastoreInfo(10, 0),
        "ds2": DatastoreInfo(20, 0),
        "ds3": DatastoreInfo(30, 0)
    }
    ds_mgr = self.create_datastore_manager(ds_map, image_datastore)
    engine = BestEffortPlaceEngine(ds_mgr, option)
    ds = engine.placeable_datastores()
    selector = DatastoreSelector.init_datastore_selector(ds_mgr, ds)
    disks_placement = DisksPlacement(self.create_disks(disk_sizes),
                                     selector)

    # Try place
    place_result = engine.place(disks_placement, [])

    # Verify place result
    assert_that(place_result.result, equal_to(result))

    # Verify placements
    if disk_sizes and place_result.result == PlaceResultCode.OK:
        assert_that(place_result.disks_placement.selector.ratio(),
                    equal_to(ratio))
        # Hoisted out of the loop: the list is the same every iteration.
        placement_list = place_result.disks_placement.placement_list
        for i, place in enumerate(places):
            # BUGFIX: was placement_list[0].type, which re-checked only
            # the first placement's type on every iteration; check the
            # i-th placement to match the container_id assertion below.
            assert_that(placement_list[i].type,
                        equal_to(AgentResourcePlacement.DISK))
            assert_that(placement_list[i].container_id,
                        equal_to(place))