def place(self, disks_placement):
    """Greedily place disks, biggest first, across datastores.

    Works on a deep copy of disks_placement so the caller's object is
    never mutated.  Disks are sorted by capacity (largest first); when
    the currently selected datastore can no longer hold the next disk, a
    fresh optimal datastore is requested from the selector.

    :param disks_placement: DisksPlacement, disks to place plus selector
    :rtype: DiskPlaceResult
    """
    disks_placement = copy.deepcopy(disks_placement)
    selector = disks_placement.selector
    current_ds = selector.get_datastore()

    # Largest disks first: placing big disks early reduces the chance
    # that free space ends up fragmented across datastores.
    ordered_disks = sorted(disks_placement.disks,
                           key=lambda d: self.disk_util.disk_capacity(d),
                           reverse=True)

    for disk in ordered_disks:
        size_gb = self.disk_util.disk_capacity(disk)
        if selector.free_space(current_ds) < size_gb:
            # Current datastore is full; ask the selector for the best
            # remaining one.  If even that cannot hold the disk, fail.
            current_ds = selector.get_datastore()
            if selector.free_space(current_ds) < size_gb:
                return DiskPlaceResult(
                    result=PlaceResultCode.NOT_ENOUGH_DATASTORE_CAPACITY,
                    disks_placement=disks_placement)
        selector.consume_datastore_space(current_ds, size_gb)
        disks_placement.placement_list.append(
            AgentResourcePlacement(AgentResourcePlacement.DISK,
                                   disk.id, current_ds))

    return DiskPlaceResult(result=PlaceResultCode.OK,
                           disks_placement=disks_placement)
def place(self, disks_placement):
    """Place every disk on the single optimal datastore.

    All disks share one datastore; placement fails unless that datastore
    has room for their combined capacity.

    :param disks_placement: DisksPlacement, disks to place plus selector
    :rtype: DiskPlaceResult
    """
    total_gb = self.disk_util.disks_capacity_gb(disks_placement.disks)
    selector = disks_placement.selector

    # Get the optimal datastore
    best_ds = selector.get_datastore()

    # Fail if the optimal datastore cannot fit all the remaining disks
    if total_gb > selector.free_space(best_ds):
        return DiskPlaceResult(
            result=PlaceResultCode.NOT_ENOUGH_DATASTORE_CAPACITY,
            disks_placement=disks_placement)

    # Reserve the space once for the whole batch, then record one
    # placement entry per disk.
    selector.consume_datastore_space(best_ds, total_gb)
    for disk in disks_placement.disks:
        disks_placement.placement_list.append(
            AgentResourcePlacement(AgentResourcePlacement.DISK,
                                   disk.id, best_ds))

    return DiskPlaceResult(result=PlaceResultCode.OK,
                           disks_placement=disks_placement)
def local_consume_disk_reservation(reservation_id):
    """Fake consume_disk_reservation: returns one new persistent 2 GB disk.

    :param reservation_id: expected to be "reservation_id_1"
    :rtype: list of HostDisk
    """
    # Bug fix: the original used `is`, which tests object identity and
    # only passed by accident of CPython string interning.  Compare by
    # value instead.
    assert_that(reservation_id == "reservation_id_1")
    host_disk = HostDisk()
    host_disk.id = "disk_id_1"
    host_disk.flavor = HostFlavor("disk_flavor_1")
    host_disk.persistent = True
    host_disk.new_disk = True
    host_disk.capacity_gb = 2
    # `placement` is a free variable captured from the enclosing test
    # scope; only attach a placement descriptor when one was requested.
    if placement:
        host_disk.placement = AgentResourcePlacement(
            AgentResourcePlacement.DISK, host_disk.id, placement)
    return [host_disk]
def _pick_available_resources(self, vm):
    """
    Pick host's resources that match vm's resource constraints.

    :param vm: Vm
    :rtype: Placement List
    :raise: NoSuchResourceException
    """
    placement_list = []
    # supports network and virtual_network constraints.
    # Maps constraint types to the placement types they produce.
    mapping = {
        ResourceConstraintType.NETWORK:
            AgentResourcePlacement.NETWORK,
        ResourceConstraintType.VIRTUAL_NETWORK:
            AgentResourcePlacement.VIRTUAL_NETWORK
    }
    if vm.resource_constraints:
        # resource type to extract from VM's resource_constraint for
        # matching
        extract_resources_type = set([ResourceConstraintType.NETWORK])
        # host available resources.
        host_available_resources = {
            ResourceConstraintType.NETWORK:
                set(self._hypervisor.network_manager.get_vm_networks())
        }
        # Intersect the vm's NETWORK constraints with what this host
        # actually provides; each matched value becomes a placement.
        constraints = self._extract_resource_constraints(
            vm.resource_constraints, extract_resources_type)
        matched_resources = self._collect_matched_resource(
            constraints, host_available_resources)
        for resource_type, values in matched_resources.iteritems():
            placement_list.extend([
                AgentResourcePlacement(mapping[resource_type],
                                       vm.id, value)
                for value in values
            ])
        # resource type to extract from VM's resource_constraint for
        # copying.  VIRTUAL_NETWORK constraints are copied verbatim (no
        # matching against host inventory).
        virtual_network_resource_placement_list = self._pick_resources(
            vm,
            ResourceConstraintType.VIRTUAL_NETWORK,
            AgentResourcePlacement.VIRTUAL_NETWORK)
        placement_list.extend(virtual_network_resource_placement_list)
    return placement_list
def place(self, disks_placement):
    """Place the disks that carry datastore constraints.

    Disks without constraints are collected and handed back via
    disks_placement.disks for a later placement stage.  Works on a deep
    copy so the caller's object is never mutated.

    :param disks_placement: DisksPlacement, disks to place plus selector
    :rtype: DiskPlaceResult
    """
    disks_placement = copy.deepcopy(disks_placement)
    leftover = []

    for disk in disks_placement.disks:
        # Unconstrained disks are deferred to the next placer.
        if not disk.constraints:
            leftover.append(disk)
            continue

        try:
            datastore_id = self.get_datastore_constraint(disk)
        except DatastoreNotFoundException:
            self._logger.warning(
                "Data store constraint failed: %s" % disk)
            return DiskPlaceResult(PlaceResultCode.NO_SUCH_RESOURCE)
        except ConflictedConstraintException:
            self._logger.info("Conflicted constraints", exc_info=True)
            return DiskPlaceResult(PlaceResultCode.NO_SUCH_RESOURCE)

        # if the datastore_id is not visible by this host throw an
        # exception
        if datastore_id not in self.placeable_datastores():
            self._logger.warning(
                "Data store constraint failed: %s" % datastore_id)
            return DiskPlaceResult(PlaceResultCode.NO_SUCH_RESOURCE)

        size_gb = self.disk_util.disk_capacity(disk)
        try:
            disks_placement.selector.consume_datastore_space(
                datastore_id, size_gb)
        except NotEnoughSpaceException:
            return DiskPlaceResult(
                PlaceResultCode.NOT_ENOUGH_DATASTORE_CAPACITY)

        # Append in placement list
        disks_placement.placement_list.append(
            AgentResourcePlacement(AgentResourcePlacement.DISK,
                                   disk.id, datastore_id))

    disks_placement.disks = leftover
    return DiskPlaceResult(result=PlaceResultCode.OK,
                           disks_placement=disks_placement)
def _compute_vm_utilization_score(self, vm):
    """Score this host for the given vm and build its placement list.

    The overall score is the bottleneck resource: the minimum of the
    memory, cpu and storage scores.

    :param vm: Vm (may be falsy; then no VM placement is prepended)
    :rtype: (int, Placement List)
    """
    memory_score = self._compute_memory_score(vm)
    cpu_score = self._compute_cpu_score(vm)
    storage_score, placement_list = self._compute_storage_score(
        vm.disks, vm.resource_constraints)

    # If vm is not None, need to put vm placement in placement list.
    # Co-locate the vm with its first placed disk when there is one,
    # otherwise fall back to the optimal datastore.
    if vm:
        vm_datastore = (placement_list[0].container_id
                        if placement_list
                        else self._optimal_datastore())
        vm_placement = AgentResourcePlacement(AgentResourcePlacement.VM,
                                              vm.id, vm_datastore)
        placement_list = [vm_placement] + placement_list

    self._logger.debug("Scores: memory: %d, cpu: %d, storage: %d" %
                       (memory_score, cpu_score, storage_score))
    return min(memory_score, cpu_score, storage_score), placement_list
def _pick_resources(self, vm, constraintType, placementType):
    """
    Pick vm's specific resource constraints.

    Copies every value of the vm's constraints of the requested type
    into a placement entry, without matching against host inventory.

    :param vm: Vm
    :param constraintType: ResourceConstraintType
    :param placementType: AgentResourcePlacement
    :rtype: Placement List
    """
    extracted = self._extract_resource_constraints(
        vm.resource_constraints, set([constraintType]))
    if not extracted:
        return []
    # One placement per value, flattened across all constraints of this
    # type.
    return [AgentResourcePlacement(placementType, vm.id, value)
            for constraint in extracted[constraintType]
            for value in constraint.values]
def test_create_vm_on_correct_resource(self):
    """Check that we create the vm on the correct datastore"""
    # Reserved vm the placement manager will hand back.
    vm = MagicMock()
    vm.id = str(uuid.uuid4())
    vm.networks = ["net_1", "net_2"]
    vm.project_id = "p1"
    vm.tenant_id = "t1"
    mock_env = MagicMock()
    mock_reservation = MagicMock()
    mock_net_spec = MagicMock()
    req = CreateVmRequest(reservation=mock_reservation,
                          environment=mock_env,
                          network_connection_spec=mock_net_spec)
    image_id = stable_uuid('image_id')
    handler = HostHandler(MagicMock())
    pm = handler.hypervisor.placement_manager
    pm.consume_vm_reservation.return_value = vm
    handler._datastores_for_image = MagicMock()
    handler.hypervisor.datastore_manager.datastore_type.\
        return_value = DatastoreType.EXT3
    # NOTE(review): set("ds2") builds the set {'d', 's', '2'}, not
    # {"ds2"} — confirm this is intentional and not a latent test bug.
    handler.hypervisor.datastore_manager.image_datastores = MagicMock(
        return_value=set("ds2"))
    im = handler.hypervisor.image_manager
    im.get_image_refcount_filename.return_value = \
        os.path.join(self.agent_conf_dir, vm.id)
    im.get_image_id_from_disks.return_value = image_id

    # No placement descriptor: create_vm must fail with
    # PLACEMENT_NOT_FOUND and still release the reservation.
    vm.placement = None
    response = handler.create_vm(req)
    pm.remove_vm_reservation.assert_called_once_with(mock_reservation)
    assert_that(response.result,
                equal_to(CreateVmResultCode.PLACEMENT_NOT_FOUND))

    # If vm reservation has placement datastore info, it should
    # be placed there ("ds2" is forwarded to create_vm_spec).
    handler.hypervisor.vm_manager.create_vm_spec.reset_mock()
    pm.remove_vm_reservation.reset_mock()
    vm.placement = AgentResourcePlacement(AgentResourcePlacement.VM,
                                          "vm_ids",
                                          "ds2")
    response = handler.create_vm(req)
    spec = handler.hypervisor.vm_manager.create_vm_spec.return_value
    metadata = handler.hypervisor.image_manager.image_metadata.return_value
    handler.hypervisor.vm_manager.create_vm_spec.assert_called_once_with(
        vm.id, "ds2", vm.flavor, metadata, mock_env, image_id=image_id)
    handler.hypervisor.vm_manager.create_vm.assert_called_once_with(
        vm.id, spec)
    # Project/tenant ids from the reserved vm must land in the vm info.
    handler.hypervisor.vm_manager.set_vminfo.assert_called_once_with(
        spec, {
            handler.VMINFO_PROJECT_KEY: 'p1',
            handler.VMINFO_TENANT_KEY: 't1'
        })
    pm.remove_vm_reservation.assert_called_once_with(mock_reservation)
    assert_that(response.result, equal_to(CreateVmResultCode.OK))

    # Test create_vm honors vm.networks information
    # Host has the provisioned networks required by placement_list,
    # should succeed.
    handler.hypervisor.network_manager.get_vm_networks.return_value = \
        ["net_2", "net_1"]
    add_nic_mock = MagicMock()
    handler.hypervisor.vm_manager.add_nic = add_nic_mock
    handler.hypervisor.vm_manager.create_vm_spec.reset_mock()
    pm.remove_vm_reservation.reset_mock()
    spec = handler.hypervisor.vm_manager.create_vm_spec.return_value
    req = CreateVmRequest(reservation=mock_reservation)
    response = handler.create_vm(req)
    # NICs must be added in vm.networks order, regardless of the order
    # the host reports its networks in.
    called_networks = add_nic_mock.call_args_list
    expected_networks = [call(spec, 'net_1'), call(spec, 'net_2')]
    assert_that(called_networks == expected_networks, is_(True))
    pm.remove_vm_reservation.assert_called_once_with(mock_reservation)
    assert_that(response.result, equal_to(CreateVmResultCode.OK))

    # Host does not have the provisioned networks
    # required by placement_list, should fail.
    handler.hypervisor.network_manager.get_vm_networks.return_value = \
        ["net_1", "net_7"]
    handler.hypervisor.vm_manager.add_nic.reset_mock()
    pm.remove_vm_reservation.reset_mock()
    req = CreateVmRequest(reservation=mock_reservation)
    response = handler.create_vm(req)
    pm.remove_vm_reservation.assert_called_once_with(mock_reservation)
    assert_that(response.result,
                equal_to(CreateVmResultCode.NETWORK_NOT_FOUND))