def resource_request(self, disks=None, vm_disks=None):
    """Build a Resource for either standalone disks or this VM with disks.

    Exactly one of *disks* / *vm_disks* may be supplied (both ``None`` is
    treated as the VM case with no disks).
    """
    assert disks is None or vm_disks is None
    if disks is not None:
        # Disk-only request: no VM attached.
        return Resource(None, disks)
    vm_resource = Resource(self._vm, None)
    vm_resource.vm.disks = vm_disks
    return vm_resource
def resource_request(self, disk=None, vm_disks=None, vm_constraints=None):
    """Build a Resource for a single disk or this VM with disks/constraints.

    Exactly one of *disk* / *vm_disks* may be supplied.  A NETWORK
    constraint on "VM Network" is always appended to the VM's constraints.

    Bug fix: the caller-supplied *vm_constraints* list is no longer
    mutated in place; a copy is extended instead.
    """
    assert disk is None or vm_disks is None
    if disk is not None:
        # Single-disk request: no VM attached.
        return Resource(None, [disk])
    # Copy so the caller's list is not mutated (previously appended in place).
    constraints = list(vm_constraints) if vm_constraints else []
    constraints.append(
        ResourceConstraint(ResourceConstraintType.NETWORK, ["VM Network"]))
    resource = Resource(self._vm, None)
    resource.vm.disks = vm_disks
    resource.vm.resource_constraints = constraints
    return resource
def test_place(self):
    """place() succeeds normally and is rejected in maintenance modes."""
    handler = HostHandler(MagicMock())
    expected_score = Score(100, 100)
    placements = [MagicMock()]
    expected_address = ServerAddress(host="localhost", port=1234)
    handler.hypervisor.placement_manager.place.return_value = (
        expected_score, placements)
    request = PlaceRequest(resource=Resource(self._sample_vm(), []))

    response = handler.place(request)
    assert_that(response.result, is_(PlaceResultCode.OK))
    assert_that(response.score, is_(expected_score))
    assert_that(response.placementList.placements,
                is_([p.to_thrift() for p in placements]))
    assert_that(response.address, is_(expected_address))

    # Placement must be refused while entering or in maintenance mode.
    mode_service = common.services.get(ServiceName.MODE)
    for mode in (MODE.ENTERING_MAINTENANCE, MODE.MAINTENANCE):
        mode_service.set_mode(mode)
        response = handler.place(request)
        assert_that(response.result, is_(PlaceResultCode.INVALID_STATE))
def create_resource(self, id):
    """Return a Resource holding a started VM on ds1 plus two small disks.

    Disk ids are derived from *id* with a ``-1`` / ``-2`` suffix; the
    disks live on ds1 and ds2 respectively.
    """
    shared_flavor = Flavor(name="flavor", cost=[QuotaLineItem("a", "b", 1)])
    disks = [
        Disk(stable_uuid("%s-%d" % (id, suffix)), shared_flavor,
             False, False, 1, datastore=Datastore(ds_name))
        for suffix, ds_name in ((1, "ds1"), (2, "ds2"))
    ]
    vm = Vm()
    vm.id = id
    vm.flavor = "flavor"
    vm.flavor_info = shared_flavor
    vm.state = State().STARTED
    vm.datastore = Datastore("ds1")
    return Resource(vm, disks)
def test_place_resource_constraint(self):
    """A NoSuchResourceException from the manager maps to NO_SUCH_RESOURCE."""
    handler = HostHandler(MagicMock())
    handler.hypervisor.placement_manager.place.side_effect = \
        NoSuchResourceException("DATASTORE", "Datastore not available.")
    request = PlaceRequest(resource=Resource(self._sample_vm(), []))
    response = handler.place(request)
    assert_that(response.result, is_(PlaceResultCode.NO_SUCH_RESOURCE))
def _place_request_with_params(self, ratio, min_fanout, max_fanout):
    """Build a PlaceRequest carrying the given leaf-scheduler fanout params."""
    params = PlaceParams()
    params.fanoutRatio = ratio
    params.minFanoutCount = min_fanout
    params.maxFanoutCount = max_fanout
    return PlaceRequest(resource=Resource(Vm(), [Disk()]),
                        leafSchedulerParams=params)
def test_place_system_error(self):
    """A scheduler raising an unexpected error yields SYSTEM_ERROR."""
    failing_scheduler = MagicMock()
    failing_scheduler.place.side_effect = ValueError
    handler = SchedulerHandler()
    handler._schedulers["foo"] = failing_scheduler
    result = handler.place(PlaceRequest(Resource(), "foo")).result
    assert_that(result, is_(PlaceResultCode.SYSTEM_ERROR))
def _loop():
    """Hammer place() until done[0] flips, recording success per thread.

    Always issues at least one place() call before checking the flag;
    any SYSTEM_ERROR response fails the enclosing test.
    """
    while True:
        response = handler.place(PlaceRequest(Resource(), "foo"))
        assert_that(response.result,
                    is_not(PlaceResultCode.SYSTEM_ERROR))
        if done[0]:
            # Mark this worker thread as having finished cleanly.
            results[threading.current_thread().name] = True
            break
def test_place_response(self):
    """The handler forwards the scheduler's PlaceResponse untouched."""
    expected = PlaceResponse(PlaceResultCode.OK)
    stub_scheduler = MagicMock()
    stub_scheduler.place.return_value = expected
    handler = SchedulerHandler()
    handler._schedulers["foo"] = stub_scheduler
    actual = handler.place(PlaceRequest(Resource(), "foo"))
    assert_that(actual, is_(same_instance(expected)))
def __init__(self, requests_file):
    """Load place requests from a YAML file and build PsimVmRequests.

    Each request entry may describe a ``vm`` (flavor, constraints, load)
    and/or ``disks``; an ``auto`` entry delegates to generate_requests.

    Bug fixes: the request file handle is now closed deterministically,
    and ``yaml.safe_load`` is used so the data file cannot instantiate
    arbitrary Python objects.
    """
    self.requests = []
    # Close the file deterministically (previously leaked the handle).
    with open(Universe.get_path(requests_file), 'r') as f:
        content = f.read()
    # safe_load: never construct arbitrary objects from the YAML input.
    requests = yaml.safe_load(content)
    if 'auto' in requests:
        requests = self.generate_requests(requests['auto'])
    request_id = 1
    disk_id = 1
    for request in requests:
        place_request = PlaceRequest()
        resource = Resource()
        resource.disks = []
        env_info = {}
        if 'vm' in request:
            resource.vm = Vm()
            # Make the vm id look like a uuid by zero-padding. Otherwise
            # reference counting doesn't work.
            resource.vm.id = "{0:032d}".format(request_id)
            resource.vm.state = State.STARTED
            flavor = Universe.vm_flavors[request['vm']['flavor']]
            resource.vm.flavor = flavor.name
            resource.vm.flavor_info = flavor.to_thrift()
            resource.vm.disks = []
            if 'constraints' in request:
                constraints = []
                for c in request['constraints']:
                    constraint = ResourceConstraint()
                    constraint.type = RCT._NAMES_TO_VALUES[c['type']]
                    constraint.values = c['values']
                    # Constraints default to positive (inclusive) matching.
                    constraint.negative = c.get('negative', False)
                    constraints.append(constraint)
                if constraints:
                    resource.vm.resource_constraints = constraints
            if 'load' in request['vm']:
                env_info['mem_load'] = request['vm']['load']['mem']
        if 'disks' in request:
            for d in request['disks']:
                disk = Disk()
                flavor = Universe.ephemeral_disk_flavors[d['flavor']]
                disk.flavor = flavor.name
                disk.flavor_info = flavor.to_thrift()
                disk.id = str(disk_id)
                disk.persistent = False
                disk.new_disk = True
                disk.capacity_gb = 1024  # hard coded in FakeVmManager
                disk_id += 1
                resource.vm.disks.append(disk)
        place_request.resource = resource
        tracing_info = TracingInfo()
        tracing_info.request_id = request_id
        place_request.tracing_info = tracing_info
        request_id += 1
        self.requests.append(PsimVmRequest(place_request, env_info))
def _place_request(self):
    """Return a minimal PlaceRequest: one empty Vm plus a single Disk."""
    resource = Resource(Vm(), [Disk()])
    return PlaceRequest(resource=resource)
def test_place_missing_scheduler(self):
    """Placing with no registered scheduler yields INVALID_SCHEDULER."""
    handler = SchedulerHandler()
    request = PlaceRequest(Resource())
    response = handler.place(request)
    assert_that(response.result, is_(PlaceResultCode.INVALID_SCHEDULER))
def __init__(self, requests_file):
    """Load place requests from a YAML file and build PsimVmRequests.

    Each request entry may describe a ``vm`` (flavor, constraints, load)
    and/or ``disks``; an ``auto`` entry delegates to generate_requests.

    Bug fixes: the request file handle is now closed deterministically,
    and ``yaml.safe_load`` is used so the data file cannot instantiate
    arbitrary Python objects.
    """
    self.requests = []
    # Close the file deterministically (previously leaked the handle).
    with open(Universe.get_path(requests_file), 'r') as f:
        content = f.read()
    # safe_load: never construct arbitrary objects from the YAML input.
    requests = yaml.safe_load(content)
    if 'auto' in requests:
        requests = self.generate_requests(requests['auto'])
    request_id = 1
    disk_id = 1
    for request in requests:
        place_request = PlaceRequest()
        resource = Resource()
        resource.disks = []
        env_info = {}
        if 'vm' in request:
            resource.vm = Vm()
            # Make the vm id look like a uuid by zero-padding. Otherwise
            # reference counting doesn't work.
            resource.vm.id = "{0:032d}".format(request_id)
            resource.vm.state = State.STARTED
            flavor = Universe.vm_flavors[request['vm']['flavor']]
            resource.vm.flavor = flavor.name
            resource.vm.flavor_info = flavor.to_thrift()
            resource.vm.disks = []
            if 'constraints' in request:
                constraints = []
                for c in request['constraints']:
                    constraint = ResourceConstraint()
                    constraint.type = RCT._NAMES_TO_VALUES[c['type']]
                    constraint.values = c['values']
                    # Constraints default to positive (inclusive) matching.
                    constraint.negative = c.get('negative', False)
                    constraints.append(constraint)
                if constraints:
                    resource.vm.resource_constraints = constraints
            if 'load' in request['vm']:
                env_info['mem_load'] = request['vm']['load']['mem']
        if 'disks' in request:
            for d in request['disks']:
                disk = Disk()
                flavor = Universe.ephemeral_disk_flavors[d['flavor']]
                disk.flavor = flavor.name
                disk.flavor_info = flavor.to_thrift()
                disk.id = str(disk_id)
                disk.persistent = False
                disk.new_disk = True
                disk.capacity_gb = 1024  # hard coded in FakeVmManager
                disk_id += 1
                resource.vm.disks.append(disk)
        place_request.resource = resource
        tracing_info = TracingInfo()
        tracing_info.request_id = request_id
        place_request.tracing_info = tracing_info
        request_id += 1
        self.requests.append(PsimVmRequest(place_request, env_info))
def test_reserve_disk(self, constraint_value, placement_id, expected):
    """Reserve a single disk and verify that its constraints and
    placement arrive at the placement manager intact.

    Bug fix: comparisons previously used ``is`` against int literals and
    thrift enum values (e.g. ``len(disks) is 1``), which relies on
    CPython small-int caching; value equality is asserted instead.
    """
    disk_id = "disk_id_1"
    disk_flavor = "disk_flavor_1"

    def reserve_disk_validate(vm, disks):
        # A lone-disk reservation carries no VM.
        assert_that(vm is None)
        assert isinstance(disks, list)
        assert_that(len(disks), equal_to(1))
        disk = disks[0]
        assert isinstance(disk, HostDisk)
        assert_that(disk.id, equal_to(disk_id))
        assert_that(disk.flavor.name, equal_to(disk_flavor))
        reserve_constraints = disk.constraints
        if reserve_constraints:
            assert isinstance(reserve_constraints, list)
            assert_that(len(reserve_constraints), equal_to(1))
            reserve_constraint = reserve_constraints[0]
            assert_that(reserve_constraint.type,
                        equal_to(ResourceConstraintType.DATASTORE))
            assert_that(reserve_constraint.values, equal_to([expected]))
        reserve_placement = disk.placement
        if reserve_placement:
            assert_that(reserve_placement.type,
                        equal_to(ResourcePlacementType.DISK))
            assert_that(reserve_placement.resource_id, equal_to(disk_id))
            assert_that(reserve_placement.container_id, equal_to(expected))
        return "reservation_id"

    handler = HostHandler(MagicMock())
    mocked_reserve = MagicMock()
    mocked_reserve.side_effect = reserve_disk_validate
    handler.hypervisor.placement_manager = MagicMock()
    handler.hypervisor.placement_manager.reserve = mocked_reserve

    # Optional constraint / placement inputs, driven by the test params.
    constraints = None
    placement_list = None
    if constraint_value:
        constraint = ResourceConstraint()
        constraint.values = [constraint_value]
        constraint.type = ResourceConstraintType.DATASTORE
        constraints = [constraint]
    if placement_id:
        placement = ResourcePlacement()
        placement.type = ResourcePlacementType.DISK
        placement.container_id = placement_id
        placement.resource_id = disk_id
        placement_list = ResourcePlacementList([placement])

    flavor_info = Flavor(name=disk_flavor,
                         cost=[QuotaLineItem("a", "b", 1)])
    disk = Disk(id=disk_id, flavor=disk_flavor, persistent=True,
                new_disk=True, capacity_gb=2, flavor_info=flavor_info,
                resource_constraints=constraints)
    request = ReserveRequest()
    request.generation = 1
    request.resource = Resource(vm=None, disks=[disk],
                                placement_list=placement_list)
    response = handler.reserve(request)
    assert_that(response.result, equal_to(ReserveResultCode.OK))
def test_reserve_vm(self):
    """Reserve a VM with three disks and two networks, verifying that all
    placements are threaded through to the placement manager; then check
    that reserve is refused in (entering-)maintenance mode.

    Bug fix: ``assert_that(len(disks) is 3)`` and the enum checks used
    ``is`` against int values, which depends on CPython int caching;
    value equality is asserted instead.
    """
    disk_ids = ["disk_id_1", "disk_id_2", "disk_id_3"]
    datastore_ids = ["datastore_1", "datastore_2", "datastore_3"]
    disk_flavor = "disk_flavor_1"
    networks = ["net_1", "net_2"]
    vm_flavor = "vm_flavor_1"
    vm_id = "vm_id_1"

    def reserve_vm_validate(vm, disks):
        # A VM reservation carries no standalone disks.
        assert_that(vm)
        assert_that(not disks)
        assert_that(vm.id, equal_to(vm_id))

        # Check VM placement
        vm_placement = vm.placement
        assert_that(vm_placement is not None)
        assert_that(vm_placement.type, equal_to(ResourcePlacementType.VM))
        assert_that(vm_placement.resource_id, equal_to(vm_id))
        assert_that(vm_placement.container_id, equal_to(datastore_ids[0]))

        # Check VM networks
        vm_networks = vm.networks
        assert_that(vm_networks is not None)
        assert_that(set(networks), equal_to(set(vm_networks)))

        # Each disk must carry its own placement, paired by index.
        disks = vm.disks
        assert_that(len(disks), equal_to(3))
        for disk_index, vm_disk in enumerate(disks):
            assert_that(vm_disk.id, equal_to(disk_ids[disk_index]))
            assert_that(vm_disk.placement is not None)
            disk_placement = vm_disk.placement
            assert_that(disk_placement.type,
                        equal_to(ResourcePlacementType.DISK))
            assert_that(disk_placement.resource_id, equal_to(vm_disk.id))
            assert_that(disk_placement.container_id,
                        equal_to(datastore_ids[disk_index]))
        return "reservation_id"

    handler = HostHandler(MagicMock())
    mocked_reserve = MagicMock()
    mocked_reserve.side_effect = reserve_vm_validate
    handler.hypervisor.placement_manager = MagicMock()
    handler.hypervisor.placement_manager.reserve = mocked_reserve

    placements = []
    # Add VM placement info
    placement = ResourcePlacement()
    placement.type = ResourcePlacementType.VM
    placement.resource_id = vm_id
    placement.container_id = datastore_ids[0]
    placements.append(placement)
    # Add network placement info for each network
    for network in networks:
        placement = ResourcePlacement()
        placement.type = ResourcePlacementType.NETWORK
        placement.resource_id = vm_id
        placement.container_id = network
        placements.append(placement)
    # Add disks placement info, one datastore per disk
    for index, disk_id in enumerate(disk_ids):
        placement = ResourcePlacement()
        placement.type = ResourcePlacementType.DISK
        placement.container_id = datastore_ids[index]
        placement.resource_id = disk_id
        placements.append(placement)
    placement_list = ResourcePlacementList(placements)

    disk_flavor_info = Flavor(name=disk_flavor,
                              cost=[QuotaLineItem("size", "1", 1)])
    disks = [Disk(id=disk_id, flavor=disk_flavor, persistent=True,
                  new_disk=True, capacity_gb=2,
                  flavor_info=disk_flavor_info)
             for disk_id in disk_ids]
    vm_flavor_info = Flavor(name=vm_flavor,
                            cost=[QuotaLineItem("cpu", "1", 5)])
    vm = Vm(vm_id, vm_flavor, State.STOPPED, None, None, disks,
            vm_flavor_info)

    request = ReserveRequest()
    request.generation = 1
    request.resource = Resource(vm=vm, disks=None,
                                placement_list=placement_list)
    response = handler.reserve(request)
    assert_that(response.result, equal_to(ReserveResultCode.OK))

    # test reserve under entering-maintenance-mode and maintenance mode
    state = common.services.get(ServiceName.MODE)
    state.set_mode(MODE.ENTERING_MAINTENANCE)
    request = MagicMock()
    response = handler.reserve(request)
    assert_that(response.result,
                equal_to(ReserveResultCode.OPERATION_NOT_ALLOWED))
    state.set_mode(MODE.MAINTENANCE)
    response = handler.reserve(request)
    assert_that(response.result,
                equal_to(ReserveResultCode.OPERATION_NOT_ALLOWED))