def test_place_clears_scheduler_id(self, client_class):
    client_class.side_effect = self.create_fake_client
    client = MagicMock()
    self._clients["bar"] = client
    scheduler = LeafScheduler("foo", 9, False)
    scheduler.configure([ChildInfo(id="bar", address="bar")])
    scheduler.place(PlaceRequest(scheduler_id="foo"))
    # The leaf scheduler must strip its own scheduler_id before
    # forwarding, so the host sees a request with no scheduler_id set.
    client.host_place.assert_called_with(PlaceRequest())
def test_place(self):
    handler = HostHandler(MagicMock())
    score = Score(100, 100)
    place_list = [MagicMock()]
    address = ServerAddress(host="localhost", port=1234)
    request = PlaceRequest(resource=Resource(self._sample_vm(), []))
    handler.hypervisor.placement_manager.place.return_value = \
        (score, place_list)

    response = handler.place(request)
    assert_that(response.result, is_(PlaceResultCode.OK))
    assert_that(response.score, is_(score))
    assert_that(response.placementList.placements,
                is_([item.to_thrift() for item in place_list]))
    assert_that(response.address, is_(address))

    # Placement is rejected while the host is entering or in maintenance.
    common.services.get(ServiceName.MODE).set_mode(
        MODE.ENTERING_MAINTENANCE)
    response = handler.place(request)
    assert_that(response.result, is_(PlaceResultCode.INVALID_STATE))

    common.services.get(ServiceName.MODE).set_mode(MODE.MAINTENANCE)
    response = handler.place(request)
    assert_that(response.result, is_(PlaceResultCode.INVALID_STATE))
def test_place_resource_constraint(self):
    handler = HostHandler(MagicMock())
    request = PlaceRequest(resource=Resource(self._sample_vm(), []))
    handler.hypervisor.placement_manager.place.side_effect = \
        NoSuchResourceException("DATASTORE", "Datastore not available.")
    response = handler.place(request)
    assert_that(response.result, is_(PlaceResultCode.NO_SUCH_RESOURCE))
def _place_request_with_params(self, ratio, min_fanout, max_fanout):
    place_params = PlaceParams()
    place_params.fanoutRatio = ratio
    place_params.minFanoutCount = min_fanout
    place_params.maxFanoutCount = max_fanout
    place_request = PlaceRequest(resource=Resource(Vm(), [Disk()]),
                                 leafSchedulerParams=place_params)
    return place_request
def test_place_system_error(self):
    scheduler = MagicMock()
    scheduler.place.side_effect = ValueError
    handler = SchedulerHandler()
    handler._schedulers["foo"] = scheduler
    response = handler.place(PlaceRequest(Resource(), "foo"))
    assert_that(response.result, is_(PlaceResultCode.SYSTEM_ERROR))
def setUp(self):
    self.child_1 = ChildInfo("child_1", "1.1.1.1", 8835)
    self.child_2 = ChildInfo("child_2", "1.1.1.2", 8835)
    # child_3 through child_7 share one address so tests can cover
    # multiple children behind a single host.
    self.child_3 = ChildInfo("child_3", "1.1.1.3", 8835)
    self.child_4 = ChildInfo("child_4", "1.1.1.3", 8835)
    self.child_5 = ChildInfo("child_5", "1.1.1.3", 8835)
    self.child_6 = ChildInfo("child_6", "1.1.1.3", 8835)
    self.child_7 = ChildInfo("child_7", "1.1.1.3", 8835)
    self.request = PlaceRequest()
def _loop():
    while True:
        actual_response = handler.place(PlaceRequest(Resource(), "foo"))
        assert_that(actual_response.result,
                    is_not(PlaceResultCode.SYSTEM_ERROR))
        if done[0]:
            results[threading.current_thread().name] = True
            break
def test_place_response(self):
    response = PlaceResponse(PlaceResultCode.OK)
    scheduler = MagicMock()
    scheduler.place.return_value = response
    handler = SchedulerHandler()
    handler._schedulers["foo"] = scheduler
    actual_response = handler.place(PlaceRequest(Resource(), "foo"))
    assert_that(actual_response, is_(same_instance(response)))
def place(self, disk=None, vm_disks=None, expect=PlaceResultCode.OK,
          vm_constraints=None):
    resource = self.resource_request(disk, vm_disks, vm_constraints)
    response = rpc_call(self.host_client.place, PlaceRequest(resource))
    assert_that(response.result, equal_to(expect))
    return response
def __init__(self, requests_file):
    self.requests = []
    with open(Universe.get_path(requests_file), 'r') as f:
        content = f.read()
    # safe_load is sufficient here: the requests file is plain data and
    # should not execute arbitrary YAML tags.
    requests = yaml.safe_load(content)
    request_id = 1
    disk_id = 1

    if 'auto' in requests:
        requests = self.generate_requests(requests['auto'])

    for request in requests:
        place_request = PlaceRequest()
        resource = Resource()
        resource.disks = []
        env_info = {}

        if 'vm' in request:
            resource.vm = Vm()
            # Make the vm id look like a uuid by zero-padding. Otherwise
            # reference counting doesn't work.
            resource.vm.id = "{0:032d}".format(request_id)
            resource.vm.state = State.STARTED
            flavor = Universe.vm_flavors[request['vm']['flavor']]
            resource.vm.flavor = flavor.name
            resource.vm.flavor_info = flavor.to_thrift()
            resource.vm.disks = []

            if 'constraints' in request:
                constraints = []
                for c in request['constraints']:
                    constraint = ResourceConstraint()
                    constraint.type = RCT._NAMES_TO_VALUES[c['type']]
                    constraint.values = c['values']
                    constraint.negative = c.get('negative', False)
                    constraints.append(constraint)
                if constraints:
                    resource.vm.resource_constraints = constraints

            if 'load' in request['vm']:
                env_info['mem_load'] = request['vm']['load']['mem']

        if 'disks' in request:
            for d in request['disks']:
                disk = Disk()
                flavor = Universe.ephemeral_disk_flavors[d['flavor']]
                disk.flavor = flavor.name
                disk.flavor_info = flavor.to_thrift()
                disk.id = str(disk_id)
                disk.persistent = False
                disk.new_disk = True
                disk.capacity_gb = 1024  # hard coded in FakeVmManager
                disk_id += 1
                resource.vm.disks.append(disk)

        place_request.resource = resource
        tracing_info = TracingInfo()
        tracing_info.request_id = request_id
        place_request.tracing_info = tracing_info
        request_id += 1
        self.requests.append(PsimVmRequest(place_request, env_info))
def _place_request(self):
    return PlaceRequest(resource=Resource(Vm(), [Disk()]))
def test_place_missing_scheduler(self):
    handler = SchedulerHandler()
    response = handler.place(PlaceRequest(Resource()))
    assert_that(response.result, is_(PlaceResultCode.INVALID_SCHEDULER))