def test_place_with_resource_constraints(self, client_class):
    client_class.side_effect = self.create_fake_client

    bar_client = MagicMock()
    bar_response = PlaceResponse(PlaceResultCode.OK, agent_id="bar",
                                 score=Score(5, 90))
    bar_client.place.return_value = bar_response
    self._clients["bar"] = bar_client

    baz_client = MagicMock()
    baz_response = PlaceResponse(PlaceResultCode.OK, agent_id="baz",
                                 score=Score(30, 80))
    baz_client.place.return_value = baz_response
    self._clients["baz"] = baz_client

    scheduler = BranchScheduler("foo", 9)
    scheduler.configure([
        ChildInfo(id="bar", address="bar", constraints=[
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["1"])]),
        ChildInfo(id="baz", address="baz", constraints=[
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["2"])])])

    request = self._place_request()
    request.resource.vm.resource_constraints = [ResourceConstraint(
        ResourceConstraintType.DATASTORE, ["1"])]
    response = scheduler.place(request)

    assert_that(response.result, is_(PlaceResultCode.OK))
    assert_that(response.agent_id, is_("bar"))
def test_constraint_place_engine_conflicting_constraints(self):
    """Constraint place engine should fail if multiple constraints conflict."""
    # Create constraint place engine
    image_datastore = "ds1"
    image_datastores = [{"name": image_datastore, "used_for_vms": True}]
    option = PlacementOption(1, 1, image_datastores)
    ds_map = {
        image_datastore: DatastoreInfo(10, 0),
        "ds2": DatastoreInfo(20, 0)
    }
    ds_mgr = self.create_datastore_manager(ds_map, image_datastores)
    engine = ConstraintDiskPlaceEngine(ds_mgr, option)
    ds = engine.placeable_datastores()
    selector = DatastoreSelector.init_datastore_selector(ds_mgr, ds)

    # Try place
    constraints = [
        ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds1"]),
        ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds2"])
    ]
    disk = Disk(disk_id="ds1", capacity_gb=1)
    place_result = engine.place(DisksPlacement([disk], selector),
                                constraints)

    # Verify place result
    assert_that(place_result.result,
                equal_to(PlaceResultCode.NO_SUCH_RESOURCE))
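# A minimal sketch (plain sets, not the real engine) of why the two DATASTORE
# constraints above conflict: each constraint narrows the candidates to its
# own values, and the intersection across constraints is empty.
allowed = set(["ds1"]) & set(["ds2"])
assert allowed == set()   # no datastore satisfies both -> NO_SUCH_RESOURCE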
def test_resource_constraints_negative_two_select_one(self):
    # Test that child 4 is the only one picked as
    # child 5 has only one host ("host4") which is included
    # in the negative constraints
    strategy = RandomSubsetStrategy(0.5, 2)
    strategy._get_constraints = self._get_constraints

    constraints = [ResourceConstraint(ResourceConstraintType.HOST,
                                      ['host1'], True),
                   ResourceConstraint(ResourceConstraintType.HOST,
                                      ['host4'], True)]
    result = strategy.filter_child(
        [self.child_4, self.child_5], self.request, constraints)
    assert_that(result, has_length(1))
    assert_that(result, contains_inanyorder(self.child_4))

    constraints = [ResourceConstraint(ResourceConstraintType.HOST,
                                      ['host1', 'host4'], True)]
    result = strategy.filter_child(
        [self.child_4, self.child_5], self.request, constraints)
    assert_that(result, has_length(1))
    assert_that(result, contains_inanyorder(self.child_4))
def test_resource_constraints_negative_two_select_two(self):
    # Test that both children are picked as
    # child 4 has more than just "host1"
    # while child 5 has "host4" which is
    # not included in the negative constraints
    strategy = RandomSubsetStrategy(0.5, 2)
    strategy._get_constraints = self._get_constraints

    constraints = [ResourceConstraint(ResourceConstraintType.HOST,
                                      ['host1'], True),
                   ResourceConstraint(ResourceConstraintType.HOST,
                                      ['host7'], True)]
    result = strategy.filter_child(
        [self.child_4, self.child_5], self.request, constraints)
    assert_that(result, has_length(2))
    assert_that(result, contains_inanyorder(self.child_4, self.child_5))

    # Now try with a single constraint
    constraints = [ResourceConstraint(ResourceConstraintType.HOST,
                                      ['host1', 'host7'], True)]
    result = strategy.filter_child(
        [self.child_4, self.child_5], self.request, constraints)
    assert_that(result, has_length(2))
    assert_that(result, contains_inanyorder(self.child_4, self.child_5))
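# A minimal sketch (plain sets, hypothetical fixture hosts inferred from the
# comments above) of the negative-constraint semantics these tests exercise:
# a child survives filtering if it still has a host outside the negated set.
negated = set(["host1", "host7"])
child_4_hosts = set(["host1", "host2", "host3"])   # assumed fixture data
child_5_hosts = set(["host4"])                     # assumed fixture data
assert child_4_hosts - negated    # child 4 has hosts beyond "host1"
assert child_5_hosts - negated    # "host4" is not negated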
def test_place_with_conflicting_constraints(self):
    ds_map = {
        "datastore_id_1": (DatastoreInfo(8 * 1024, 0),
                           set(["tag1", "tag2"])),
        "datastore_id_2": (DatastoreInfo(16 * 1024, 0),
                           set(["tag3", "tag2"])),
        "datastore_id_3": (DatastoreInfo(16 * 1024, 0), set([]))
    }
    ds_with_image = ["datastore_id_1", "datastore_id_2"]
    total_storage = sum(t[0].total for t in ds_map.values())
    manager = PMBuilder(ds_map=ds_map,
                        ds_with_image=ds_with_image).build()
    image = DiskImage("disk_image", DiskImage.COPY_ON_WRITE)

    # The following 2 constraints conflict: "tag1" only matches
    # datastore_id_1, while the DATASTORE constraint demands datastore_id_2.
    constraint1 = ResourceConstraint(ResourceConstraintType.DATASTORE_TAG,
                                     ["tag1"])
    constraint2 = ResourceConstraint(ResourceConstraintType.DATASTORE,
                                     ["datastore_id_2"])
    disk1 = Disk(new_id(), DISK_FLAVOR, False, True,
                 total_storage / 100, image,
                 constraints=[constraint1, constraint2])
    disks = [disk1]
    # Placement with conflicting constraints should fail.
    self.assertRaises(NoSuchResourceException, manager.place, None, disks)
def test_constraint_place_engine(self):
    # Create constraint place engine
    image_datastore = "ds1"
    image_datastores = [{"name": image_datastore, "used_for_vms": True}]
    option = PlacementOption(1, 1, image_datastores)
    ds_map = {
        image_datastore: DatastoreInfo(10, 0),
        "ds2": DatastoreInfo(20, 0),
        "ds3": DatastoreInfo(30, 0)
    }
    ds_mgr = self.create_datastore_manager(ds_map, image_datastore)
    engine = ConstraintDiskPlaceEngine(ds_mgr, option)
    ds = engine.placeable_datastores()
    selector = DatastoreSelector.init_datastore_selector(ds_mgr, ds)

    # Try place
    disks = [
        Disk(disk_id="disk", capacity_gb=5, constraints=[]),
        Disk(disk_id="disk-in-ds1", capacity_gb=5, constraints=[
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds1"])
        ]),
        Disk(disk_id="disk-in-ds2", capacity_gb=5, constraints=[
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds2"])
        ]),
        Disk(disk_id="disk-in-ds3", capacity_gb=5, constraints=[
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds3"])
        ]),
        Disk(capacity_gb=5, constraints=[]),
    ]
    disks_placement = DisksPlacement(disks, selector)
    place_result = engine.place(disks_placement)

    # Verify place result
    assert_that(place_result.result, equal_to(PlaceResultCode.OK))

    # Verify placement list and unplaced list
    disks_placement = place_result.disks_placement
    assert_that(disks_placement.placement_list, has_length(3))
    assert_that(disks_placement.disks, has_length(2))
    assert_that([d.resource_id for d in disks_placement.placement_list],
                contains_inanyorder("disk-in-ds1", "disk-in-ds2",
                                    "disk-in-ds3"))
    for placement in disks_placement.placement_list:
        assert_that(placement.type, equal_to(AgentResourcePlacement.DISK))
        assert_that("disk-in-" + placement.container_id,
                    equal_to(placement.resource_id))
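# The partition the assertions above rely on: the constraint engine places
# only disks that carry constraints and leaves the rest in
# disks_placement.disks for a later engine. A minimal sketch (plain Python):
disks = [("disk", []), ("disk-in-ds1", ["ds1"]), ("disk-in-ds2", ["ds2"]),
         ("disk-in-ds3", ["ds3"]), (None, [])]
placed = [d for d in disks if d[1]]
remaining = [d for d in disks if not d[1]]
assert len(placed) == 3 and len(remaining) == 2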
def _create_constraints(self, datastores, tags):
    constraints = []
    for datastore in datastores:
        constraints.append(
            ResourceConstraint(type=ResourceConstraintType.DATASTORE,
                               values=[datastore]))
    for tag in tags:
        constraints.append(
            ResourceConstraint(type=ResourceConstraintType.DATASTORE_TAG,
                               values=[tag]))
    return constraints
def test_place_with_resource_constraints_no_match(self):
    scheduler = BranchScheduler("foo", 9)
    scheduler.configure([
        ChildInfo(id="bar", constraints=[
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["1"])]),
        ChildInfo(id="baz", constraints=[
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["2"])])])

    request = self._place_request()
    request.resource.vm.resource_constraints = [ResourceConstraint(
        ResourceConstraintType.DATASTORE, ["never_found"])]
    response = scheduler.place(request)
    assert_that(response.result, is_(PlaceResultCode.RESOURCE_CONSTRAINT))
def test_resource_constraints_availability_zone_no_match(self):
    strategy = RandomSubsetStrategy(0.5, 2)
    strategy._get_constraints = self._get_constraints
    constraints = [
        ResourceConstraint(ResourceConstraintType.AVAILABILITY_ZONE,
                           ['zone1'], False),
        ResourceConstraint(ResourceConstraintType.DATASTORE,
                           ['datastore2'], False)]
    result = strategy.filter_child(
        [self.child_1, self.child_6, self.child_7],
        self.request, constraints)
    assert_that(result, has_length(0))
def test_place_with_disks_constraints(self):
    ds_map = {
        "datastore_id_1": (DatastoreInfo(8 * 1024, 0), set([])),
        "datastore_id_2": (DatastoreInfo(16 * 1024, 0), set([])),
        "datastore_id_3": (DatastoreInfo(16 * 1024, 0), set([]))
    }
    ds_name_id_map = {
        "ds1": "datastore_id_1",
        "ds2": "datastore_id_2",
        "ds3": "datastore_id_3"
    }
    ds_with_image = ["datastore_id_1", "datastore_id_2"]
    total_storage = sum(t[0].total for t in ds_map.values())
    manager = PMBuilder(ds_map=ds_map, ds_with_image=ds_with_image,
                        ds_name_id_map=ds_name_id_map).build()
    # Disable freespace based admission control.
    manager.FREESPACE_THRESHOLD = 0
    image = DiskImage("disk_image", DiskImage.COPY_ON_WRITE)

    # constraint1 uses a datastore id; constraint2 uses a datastore name,
    # which the manager resolves through ds_name_id_map.
    constraint1 = ResourceConstraint(ResourceConstraintType.DATASTORE,
                                     ["datastore_id_1"])
    constraint2 = ResourceConstraint(ResourceConstraintType.DATASTORE,
                                     ["ds2"])
    disk1 = Disk(new_id(), DISK_FLAVOR, False, True,
                 total_storage / 100, image, constraints=[constraint1])
    disk2 = Disk(new_id(), DISK_FLAVOR, False, True,
                 total_storage / 100, image, constraints=[constraint2])
    disks = [disk1, disk2]
    score, placement_list = manager.place(None, disks)

    # Image disks don't count toward the utilization score.
    assert_that(score, is_(AgentPlacementScore(100, 100)))
    base_index = 0
    assert_that(placement_list[base_index].resource_id, is_(disk1.id))
    assert_that(placement_list[base_index + 1].resource_id, is_(disk2.id))
    assert_that(placement_list[base_index].container_id,
                is_("datastore_id_1"))
    assert_that(placement_list[base_index + 1].container_id,
                is_("datastore_id_2"))
def create_host(id, cpu, mem, disk, constraint_set, overcommit):
    from psim.universe import Universe
    networks = [constraint for constraint in constraint_set
                if constraint.type == RCT.NETWORK]
    datastores = [constraint for constraint in constraint_set
                  if constraint.type == RCT.DATASTORE]
    if not networks:
        # create a dummy network so host handler doesn't throw warnings
        networks.append(ResourceConstraint(RCT.NETWORK, ["VM Network"]))
    local_ds_name = Universe.add_local_ds(id, disk)
    datastores.append(ResourceConstraint(RCT.DATASTORE, [local_ds_name]))
    host = Host(id, networks, datastores, cpu, mem, disk, overcommit)
    return host
def test_resource_constraints_two_constraints(self):
    strategy = RandomSubsetStrategy(0.5, 2)
    strategy._get_constraints = self._get_constraints
    constraints = [
        ResourceConstraint(ResourceConstraintType.DATASTORE,
                           ['datastore1']),
        ResourceConstraint(ResourceConstraintType.DATASTORE,
                           ['datastore2'])]
    result = strategy.filter_child(
        [self.child_1, self.child_2, self.child_3],
        self.request, constraints)
    assert_that(result, has_length(1))
    assert_that(result, contains_inanyorder(self.child_2))
def _coalesce_resources(self, children):
    """Coalesce resources by resource type.

    Build one coalesced ResourceConstraint per type, i.e. given multiple
    input ResourceConstraints, aggregate all the values that share a
    resource type.
    :param children: list of ChildInfo
    """
    for child_info in children:
        # No constraints, skip child
        if not child_info.constraints:
            continue

        constraints = {}
        for constraint in child_info.constraints:
            if constraint.type not in constraints:
                constraints[constraint.type] = set()
            constraints[constraint.type].update(set(constraint.values))

        child_info.constraints = []
        for constraint_type, values in constraints.iteritems():
            child_info.constraints.append(
                ResourceConstraint(constraint_type, list(values)))
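# A minimal sketch (plain tuples as hypothetical stand-ins for
# ResourceConstraint) of what _coalesce_resources does: constraints of the
# same type merge into one constraint whose values are the de-duplicated union.
def coalesce(constraints):
    merged = {}
    for ctype, values in constraints:
        merged.setdefault(ctype, set()).update(values)
    return dict((ctype, sorted(vals)) for ctype, vals in merged.items())

assert coalesce([("DATASTORE", ["ds_4"]),
                 ("DATASTORE", ["ds_5"]),
                 ("DATASTORE", ["ds_6", "ds_6"])]) == \
    {"DATASTORE": ["ds_4", "ds_5", "ds_6"]}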
def __init__(self, id, networks, datastores, cpu, mem, disk, overcommit):
    self.id = id
    self.cpu = cpu
    self.mem = mem
    self.disk = disk
    self.parent = ""
    self.constraints = set()
    host_constraint = ResourceConstraint(ResourceConstraintType.HOST,
                                         ["host-" + str(id)])
    self.constraints.add(host_constraint)
    for net in networks:
        self.constraints.add(net)
    for ds in datastores:
        self.constraints.add(ds)
    self.address = ""
    self.port = ""

    conf_dir = mkdtemp(delete=True)
    state = State(os.path.join(conf_dir, "state.json"))
    common.services.register(ServiceName.MODE, Mode(state))

    self.hv = self._get_hypervisor_instance(
        id, cpu, mem, disk,
        [ds.values[0] for ds in datastores],
        [network.values[0] for network in networks],
        overcommit)

    # need agent_config for create/delete vm.
    agent_config = AgentConfig([
        "--config-path", conf_dir,
        "--hostname", "localhost",
        "--port", "1234",
        "--host-id", id
    ])
    common.services.register(ServiceName.AGENT_CONFIG, agent_config)

    super(Host, self).__init__(self.hv)
def createVmResource(image, network):
    disk = Disk()
    disk.flavor = "some-disk-flavor"
    disk.id = str(uuid.uuid4())
    disk.persistent = False
    disk.new_disk = True
    disk.capacity_gb = 0
    disk.image = DiskImage()
    disk.image.id = image
    disk.image.clone_type = CloneType.COPY_ON_WRITE
    disk.flavor_info = Flavor()
    disk.flavor_info.name = "some-disk-flavor"
    disk.flavor_info.cost = []

    vm = Vm()
    vm.id = str(uuid.uuid4())
    vm.flavor = "some-vm-flavor"
    vm.state = VmPowerState.STOPPED
    vm.flavor_info = Flavor()
    vm.flavor_info.name = "some-vm-flavor"
    vm.flavor_info.cost = [
        QuotaLineItem("vm.cpu", "1", QuotaUnit.COUNT),
        QuotaLineItem("vm.memory", "0.5", QuotaUnit.GB)
    ]
    vm.disks = [disk]
    if network:
        vm.resource_constraints = [
            ResourceConstraint(ResourceConstraintType.NETWORK, [network])
        ]
    return vm
def test_constraint_place_engine_constraint_violated(self):
    # Create constraint place engine
    image_datastore = "ds1"
    image_datastores = [{"name": image_datastore, "used_for_vms": True}]
    option = PlacementOption(1, 1, image_datastores)
    ds_map = {image_datastore: DatastoreInfo(5, 0)}
    ds_mgr = self.create_datastore_manager(ds_map, image_datastores)
    engine = ConstraintDiskPlaceEngine(ds_mgr, option)
    ds = engine.placeable_datastores()
    selector = DatastoreSelector.init_datastore_selector(ds_mgr, ds)

    # Try place: the only datastore is "ds1", but the disk demands "ds2".
    disks = [
        Disk(disk_id="ds1", capacity_gb=1, constraints=[
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds2"])
        ]),
    ]
    place_result = engine.place(DisksPlacement(disks, selector))

    # Verify place result
    assert_that(place_result.result,
                equal_to(PlaceResultCode.NO_SUCH_RESOURCE))
def test_place_with_disks_tagging_constraints(self, tags, expected):
    ds_map = {
        "datastore_id_1": (DatastoreInfo(8 * 1024, 0),
                           set(["tag1", "tag2"])),
        "datastore_id_2": (DatastoreInfo(16 * 1024, 0),
                           set(["tag3", "tag2"])),
        "datastore_id_3": (DatastoreInfo(16 * 1024, 0), set([]))
    }
    ds_with_image = ["datastore_id_1", "datastore_id_2"]
    total_storage = sum(t[0].total for t in ds_map.values())
    manager = PMBuilder(ds_map=ds_map,
                        ds_with_image=ds_with_image).build()
    # Disable freespace based admission control.
    manager.FREESPACE_THRESHOLD = 0
    image = DiskImage("disk_image", DiskImage.COPY_ON_WRITE)

    constraints = [
        ResourceConstraint(ResourceConstraintType.DATASTORE_TAG, [t])
        for t in tags
    ]
    disk = Disk(new_id(), DISK_FLAVOR, False, True,
                total_storage / 100, image, constraints=constraints)
    score, placement_list = manager.place(None, [disk])
    base_index = 0
    assert_that(placement_list[base_index].resource_id, is_(disk.id))
    assert_that(placement_list[base_index].container_id, is_(expected))
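# A minimal sketch (plain sets) of the tag filtering the parameterized cases
# above exercise: with one DATASTORE_TAG constraint per tag, a datastore
# qualifies only if it carries every requested tag.
ds_tags = {"datastore_id_1": set(["tag1", "tag2"]),
           "datastore_id_2": set(["tag3", "tag2"]),
           "datastore_id_3": set()}

def tag_candidates(tags):
    return sorted(ds for ds, owned in ds_tags.items()
                  if all(t in owned for t in tags))

assert tag_candidates(["tag1"]) == ["datastore_id_1"]
assert tag_candidates(["tag2", "tag3"]) == ["datastore_id_2"]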
def test_resource_constraints_no_match(self):
    strategy = RandomSubsetStrategy(0.5, 2)
    strategy._get_constraints = self._get_constraints
    result = strategy.filter_child(
        [self.child_1, self.child_2, self.child_3],
        self.request,
        [ResourceConstraint(ResourceConstraintType.DATASTORE,
                            ['never_found'])])
    assert_that(result, has_length(0))
def test_resource_constraints_negative_all_match(self):
    strategy = RandomSubsetStrategy(0.5, 2)
    strategy._get_constraints = self._get_constraints
    constraints = [ResourceConstraint(ResourceConstraintType.HOST,
                                      ['host6', 'host7'], True)]
    result = strategy.filter_child(
        [self.child_4, self.child_5], self.request, constraints)
    assert_that(result, has_length(2))
    assert_that(result, contains_inanyorder(self.child_4, self.child_5))
def test_resource_constraints_negative_no_match(self):
    strategy = RandomSubsetStrategy(0.5, 2)
    strategy._get_constraints = self._get_constraints
    constraints = [ResourceConstraint(
        ResourceConstraintType.HOST,
        ['host1', 'host2', 'host3', 'host4'], True)]
    result = strategy.filter_child(
        [self.child_4, self.child_5], self.request, constraints)
    assert_that(result, has_length(0))
def resource_request(self, disk=None, vm_disks=None, vm_constraints=None):
    assert disk is None or vm_disks is None

    if disk is not None:
        return Resource(None, [disk])

    if vm_constraints is None:
        vm_constraints = []
    vm_constraints.append(
        ResourceConstraint(ResourceConstraintType.NETWORK,
                           ["VM Network"]))
    resource = Resource(self._vm, None)
    resource.vm.disks = vm_disks
    resource.vm.resource_constraints = vm_constraints
    return resource
def _make_constraints_hashable(self, thrift_constraints):
    """Convert constraints so they can be stored in a set().

    Before adding constraints to a set(), we need to convert
    ResourceConstraint.values from the unhashable list type to the
    hashable frozenset type, since set() uses objects' hashes to
    compare them.
    :param thrift_constraints: list of thrift ResourceConstraint
    :return: hashable_constraints
    """
    hashable_constraints = []
    for thrift_constraint in thrift_constraints:
        hashable_constraints.append(
            ResourceConstraint(thrift_constraint.type,
                               frozenset(thrift_constraint.values),
                               thrift_constraint.negative))
    return hashable_constraints
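# A minimal sketch of why the frozenset conversion matters: lists are
# unhashable, so constraints holding them can't go into a set(), while
# frozenset-backed tuples (hypothetical stand-ins here) deduplicate cleanly.
c1 = ("DATASTORE", frozenset(["ds1", "ds2"]), False)
c2 = ("DATASTORE", frozenset(["ds2", "ds1"]), False)  # same values, new order
assert len(set([c1, c2])) == 1   # equal constraints collapse to one entry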
def test_place_vm_with_virtual_network_constraint(self):
    manager = PMBuilder().build()
    # place with resource constraints that come back in placement_list.
    resource_constraints = [
        ResourceConstraint(ResourceConstraintType.VIRTUAL_NETWORK,
                           ["virtual_net"])
    ]
    vm = Vm(new_id(), VM_FLAVOR, VmPowerState.STOPPED,
            None, None, None, None, resource_constraints)
    score, placement_list = manager.place(vm, None)
    assert_that(placement_list, has_length(2))
    assert_that(placement_list[1].resource_id, equal_to(vm.id))
    assert_that(placement_list[1].container_id, equal_to('virtual_net'))
def test_constraint_place_engine_cannot_fit(self):
    # Create constraint place engine
    image_datastore = "ds1"
    image_datastores = [{"name": image_datastore, "used_for_vms": True}]
    option = PlacementOption(1, 1, image_datastores)
    ds_map = {image_datastore: DatastoreInfo(5, 0)}
    ds_mgr = self.create_datastore_manager(ds_map, image_datastore)
    engine = ConstraintDiskPlaceEngine(ds_mgr, option)
    ds = engine.placeable_datastores()
    selector = DatastoreSelector.init_datastore_selector(ds_mgr, ds)

    # Try place: the 6GB disk is bigger than the 5GB datastore.
    constraint = ResourceConstraint(ResourceConstraintType.DATASTORE,
                                    ["ds1"])
    disk = Disk(disk_id="ds1", capacity_gb=6)
    place_result = engine.place(DisksPlacement([disk], selector),
                                [constraint])

    # Verify place result
    assert_that(place_result.result,
                equal_to(PlaceResultCode.NOT_ENOUGH_DATASTORE_CAPACITY))
def test_place_vm_with_network_constraint(self):
    host_vm_network_list = ["net_1", "net_2", "net_3", "net_4"]
    manager = PMBuilder(vm_networks=host_vm_network_list).build()

    # place with resource constraints that host can match, should succeed.
    resource_constraints = [
        ResourceConstraint(ResourceConstraintType.NETWORK,
                           ["net_2", "net_1"]),
        ResourceConstraint(ResourceConstraintType.NETWORK, ["net_4"]),
        ResourceConstraint(ResourceConstraintType.NETWORK,
                           ["net_3", "net_5"])
    ]
    vm = Vm(new_id(), VM_FLAVOR, VmPowerState.STOPPED,
            None, None, None, None, resource_constraints)
    score, placement_list = manager.place(vm, None)
    assert_that(placement_list, has_length(4))
    assert_that(placement_list[1].resource_id, equal_to(vm.id))
    # randomly pick matched resources from host.
    assert_that(placement_list[1].container_id, is_in(['net_1', 'net_2']))
    assert_that(placement_list[2].container_id, equal_to('net_4'))
    assert_that(placement_list[3].container_id, equal_to('net_3'))

    # place with resource constraints AND logic that host can not match,
    # should fail.
    resource_constraints = [
        ResourceConstraint(ResourceConstraintType.NETWORK,
                           ["net_2", "net_1"]),
        ResourceConstraint(ResourceConstraintType.NETWORK, ["net_5"])
    ]
    vm = Vm(new_id(), VM_FLAVOR, VmPowerState.STOPPED,
            None, None, None, None, resource_constraints)
    self.assertRaises(NoSuchResourceException, manager.place, vm, None)

    # place with resource constraints OR logic that host can not match,
    # should fail.
    resource_constraints = [
        ResourceConstraint(ResourceConstraintType.NETWORK,
                           ["net_6", "net_7"])
    ]
    vm = Vm(new_id(), VM_FLAVOR, VmPowerState.STOPPED,
            None, None, None, None, resource_constraints)
    self.assertRaises(NoSuchResourceException, manager.place, vm, None)
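# A minimal sketch (plain sets, not the placement manager) of the matching
# logic the three cases above exercise: values inside one ResourceConstraint
# are OR'd together, while separate constraints are AND'd.
host_networks = set(["net_1", "net_2", "net_3", "net_4"])

def satisfiable(constraint_values):
    # every constraint needs at least one of its values on the host
    return all(host_networks & set(values) for values in constraint_values)

assert satisfiable([["net_2", "net_1"], ["net_4"], ["net_3", "net_5"]])
assert not satisfiable([["net_2", "net_1"], ["net_5"]])  # AND fails on net_5
assert not satisfiable([["net_6", "net_7"]])             # no OR value matches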
def test_place_image_disk_not_included(self):
    ds_map = {"datastore_id_1": (DatastoreInfo(1 * 1024, 0), set([]))}
    manager = PMBuilder(ds_map=ds_map).build()
    image = DiskImage("disk_image1", DiskImage.COPY_ON_WRITE)

    # Place an image disk that is linked cloned from the image. Its size
    # should not be included in the calculation.
    disk = Disk(new_id(), DISK_FLAVOR, False, True, 2048, image)
    vm = Vm(new_id(), VM_FLAVOR, State.STOPPED, None, None, [disk])
    score, placement_list = manager.place(vm, None)
    assert_that(score.utilization, equal_to(97))
    assert_that(score.transfer, equal_to(100))
    assert_that(placement_list, has_length(2))
    assert_that(placement_list[0].container_id,
                equal_to("datastore_id_1"))
    assert_that(placement_list[0].resource_id, equal_to(vm.id))
    assert_that(placement_list[1].container_id,
                equal_to("datastore_id_1"))
    assert_that(placement_list[1].resource_id, equal_to(disk.id))

    # Try the same place with constraints
    constraint = ResourceConstraint(ResourceConstraintType.DATASTORE,
                                    ["datastore_id_1"])
    disk = Disk(new_id(), DISK_FLAVOR, False, True, 2048, image,
                constraints=[constraint])
    vm = Vm(new_id(), VM_FLAVOR, State.STOPPED, None, None, [disk])
    score, placement_list = manager.place(vm, None)
    assert_that(score.utilization, equal_to(97))
    assert_that(score.transfer, equal_to(100))
    assert_that(placement_list, has_length(2))
    assert_that(placement_list[0].container_id,
                equal_to("datastore_id_1"))
    assert_that(placement_list[0].resource_id, equal_to(vm.id))
    assert_that(placement_list[1].container_id,
                equal_to("datastore_id_1"))
    assert_that(placement_list[1].resource_id, equal_to(disk.id))
def test_reserve_disk(self, constraint_value, placement_id, expected):
    disk_id = "disk_id_1"
    disk_flavor = "disk_flavor_1"

    def reserve_disk_validate(vm, disks):
        assert_that(vm, is_(none()))
        assert isinstance(disks, list)
        assert_that(disks, has_length(1))

        disk = disks[0]
        assert isinstance(disk, HostDisk)
        assert_that(disk.id, equal_to(disk_id))
        assert_that(disk.flavor.name, equal_to(disk_flavor))

        reserve_constraints = disk.constraints
        if reserve_constraints:
            assert isinstance(reserve_constraints, list)
            assert_that(reserve_constraints, has_length(1))

            reserve_constraint = reserve_constraints[0]
            assert_that(reserve_constraint.type,
                        is_(ResourceConstraintType.DATASTORE))
            assert_that(reserve_constraint.values,
                        equal_to([expected, ]))

        reserve_placement = disk.placement
        if reserve_placement:
            assert_that(reserve_placement.type,
                        is_(ResourcePlacementType.DISK))
            assert_that(reserve_placement.resource_id, equal_to(disk_id))
            assert_that(reserve_placement.container_id,
                        equal_to(expected))

        return "reservation_id"

    handler = HostHandler(MagicMock())
    mocked_reserve = MagicMock()
    mocked_reserve.side_effect = reserve_disk_validate
    handler.hypervisor.placement_manager = MagicMock()
    handler.hypervisor.placement_manager.reserve = mocked_reserve

    constraints = None
    placement_list = None
    if constraint_value:
        constraint = ResourceConstraint()
        constraint.values = [constraint_value]
        constraint.type = ResourceConstraintType.DATASTORE
        constraints = [constraint]
    if placement_id:
        placement = ResourcePlacement()
        placement.type = ResourcePlacementType.DISK
        placement.container_id = placement_id
        placement.resource_id = disk_id
        placement_list = ResourcePlacementList([placement])

    flavor_info = Flavor(name=disk_flavor,
                         cost=[QuotaLineItem("a", "b", 1)])
    disk = Disk(id=disk_id, flavor=disk_flavor, persistent=True,
                new_disk=True, capacity_gb=2, flavor_info=flavor_info,
                resource_constraints=constraints)
    request = ReserveRequest()
    request.generation = 1
    request.resource = Resource(vm=None, disks=[disk],
                                placement_list=placement_list)

    response = handler.reserve(request)
    assert_that(response.result, equal_to(ReserveResultCode.OK))
def load_schedulers(self, tree_config, errback=lambda *args: 0):
    schedulers = tree_config["schedulers"]
    overcommit = tree_config["overcommit"]
    if "root_config" in tree_config:
        root_config = tree_config["root_config"]
    else:
        root_config = None

    # Load all schedulers
    sys.stdout.write("Loading schedulers...")
    sys.stdout.flush()
    for (i, scheduler) in enumerate(schedulers):
        id = scheduler['id']
        role = scheduler['role']
        children = []
        if 'children' in scheduler:
            children = scheduler['children']

        if id in self.schedulers:
            errback("duplicated id '%d'" % id)
            continue
        if scheduler['role'] not in self.ROLES:
            errback("invalid role '%s', should be among %s" %
                    (role, self.ROLES))
            continue

        if role in FakeBranchScheduler.ROLES:
            if role == 'root':
                self.schedulers[id] = FakeBranchScheduler(
                    id, role, children, root_config)
                if self.root_scheduler is None:
                    self.root_scheduler = self.schedulers[id]
                else:
                    errback("duplicated root scheduler '%d'" % id)
            else:
                self.schedulers[id] = FakeBranchScheduler(
                    id, role, children)
        elif role == FakeLeafScheduler.ROLE:
            self.schedulers[id] = FakeLeafScheduler(id, children)
        elif role == Host.ROLE:
            from psim.universe import Universe
            cpu = self._get_key(scheduler, 'cpu')
            disk = self._get_key(scheduler, 'disk')
            mem = self._get_key(scheduler, 'mem')
            constraint_list = self._get_key(scheduler, 'constraints')
            constraint_set = set()
            for c in constraint_list:
                type = RCT._NAMES_TO_VALUES[c['type']]
                c_id = c['values'][0]
                if type == RCT.DATASTORE:
                    ds_uuid = Universe.get_ds_uuid(c_id)
                    if ds_uuid not in Universe.datastores.keys():
                        raise ValueError("Invalid Datastore: " + c_id)
                constraint_set.add(
                    ResourceConstraint(type=type, values=[c_id]))
            self.schedulers[id] = SchedulerTree.create_host(
                id, cpu, mem, disk, constraint_set, overcommit)

    # configure schedulers
    self.root_scheduler.update()
    print "Done."
    print "Loaded %d schedulers." % len(schedulers)
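# A hypothetical tree-config entry in the shape load_schedulers expects (keys
# inferred from the code above; the role names for leaf and host schedulers
# are assumptions):
#
#   overcommit: 1.0
#   schedulers:
#     - id: 1
#       role: root
#       children: [2]
#     - id: 2
#       role: leaf
#       children: [3]
#     - id: 3
#       role: host
#       cpu: 8
#       mem: 16384
#       disk: 256
#       constraints:
#         - type: DATASTORE
#           values: [ds1]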
def __init__(self, requests_file):
    self.requests = []
    content = open(Universe.get_path(requests_file), 'r').read()
    requests = yaml.load(content)
    request_id = 1
    disk_id = 1

    if 'auto' in requests:
        requests = self.generate_requests(requests['auto'])

    for request in requests:
        place_request = PlaceRequest()
        resource = Resource()
        resource.disks = []
        env_info = {}

        if 'vm' in request:
            resource.vm = Vm()
            # Make the vm id look like a uuid by zero-padding. Otherwise
            # reference counting doesn't work.
            resource.vm.id = "{0:032d}".format(request_id)
            resource.vm.state = State.STARTED
            flavor = Universe.vm_flavors[request['vm']['flavor']]
            resource.vm.flavor = flavor.name
            resource.vm.flavor_info = flavor.to_thrift()
            resource.vm.disks = []

            if 'constraints' in request:
                constraints = []
                for c in request['constraints']:
                    constraint = ResourceConstraint()
                    constraint.type = RCT._NAMES_TO_VALUES[c['type']]
                    constraint.values = c['values']
                    if 'negative' in c:
                        constraint.negative = c['negative']
                    else:
                        constraint.negative = False
                    constraints.append(constraint)
                if constraints:
                    resource.vm.resource_constraints = constraints

            if 'load' in request['vm']:
                env_info['mem_load'] = request['vm']['load']['mem']

            if 'disks' in request:
                for d in request['disks']:
                    disk = Disk()
                    flavor = Universe.ephemeral_disk_flavors[d['flavor']]
                    disk.flavor = flavor.name
                    disk.flavor_info = flavor.to_thrift()
                    disk.id = str(disk_id)
                    disk.persistent = False
                    disk.new_disk = True
                    disk.capacity_gb = 1024  # hard coded in FakeVmManager
                    disk_id += 1
                    resource.vm.disks.append(disk)

        place_request.resource = resource
        tracing_info = TracingInfo()
        tracing_info.request_id = request_id
        place_request.tracing_info = tracing_info
        request_id += 1
        self.requests.append(PsimVmRequest(place_request, env_info))
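# A hypothetical requests-file entry in the shape this loader parses (keys
# inferred from the code above; the flavor name is an assumption):
#
#   - vm:
#       flavor: core-100
#       load: {mem: 1024}
#     constraints:
#       - type: DATASTORE
#         values: [ds1]
#         negative: false
#     disks:
#       - flavor: core-100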
def _build_scheduler_configure(self):
    child_1 = ChildInfo(
        id="baz_1", address="baz_1", port=1024,
        constraints=[
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds_1"]),
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds_2"]),
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds_3"]),
            ResourceConstraint(ResourceConstraintType.DATASTORE_TAG,
                               ["ds_tag_1"]),
            ResourceConstraint(ResourceConstraintType.DATASTORE_TAG,
                               ["ds_tag_2"]),
            ResourceConstraint(ResourceConstraintType.DATASTORE_TAG,
                               ["ds_tag_3"]),
            ResourceConstraint(ResourceConstraintType.NETWORK, ["net_1"]),
            ResourceConstraint(ResourceConstraintType.NETWORK, ["net_2"]),
            ResourceConstraint(ResourceConstraintType.NETWORK, ["net_3"])
        ])
    child_2 = ChildInfo(
        id="baz_2", address="baz_2", port=1024,
        constraints=[
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds_4"]),
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds_5"]),
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds_6"]),
            # duplicate on purpose
            ResourceConstraint(ResourceConstraintType.DATASTORE, ["ds_6"]),
            ResourceConstraint(ResourceConstraintType.DATASTORE_TAG,
                               ["ds_tag_4"]),
            ResourceConstraint(ResourceConstraintType.DATASTORE_TAG,
                               ["ds_tag_5"]),
            ResourceConstraint(ResourceConstraintType.DATASTORE_TAG,
                               ["ds_tag_6"]),
            ResourceConstraint(ResourceConstraintType.NETWORK, ["net_4"]),
            ResourceConstraint(ResourceConstraintType.NETWORK, ["net_5"]),
            ResourceConstraint(ResourceConstraintType.NETWORK, ["net_6"])
        ])
    child_3 = ChildInfo(
        id="baz_3", address="baz_3", port=1024,
        constraints=[
            ResourceConstraint(ResourceConstraintType.DATASTORE,
                               ["ds_7", "ds_8", "ds_9"]),
            ResourceConstraint(ResourceConstraintType.DATASTORE_TAG,
                               ["ds_tag_7", "ds_tag_8", "ds_tag_9"]),
            ResourceConstraint(ResourceConstraintType.NETWORK,
                               ["net_7", "net_8", "net_9"])])
    return [child_1, child_2, child_3]
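# A quick check (plain Python, mirroring the coalescing rule shown earlier)
# that the on-purpose duplicate "ds_6" above collapses once constraints are
# aggregated by type.
values = ["ds_4", "ds_5", "ds_6", "ds_6"]
assert sorted(set(values)) == ["ds_4", "ds_5", "ds_6"]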