def test_unmanaged_nodes(node_mgr: NodeManager) -> None:
    assert len(node_mgr.get_buckets()) == 2

    tux = SchedulerNode("tux", bucket_id=ht.BucketId("tuxid"))
    node_mgr.add_unmanaged_nodes([tux])
    assert len(node_mgr.get_buckets()) == 3
    assert node_mgr.get_buckets_by_id()[tux.bucket_id].nodes == [tux]

    tux2 = SchedulerNode("tux2", bucket_id=tux.bucket_id)
    node_mgr.add_unmanaged_nodes([tux2])
    assert len(node_mgr.get_buckets()) == 3
    assert node_mgr.get_buckets_by_id()[tux.bucket_id].nodes == [tux, tux2]

    # re-adding the same unmanaged nodes is idempotent: no new buckets, no duplicates
    node_mgr.add_unmanaged_nodes([tux, tux2])
    assert len(node_mgr.get_buckets()) == 3
    assert node_mgr.get_buckets_by_id()[tux.bucket_id].nodes == [tux, tux2]

def validate_hg_intersections(
    ge_env: GridEngineEnvironment, node_mgr: NodeManager, warn_function: WarnFunction
) -> bool:
    bucket_to_hgs: Dict[str, Set[str]] = {}
    for bucket in node_mgr.get_buckets():
        if str(bucket) not in bucket_to_hgs:
            bucket_to_hgs[str(bucket)] = set()

    by_str = partition_single(node_mgr.get_buckets(), str)

    for queue in ge_env.queues.values():
        if not queue.autoscale_enabled:
            continue

        for hostgroup in queue.bound_hostgroups.values():
            for bucket in node_mgr.get_buckets():
                is_satisfied = True
                for constraint in hostgroup.constraints:
                    result = constraint.satisfied_by_bucket(bucket)
                    if not result:
                        is_satisfied = False
                        break
                if is_satisfied:
                    bucket_to_hgs[str(bucket)].add(hostgroup.name)

    # warnings only: nothing below constitutes a hard validation failure
    failure = False
    for bkey, matches in bucket_to_hgs.items():
        bucket = by_str[bkey]
        if not matches:
            warn_function(
                "%s is not matched by any hostgroup. This is not an error.",
                bucket,
            )
        elif len(matches) > 1:
            # seq_no will be used to determine ties
            if not ge_env.scheduler.sort_by_seqno:
                warn_function(
                    "%s is matched by more than one hostgroup %s. This is not an error.",
                    bucket,
                    ",".join(matches),
                )
    return failure

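# A minimal illustrative sketch (not part of the original code) of the mapping that
# validate_hg_intersections builds: str(bucket) -> names of the hostgroups whose
# constraints that bucket satisfies. The bucket strings and hostgroup names below are
# invented purely for illustration.
_EXAMPLE_BUCKET_TO_HGS: Dict[str, Set[str]] = {
    "htc/Standard_F4s_v2": {"@cloudhtc"},                   # exactly one match
    "hpc/Standard_HB120rs_v3": {"@cloudhpc", "@cloudmpi"},  # >1 match: warned unless sort_by_seqno
    "login/Standard_D4s_v3": set(),                         # no match: warned, but not an error
}
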
def add_default_placement_groups(config: Dict, node_mgr: NodeManager) -> None:
    nas = config.get("nodearrays", {})
    for name, child in nas.items():
        if child.get("placement_groups"):
            # a nodearray defines placement_groups explicitly - generate nothing
            return

    by_pg = partition(
        node_mgr.get_buckets(), lambda b: (b.nodearray, b.placement_group)
    )
    by_na_vm = partition(node_mgr.get_buckets(), lambda b: (b.nodearray, b.vm_size))

    for key, buckets in by_na_vm.items():
        nodearray, vm_size = key
        non_pg_buckets = [b for b in buckets if not b.placement_group]
        if not non_pg_buckets:
            # hardcoded PlacementGroupId
            logging.debug(
                "Nodearray %s defines PlacementGroupId, so no additional "
                "placement groups will be created automatically.",
                nodearray,
            )
            continue

        bucket = non_pg_buckets[0]
        if not bucket.supports_colocation:
            continue

        buf_size = int(
            nas.get(nodearray, {}).get("generated_placement_group_buffer", 2)
        )
        buf_remaining = buf_size
        pgi = 0
        while buf_remaining > 0:
            pg_name = ht.PlacementGroup("{}_pg{}".format(vm_size, pgi))
            pg_key = (nodearray, pg_name)
            if pg_key not in by_pg:
                logging.fine("Adding placement group %s", pg_name)
                node_mgr.add_placement_group(pg_name, bucket)
                buf_remaining -= 1
            pgi += 1

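# A minimal sketch, assuming the config shape that add_default_placement_groups reads:
# "nodearrays" maps nodearray names to per-array settings, and
# "generated_placement_group_buffer" caps how many placement groups are auto-created per
# (nodearray, vm_size) pair (defaulting to 2 above). The nodearray name "hpc" is an
# assumption for illustration only.
_EXAMPLE_PG_CONFIG: Dict = {
    "nodearrays": {
        "hpc": {
            "generated_placement_group_buffer": 2,
            # if this entry defined "placement_groups" explicitly, the function
            # would return early and auto-generate nothing at all
        }
    }
}
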
def test_node_software_configuration_alias(node_mgr: NodeManager) -> None:
    node_mgr.add_default_resource(
        {}, "int_alias", "node.software_configuration.custom_int"
    )
    b = node_mgr.get_buckets()[0]
    assert b.resources["int_alias"] == b.software_configuration["custom_int"]

def test_node_resources_alias(node_mgr: NodeManager) -> None:
    node_mgr.add_default_resource({}, "memgb_alias", "node.resources.memgb")
    b = node_mgr.get_buckets()[0]
    assert b.resources["memgb_alias"] == b.resources["memgb"]