Example #1
def test_partition() -> None:
    objs = [{"id": 1, "name": "A"}, {"id": 2}, {"id": 3}]
    by_id = partition(objs, lambda x: x["id"])
    assert set([1, 2, 3]) == set(by_id.keys())
    assert by_id[1] == [objs[0]]
    assert by_id[2] == [objs[1]]
    assert by_id[3] == [objs[2]]

    by_name = partition(objs, lambda x: x.get("name"))
    assert set(["A", None]) == set(by_name.keys())
    assert by_name["A"] == [objs[0]]
    assert by_name[None] == [objs[1], objs[2]]
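
The assertions above pin down the contract: partition groups every item under the key returned by the callback and preserves input order within each group, while partition_single (used in later examples) maps each key to exactly one item and raises when a key repeats. A minimal sketch consistent with these tests, under hypothetical names (this is not the library's actual source):

from typing import Callable, Dict, Hashable, List, TypeVar

T = TypeVar("T")
K = TypeVar("K", bound=Hashable)


def partition_sketch(items: List[T], key: Callable[[T], K]) -> Dict[K, List[T]]:
    # group each item under its computed key, preserving input order
    grouped: Dict[K, List[T]] = {}
    for item in items:
        grouped.setdefault(key(item), []).append(item)
    return grouped


def partition_single_sketch(items: List[T], key: Callable[[T], K]) -> Dict[K, T]:
    # like partition, but every key must map to exactly one item
    ret: Dict[K, T] = {}
    for k, group in partition_sketch(items, key).items():
        if len(group) != 1:
            raise RuntimeError(
                "Expected exactly one item for key {!r}, found {}".format(k, len(group)))
        ret[k] = group[0]
    return ret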
Example #2
    def _setup_shell_locals(self, config: Dict) -> Dict:
        ctx = DefaultContextHandler("[interactive-readonly]")

        driver = self._driver(config)
        dcalc, jobs_list = self._demand_calc(config, driver)
        nodes_list = dcalc.node_mgr.get_nodes()
        for node in nodes_list:
            node.shellify()
        nodes = partition_single(nodes_list, lambda n: n.name)
        nodes.update(
            partition_single([x for x in nodes_list if x.hostname],
                             lambda n: n.hostname))
        jobs: Dict[str, Any]
        try:
            jobs = partition_single(jobs_list, lambda j: j.name)
        except Exception:
            jobs = partition(jobs_list, lambda j: j.name)

        return {
            "config": config,
            "cli": self,
            "ctx": ctx,
            "demand_calc": dcalc,
            "node_mgr": dcalc.node_mgr,
            "jobs": ShellDict(jobs),
            "nodes": ShellDict(nodes),
        }
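
The try/except above exists because job names may collide: partition_single raises on a duplicate key, so the code falls back to partition and exposes a list of jobs per name. A small illustration of that fallback, using the partition_sketch helpers sketched under Example #1 (hypothetical stand-ins):

jobs_unique = [{"name": "job-1"}, {"name": "job-2"}]
jobs_dupes = [{"name": "job-1"}, {"name": "job-1"}]

# unique names: exactly one job per key
assert partition_single_sketch(jobs_unique, lambda j: j["name"])["job-2"] == {"name": "job-2"}

# duplicate names: the single-valued variant raises, so fall back to lists per key
try:
    jobs = partition_single_sketch(jobs_dupes, lambda j: j["name"])
except RuntimeError:
    jobs = partition_sketch(jobs_dupes, lambda j: j["name"])
assert len(jobs["job-1"]) == 2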
Example #3
    def add_nodes(self, nodes: List["Node"]) -> None:
        new_by_id = partition(nodes, lambda n: n.delayed_node_id.transient_id)
        cur_by_id = partition(self.nodes,
                              lambda n: n.delayed_node_id.transient_id)

        filtered = []
        for new_id, new_nodes in new_by_id.items():
            if new_id not in cur_by_id:
                filtered.append(new_nodes[0])

        new_by_hostname = partition(filtered, lambda n: n.hostname_or_uuid)
        cur_by_hostname = partition(self.nodes, lambda n: n.hostname_or_uuid)

        for new_hostname, new_nodes in new_by_hostname.items():
            if new_hostname not in cur_by_hostname:
                self.nodes.append(new_nodes[0])
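
add_nodes uses partition purely for de-duplication: an incoming node is appended only if neither its transient id nor its hostname already appears among self.nodes. A standalone illustration of the same two-pass filter, using a hypothetical minimal stand-in for Node and the partition_sketch helper sketched under Example #1:

from dataclasses import dataclass


@dataclass
class FakeNode:  # hypothetical stand-in for the real Node class
    transient_id: str
    hostname_or_uuid: str


current = [FakeNode("t1", "host-1")]
incoming = [
    FakeNode("t1", "host-1"),  # duplicate transient id -> dropped in pass 1
    FakeNode("t2", "host-1"),  # new id, duplicate hostname -> dropped in pass 2
    FakeNode("t3", "host-2"),  # genuinely new -> appended
]

cur_by_id = partition_sketch(current, lambda n: n.transient_id)
filtered = [
    group[0]
    for tid, group in partition_sketch(incoming, lambda n: n.transient_id).items()
    if tid not in cur_by_id
]

cur_by_hostname = partition_sketch(current, lambda n: n.hostname_or_uuid)
for hostname, group in partition_sketch(filtered, lambda n: n.hostname_or_uuid).items():
    if hostname not in cur_by_hostname:
        current.append(group[0])

assert [n.transient_id for n in current] == ["t1", "t3"]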
Example #4
def test_choice_ordering() -> None:
    bindings = MockClusterBinding()
    bindings.add_nodearray("array-a", {"nodetype": "A"})
    bindings.add_bucket("array-a", "Standard_F4", 10, 10)
    bindings.add_nodearray("array-b", {"nodetype": "B"})
    bindings.add_bucket("array-b", "Standard_F4s", 10, 10)

    register_result_handler(DefaultContextHandler("[test_or_ordering]"))
    for ordering in [["A", "B"], ["B", "A"]]:
        node_mgr = _node_mgr(bindings)
        hi, lo = node_mgr.get_buckets()

        if hi.resources["nodetype"] != ordering[0]:
            hi, lo = lo, hi

        assert hi.available_count == 10
        assert lo.available_count == 10
        result = node_mgr.allocate(
            {
                "nodetype": ordering,
                "exclusive": True,
            },
            node_count=15,  # noqa: E231
        )
        assert hi.available_count == 0
        assert lo.available_count == 5
        assert result

        by_array = partition(result.nodes, lambda n: n.resources["nodetype"])
        assert len(by_array[ordering[0]]) == 10
        assert len(by_array[ordering[1]]) == 5
Example #5
def common_cluster_test(qsub_commands: List[str],
                        pg_counts: Optional[Dict[str, int]] = None,
                        previous_dcalc: Optional[DemandCalculator] = None,
                        **array_counts: int) -> DemandCalculator:
    pg_counts = pg_counts or {}
    dcalc = common_cluster(qsub_commands, previous_dcalc)
    demand = dcalc.get_demand()
    demandprinter.print_demand(["name", "job_ids", "placement_group"], demand)

    # sanity check that we don't recreate the same node
    partition_single(demand.new_nodes, lambda n: n.name)
    by_array = partition(demand.new_nodes, lambda n: n.nodearray)
    by_pg = partition(demand.new_nodes, lambda n: n.placement_group)
    if set(by_pg.keys()) != set([None]):
        if set(by_pg.keys()) != set(pg_counts.keys()):
            assert False, "\n%s\n%s" % (
                [(x, len(y)) for x, y in by_pg.items()],
                pg_counts,
            )
        assert set(by_pg.keys()) == set(pg_counts.keys())
        assert not (bool(by_pg) ^ bool(pg_counts))

    if pg_counts:
        for pg_name, count in pg_counts.items():
            assert pg_name in by_pg
            assert (
                len(by_pg[pg_name]) == count
            ), "Expected pg {} to have {} nodes. Found {}. Full {}".format(
                pg_name,
                count,
                len(by_pg[pg_name]),
                [(x, len(y)) for x, y in by_pg.items()],
            )

        for pg_name in by_pg:
            assert pg_name in pg_counts

    for nodearray_name, count in array_counts.items():
        assert nodearray_name in by_array
        assert len(by_array[nodearray_name]) == count, [
            n.name for n in by_array[nodearray_name]
        ]

    for nodearray_name in by_array:
        assert nodearray_name in array_counts

    return dcalc
Example #6
    def create_nodes(self, nodes: List[Node]) -> NodeCreationResult:
        creation_request = NodeCreationRequest()
        creation_request.sets = []
        # the node attributes aren't hashable, so a string representation
        # is good enough to ensure they are all the same across the list.
        p_nodes_dict = partition(
            nodes,
            lambda n: (
                n.nodearray,
                n.vm_size,
                n.placement_group,
                str(n.node_attribute_overrides),
            ),
        )

        request_tuples: List[Tuple[Node, NodeCreationRequestSet]] = []

        def _node_key(n: Node) -> Tuple[str, int]:
            try:
                index = int(n.name.split("-")[-1])
                return (n.nodearray, index)
            except ValueError:
                return (n.nodearray, -1)

        for key, p_nodes in p_nodes_dict.items():
            nodearray, vm_size, pg, _ = key
            request_set = NodeCreationRequestSet()

            request_set.nodearray = nodearray
            request_set.count = len(p_nodes)
            request_set.placement_group_id = pg
            request_set.definition = NodeCreationRequestSetDefinition()
            request_set.definition.machine_type = vm_size

            if p_nodes[0].node_attribute_overrides:
                request_set.node_attributes = p_nodes[0].node_attribute_overrides

            first_node = sorted(p_nodes, key=_node_key)[0]

            request_tuples.append((first_node, request_set))

        sorted_tuples = sorted(request_tuples, key=lambda t: _node_key(t[0]))
        for _, request_set in sorted_tuples:
            creation_request.sets.append(request_set)

        creation_request.validate()

        logging.fine(json.dumps(creation_request.to_dict()))

        http_response, result = self.clusters_module.create_nodes(
            self.session, self.cluster_name, creation_request
        )

        self._log_response(http_response, result)

        return result
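
The _node_key helper above orders request sets by the numeric suffix of a node's name rather than lexically, with -1 as a sentinel for names that have no numeric suffix. A quick illustration of why that matters (hypothetical names):

names = ["htc-10", "htc-2", "htc-extra"]


def suffix_key(name: str) -> int:
    # same idea as _node_key, keyed on the name alone
    try:
        return int(name.split("-")[-1])
    except ValueError:
        return -1


assert sorted(names) == ["htc-10", "htc-2", "htc-extra"]  # lexical: "10" sorts before "2"
assert sorted(names, key=suffix_key) == ["htc-extra", "htc-2", "htc-10"]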
Example #7
def add_default_placement_groups(config: Dict, node_mgr: NodeManager) -> None:
    nas = config.get("nodearrays", {})
    for name, child in nas.items():
        if child.get("placement_groups"):
            return

    by_pg = partition(node_mgr.get_buckets(), lambda b:
                      (b.nodearray, b.placement_group))
    by_na_vm = partition(node_mgr.get_buckets(), lambda b:
                         (b.nodearray, b.vm_size))

    for key, buckets in by_na_vm.items():
        nodearray, vm_size = key
        non_pg_buckets = [b for b in buckets if not b.placement_group]
        if not non_pg_buckets:
            # hardcoded PlacementGroupId
            logging.debug(
                "Nodearray %s defines PlacementGroupId, so no additional " +
                "placement groups will be created automatically.",
                nodearray,
            )
            continue
        bucket = non_pg_buckets[0]
        if not bucket.supports_colocation:
            continue

        buf_size = int(
            nas.get(nodearray, {}).get("generated_placement_group_buffer", 2))
        buf_remaining = buf_size
        pgi = 0
        while buf_remaining > 0:
            pg_name = ht.PlacementGroup("{}_pg{}".format(vm_size, pgi))
            pg_key = (nodearray, pg_name)
            if pg_key not in by_pg:
                logging.fine("Adding placement group %s", pg_name)
                node_mgr.add_placement_group(pg_name, bucket)
                buf_remaining -= 1
            pgi += 1
Example #8
def create_vm_sizes(cache_path: Optional[str] = None) -> None:
    if cache_path and os.path.exists(cache_path):
        with open(cache_path) as fr:
            raw = fr.read()
    else:
        az_path = which("az")
        if az_path:
            raw = check_output([
                az_path,
                "vm",
                "list-skus",
                "--all",
            ]).decode()
        else:
            print("You need az cli installed.", file=sys.stderr)
            sys.exit(1)

        if cache_path:
            with open(cache_path, "w") as fw:
                fw.write(raw)

    print("Parsing list-skus...")
    try:
        skus = json.loads(raw)
    except Exception as e:
        # point at the offending line reported in the JSON parse error
        toks = str(e).split()
        line_no = int(toks[toks.index("line") + 1])
        print("{}: '{}'".format(e, raw.splitlines()[line_no - 1]))
        return

    print("done")

    skus = [
        s for s in skus
        if s.get("family") and s.get("resourceType") == "virtualMachines"
    ]

    min_skus = []
    for sku in skus:
        min_sku = {}
        for key in ["name", "family", "size", "tier"]:
            min_sku[key] = sku[key]

        assert min_sku["family"], sku
        if not sku["locationInfo"]:
            print("WARNING: Missing location info. See", min_sku)
            continue
        min_sku["location"] = sku["locationInfo"][0]["location"]

        cap_list = sku["capabilities"]
        cap_dict = {}
        for entry in cap_list:
            value = entry["value"]
            if value.isdigit():
                value = int(value)
            elif value in ["True", "False"]:
                value = value == "True"
            elif "," in value:
                value = value.split(",")
            else:
                try:
                    value = float(value)
                except ValueError:
                    pass
            cap_dict[entry["name"]] = value
        min_sku["capabilities"] = cap_dict
        min_skus.append(min_sku)

    by_location = partition(min_skus, lambda s: s["location"])
    if os.path.exists("src/hpc/autoscale/node/vm_sizes.json"):
        print("reload")
        vm_sizes = json.load(open("src/hpc/autoscale/node/vm_sizes.json"))
    else:
        vm_sizes = {}
    locs = list(by_location.keys())
    ordered_locations = sorted(
        by_location.items(),
        key=lambda x: locs.index(x[0]) if x[0] in locs else -1)
    for loc, loc_skus in ordered_locations:
        vm_sizes[loc] = partition_single(loc_skus, lambda s: s["name"])

    if which("cycle_server"):
        cs_mts = json.loads(
            check_output([
                "cycle_server",
                "execute",
                "--format",
                "json",
                "select * from Azure.MachineType",
            ]).decode())
    else:
        print(
            "Warning: cycle_server found! Skipping validation",
            file=sys.stderr,
        )
        cs_mts = []

    for row in cs_mts:
        try:
            aux_info = AuxVMSizeInfo(vm_sizes[row["Location"]][row["Name"]])
            if aux_info.vcpu_count != row["CoreCount"]:

                print(
                    row,
                    aux_info.vcpu_count,
                    json.dumps(getattr(aux_info, "_AuxVMSizeInfo__record"),
                               indent=2),
                )
                if row["Location"] not in vm_sizes:
                    vm_sizes[row["Location"]] = {}

                rec = {
                    "name": row.pop("Name"),
                    "family": row.pop("Family"),
                    "size": row.pop("SKU"),
                    "tier": row.pop("Tier"),
                    "location": row.pop("Location"),
                    "linux_price": row.get("Linux", {}).get("Regular", 0.0),
                    "windows_price": row.get("Linux", {}).get("Regular", 0.0),
                    "capabilities": row,
                }
                vm_sizes[row["Location"]][row["Name"]] = rec
                sys.exit(1)
            continue
        except KeyError:
            pass

        if row["Location"] not in vm_sizes:
            vm_sizes[row["Location"]] = {}

    final_vm_sizes: Dict = {}
    for loc in sorted(vm_sizes):
        final_vm_sizes[loc] = loc_dict = {}
        for vm_size in sorted(vm_sizes[loc]):
            loc_dict[vm_size] = vm_sizes[loc][vm_size]

    with open("new_vm_sizes.json", "w") as fw:
        json.dump(final_vm_sizes, fw, indent=2)

    with open("../src/hpc/autoscale/node/vm_sizes.json") as fr:
        old_data = json.load(fr)

    missing_locations = set(old_data.keys()) - set(final_vm_sizes.keys())
    new_locations = set(final_vm_sizes.keys()) - set(old_data.keys())
    if missing_locations:
        print("WARNING: Missing locations:", ",".join(missing_locations))
    if new_locations:
        print("INFO: New locations:", ",".join(new_locations))

    all_locations = list(old_data.keys()) + list(new_locations)

    for location in all_locations:
        old_loc_data = old_data.get(location, {})
        new_loc_data = final_vm_sizes.get(location, {})
        missing_skus = set(old_loc_data.keys()) - set(new_loc_data.keys())
        new_skus = set(new_loc_data.keys()) - set(old_loc_data.keys())
        if missing_skus and location not in missing_locations:
            print(
                "WARNING: Missing SKUs for location",
                location,
                ":",
                ",".join(missing_skus),
            )
        if new_skus and location not in new_locations:
            print("INFO: New SKUs for location", location, ":",
                  ",".join(new_skus))

    print(
        "Copy ./new_vm_sizes.json to ./src/hpc/autoscale/node/vm_sizes.json to complete the creation."
    )
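
The capabilities loop above normalizes the strings returned by az vm list-skus: digit strings become ints, "True"/"False" become bools, comma-separated strings become lists, float-like strings become floats, and anything else is left alone. A few conversions under those rules, via a hypothetical helper that mirrors the loop:

def coerce_capability(value: str):
    # hypothetical mirror of the capability-coercion rules above
    if value.isdigit():
        return int(value)
    if value in ("True", "False"):
        return value == "True"
    if "," in value:
        return value.split(",")
    try:
        return float(value)
    except ValueError:
        return value


assert coerce_capability("4") == 4
assert coerce_capability("True") is True
assert coerce_capability("1,2,3") == ["1", "2", "3"]
assert coerce_capability("3.5") == 3.5
assert coerce_capability("Standard") == "Standard"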
Example #9
    def add_nodes_to_cluster(self, nodes: List[Node]) -> List[Node]:
        self.initialize()

        all_nodes = self.pbscmd.pbsnodes_parsed("-a")
        by_ccnodeid = partition(
            all_nodes, lambda x: x.get("resources_available.ccnodeid"))

        ret = []
        for node in nodes:
            if not node.hostname:
                continue

            if not node.private_ip:
                continue

            node_id = node.delayed_node_id.node_id
            if not node_id:
                logging.error("%s does not have a nodeid! Skipping", node)
                continue

            if node_id in by_ccnodeid:
                skip_node = False
                for ndict in by_ccnodeid[node_id]:
                    if ndict["name"].lower() != node.hostname.lower():
                        logging.error(
                            "Duplicate hostname found for the same node id! %s and %s. See 'valid_hostnames' in autoscale as a possible workaround.",
                            node,
                            ndict["name"],
                        )
                        skip_node = True
                        break
                if skip_node:
                    continue

            if not is_valid_hostname(self.config, node):
                continue

            if not self._validate_reverse_dns(node):
                logging.fine(
                    "%s still has a hostname that can not be looked via reverse dns. This should repair itself.",
                    node,
                )
                continue

            if not node.resources.get("ccnodeid"):
                logging.info(
                    "%s is not managed by CycleCloud, or at least 'ccnodeid' is not defined. Ignoring",
                    node,
                )
                continue
            try:
                try:
                    ndicts = self.pbscmd.qmgr_parsed("list", "node",
                                                     node.hostname)
                    if ndicts and ndicts[0].get(
                            "resources_available.ccnodeid"):
                        logging.info(
                            "ccnodeid is already defined on %s. Skipping",
                            node)
                        continue
                    # TODO RDH should we just delete it instead?
                    logging.info(
                        "%s already exists in this cluster. Setting resources.",
                        node)
                except CalledProcessError:
                    logging.info(
                        "%s does not exist in this cluster yet. Creating.",
                        node)
                    self.pbscmd.qmgr("create", "node", node.hostname)

                for res_name, res_value in node.resources.items():
                    # we set ccnodeid last, so that we can see that we have completely joined a node
                    # if and only if ccnodeid has been set
                    if res_name == "ccnodeid":
                        continue

                    if res_value is None:
                        continue

                    # TODO RDH track down
                    if res_name == "group_id" and res_value == "None":
                        continue

                    # skip things like host which are useful to set default resources on non-existent
                    # nodes for autoscale packing, but not on actual nodes
                    if res_name in self.read_only_resources:
                        continue

                    if res_name not in self.resource_definitions:
                        # TODO bump to a warning?
                        logging.fine(
                            "%s is an unknown PBS resource for node %s. Skipping this resource",
                            res_name,
                            node,
                        )
                        continue
                    res_value_str: str

                    # pbs size does not support decimals
                    if isinstance(res_value, ht.Size):
                        res_value_str = "{}{}".format(int(res_value.value),
                                                      res_value.magnitude)
                    elif isinstance(res_value, bool):
                        res_value_str = "1" if bool else "0"
                    else:
                        res_value_str = str(res_value)

                    self.pbscmd.qmgr(
                        "set",
                        "node",
                        node.hostname,
                        "resources_available.{}={}".format(
                            res_name, res_value_str),
                    )

                self.pbscmd.qmgr(
                    "set",
                    "node",
                    node.hostname,
                    "resources_available.{}={}".format(
                        "ccnodeid", node.resources["ccnodeid"]),
                )
                self.pbscmd.pbsnodes("-r", node.hostname)
                ret.append(node)
            except SubprocessError as e:
                logging.error(
                    "Could not fully add %s to cluster: %s. Will attempt next cycle",
                    node,
                    e,
                )

        return ret
Example #10
    def create_nodes(self, nodes: List[Node]) -> NodeCreationResult:
        if self.read_only:
            ret = NodeCreationResult()
            ret.operation_id = str(uuid.uuid4())
            ret.sets = [NodeCreationResultSet(added=len(nodes))]
            for n in nodes:
                n.exists = True
                n.target_state = ht.NodeStatus("Started")
                n.delayed_node_id.node_id = ht.NodeId("dryrun-" + str(uuid.uuid4()))
            node_records = [_node_to_ccnode(n) for n in nodes]
            self._read_only_nodes[ht.OperationId(ret.operation_id)] = node_records
            return ret

        creation_request = NodeCreationRequest()
        creation_request.sets = []
        # the node attributes aren't hashable, so a string representation
        # is good enough to ensure they are all the same across the list.
        p_nodes_dict = partition(
            nodes,
            lambda n: (
                n.nodearray,
                n.vm_size,
                n.placement_group,
                str(n.node_attribute_overrides),
                n.keep_alive,
            ),
        )

        request_tuples: List[Tuple[Node, NodeCreationRequestSet]] = []

        def _node_key(n: Node) -> Tuple[str, int]:
            try:
                index = int(n.name.split("-")[-1])
                return (n.nodearray, index)
            except ValueError:
                return (n.nodearray, -1)

        for key, p_nodes in p_nodes_dict.items():
            nodearray, vm_size, pg, _, keep_alive = key
            request_set = NodeCreationRequestSet()

            request_set.nodearray = nodearray
            request_set.count = len(p_nodes)
            request_set.placement_group_id = pg
            request_set.definition = NodeCreationRequestSetDefinition()
            request_set.definition.machine_type = vm_size

            if p_nodes[0].node_attribute_overrides:
                request_set.node_attributes = deepcopy(
                    p_nodes[0].node_attribute_overrides
                )

            if keep_alive:
                if not request_set.node_attributes:
                    request_set.node_attributes = {}
                request_set.node_attributes["KeepAlive"] = keep_alive

            first_node = sorted(p_nodes, key=_node_key)[0]

            request_tuples.append((first_node, request_set))

        sorted_tuples = sorted(request_tuples, key=lambda t: _node_key(t[0]))
        for _, request_set in sorted_tuples:
            creation_request.sets.append(request_set)

        creation_request.validate()

        logging.fine(json.dumps(creation_request.to_dict()))

        http_response, result = self.clusters_module.create_nodes(
            self.session, self.cluster_name, creation_request
        )

        self._log_response(http_response, result)

        return result
Example #11
def test_family_and_spots() -> None:
    bindings = MockClusterBinding("clusty")
    bindings.add_nodearray("htc", {},
                           spot=False,
                           max_count=10,
                           max_core_count=400)
    bindings.add_nodearray("hpc", {}, spot=False, max_placement_group_size=7)
    bindings.add_bucket(
        "htc",
        "Standard_F4s",
        max_count=20,
        available_count=10,
        family_consumed_core_count=40,
        family_quota_core_count=80,
        family_quota_count=20,
        regional_consumed_core_count=45,
        regional_quota_core_count=100,
        regional_quota_count=25,
    )

    bindings.add_bucket(
        "htc",
        "Standard_D4s_v3",
        max_count=20,
        available_count=10,
        family_consumed_core_count=40,
        family_quota_core_count=80,
        family_quota_count=20,
        regional_consumed_core_count=45,
        regional_quota_core_count=100,
        regional_quota_count=25,
    )

    bindings.add_bucket(
        "hpc",
        "Standard_D4s_v3",
        max_count=20,
        available_count=10,
        family_consumed_core_count=40,
        family_quota_core_count=80,
        family_quota_count=20,
        regional_consumed_core_count=45,
        regional_quota_core_count=100,
        regional_quota_count=25,
    )

    bindings.add_bucket(
        "hpc",
        "Standard_D4s_v3",
        max_count=20,
        available_count=10,
        family_consumed_core_count=40,
        family_quota_core_count=80,
        family_quota_count=20,
        regional_consumed_core_count=45,
        regional_quota_core_count=100,
        regional_quota_count=25,
        placement_groups=["123"],
    )

    bindings.add_nodearray("htcspot", {}, spot=True)
    bindings.add_bucket(
        "htcspot",
        "Standard_F4s",
        max_count=20,
        available_count=10,
        family_consumed_core_count=0,
        family_quota_core_count=0,
        family_quota_count=0,
        regional_consumed_core_count=45,
        regional_quota_core_count=100,
        regional_quota_count=25,
    )

    node_mgr = new_node_manager({"_mock_bindings": bindings})
    by_key: Dict[Tuple[str, str], List[NodeBucket]] = partition(
        node_mgr.get_buckets(), lambda b: (b.nodearray, b.vm_size))
    htc = by_key[("htc", "Standard_F4s")][0]
    htc2 = by_key[("htc", "Standard_D4s_v3")][0]
    htcspot = by_key[("htcspot", "Standard_F4s")][0]
    hpcs = by_key[("hpc", "Standard_D4s_v3")]
    hpc_pg = [x for x in hpcs if x.placement_group][0]

    # ondemand instances use actual family quota
    assert htc.limits.family_max_count == 20
    assert htc2.limits.family_max_count == 20
    assert htc.limits.family_available_count == 10
    assert htc2.limits.family_available_count == 10

    # spot instances replace family with regional
    assert htcspot.limits.family_max_count == 25
    assert htcspot.limits.family_available_count == 13

    assert node_mgr.allocate(
        {
            "node.nodearray": "htc",
            "node.vm_size": "Standard_F4s"
        },
        node_count=1)
    # ondemand instances use actual family quota
    assert htc.limits.family_max_count == 20
    assert htc2.limits.family_max_count == 20
    assert htc.limits.family_available_count == 9
    assert htc2.limits.family_available_count == 10
    assert htc.limits.nodearray_available_count == 9
    assert htc2.limits.nodearray_available_count == 9
    assert htc.available_count == 9
    # nodearray limit affects htc2 since max_count=10
    assert htc2.available_count == 9

    # now the regional is affected by our allocation
    assert htcspot.limits.family_max_count == 25
    assert htcspot.limits.family_available_count == 13 - 1

    assert hpc_pg.available_count == 7
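
The asserted limits fall out of the mocked quotas, assuming 4 vCPUs per VM for Standard_F4s / Standard_D4s_v3: on-demand buckets are capped by the family quota, while the spot bucket substitutes the regional quota for the family quota. A worked check of the numbers used above:

vcpus_per_vm = 4

# on-demand: the family quota governs the bucket
family_quota_core_count = 80
family_consumed_core_count = 40
assert family_quota_core_count // vcpus_per_vm == 20                                   # family_max_count
assert (family_quota_core_count - family_consumed_core_count) // vcpus_per_vm == 10    # family_available_count

# spot: the regional quota stands in for the family quota
regional_quota_core_count = 100
regional_consumed_core_count = 45
assert regional_quota_core_count // vcpus_per_vm == 25                                  # family_max_count
assert (regional_quota_core_count - regional_consumed_core_count) // vcpus_per_vm == 13  # family_available_count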
Example #12
def test_overscaling_error() -> None:
    bindings = MockClusterBinding()
    bindings.add_nodearray("htc", {})

    bindings.add_bucket("htc", "Standard_D16s_v3", 10, 10)
    bindings.add_bucket("htc", "Standard_E16_v3", 10, 1)
    bindings.add_bucket("htc",
                        "Standard_E2s_v3",
                        max_count=80,
                        available_count=4)

    node_mgr = _node_mgr(bindings)
    node_mgr.set_system_default_resources()
    result = node_mgr.allocate(
        {
            "ncpus": 1,
            "node.vm_size": ["Standard_E16_v3", "Standard_E2s_v3"]
        },
        slot_count=20,
        assignment_id="slots",
    )
    assert result
    assert len(result.nodes) == 3

    result = node_mgr.allocate(
        {
            "exclusive": True,
            "node.vm_size": "Standard_D16s_v3"
        },
        node_count=10,
        assignment_id="nodes",
    )
    assert result

    by_size = partition(node_mgr.new_nodes, lambda b: b.vm_size)

    assert "Standard_E2s_v3" in by_size
    assert len(by_size["Standard_E2s_v3"]) == 2
    assert len(by_size["Standard_E16_v3"]) == 1
    assert len(by_size["Standard_D16s_v3"]) == 10

    for node in node_mgr.get_nodes():
        print(node.name, node.vm_size, node.assignments)

    node_mgr.bootup()

    # recreate it - the bindings 'remembers' that we already created nodes
    node_mgr = _node_mgr(bindings)
    assert len(node_mgr.get_nodes()) == 13

    result = node_mgr.allocate({"ncpus": 1},
                               slot_count=100,
                               assignment_id="slots")
    assert result

    result = node_mgr.allocate(
        {
            "exclusive": True,
            "node.vm_size": "Standard_D16s_v3"
        },
        node_count=10,
        assignment_id="nodes",
    )
    assert result
    print()
    print()
    for node in node_mgr.get_nodes():
        print(node.name, node.vm_size, node.assignments)

    assert len(node_mgr.new_nodes) == 0
    assert len(node_mgr.get_nodes()) == 13

    by_size = partition(node_mgr.get_nodes(), lambda b: b.vm_size)

    assert "Standard_E2s_v3" in by_size
    assert len(by_size["Standard_E2s_v3"]) == 2
    assert len(by_size["Standard_E16_v3"]) == 1
    assert len(by_size["Standard_D16s_v3"]) == 10
Example #13
def test_default_resources() -> None:

    # set a global default
    node_mgr = _node_mgr(_bindings())

    for bucket in node_mgr.get_buckets():
        assert "vcpus" not in bucket.resources

    node_mgr.add_default_resource({}, "vcpus", 1)

    for bucket in node_mgr.get_buckets():
        assert 1 == bucket.resources["vcpus"]

    node_mgr.add_default_resource({}, "vcpus", 2)

    for bucket in node_mgr.get_buckets():
        assert 1 == bucket.resources["vcpus"]

    b = _bindings()
    b.add_nodearray("other", {"nodetype": "C"})
    b.add_bucket("other", "Standard_F16", 1, 1)

    # a few specific with finally applying a global default
    node_mgr = _node_mgr(b)

    node_mgr.add_default_resource({"nodetype": "A"}, "vcpus", 2)
    node_mgr.add_default_resource({"nodetype": "B"}, "vcpus",
                                  "node.vcpu_count")
    node_mgr.add_default_resource({}, "vcpus",
                                  lambda node: node.vcpu_count - 2)

    by_nodetype = partition(node_mgr.get_buckets(),
                            lambda b: b.resources["nodetype"])
    assert by_nodetype.get("A")[0].resources["vcpus"] == 2
    assert by_nodetype.get("B")[0].resources["vcpus"] == 8
    assert by_nodetype.get("C")[0].resources["vcpus"] == 14

    # use a Node function, which is essentially the same as the node
    # reference below
    node_mgr = _node_mgr(_bindings())
    node_mgr.add_default_resource({}, "vcpus", Node.vcpu_count)

    by_nodetype = partition(node_mgr.get_buckets(),
                            lambda b: b.resources["nodetype"])
    assert by_nodetype.get("A")[0].resources["vcpus"] == 4
    assert by_nodetype.get("B")[0].resources["vcpus"] == 8

    # use a node reference
    node_mgr = _node_mgr(_bindings())
    node_mgr.add_default_resource({}, "vcpus", "node.vcpu_count")

    by_nodetype = partition(node_mgr.get_buckets(),
                            lambda b: b.resources["nodetype"])
    assert by_nodetype.get("A")[0].resources["vcpus"] == 4
    assert by_nodetype.get("B")[0].resources["vcpus"] == 8

    node_mgr = _node_mgr(_bindings())
    node_mgr.add_default_resource({}, "add_vcpus", "node.vcpu_count", "add", 4)
    node_mgr.add_default_resource({}, "subtract_vcpus", "node.vcpu_count",
                                  "subtract", 4)
    node_mgr.add_default_resource({}, "multiply_vcpus", "node.vcpu_count",
                                  "multiply", 4)
    node_mgr.add_default_resource({}, "divide_vcpus", "node.vcpu_count",
                                  "divide", 4)
    node_mgr.add_default_resource({}, "divide_floor_vcpus", "node.vcpu_count",
                                  "divide_floor", 1.5)

    by_nodetype = partition(node_mgr.get_buckets(),
                            lambda b: b.resources["nodetype"])
    assert by_nodetype.get("A")[0].resources["add_vcpus"] == 8
    assert by_nodetype.get("B")[0].resources["add_vcpus"] == 12
    assert by_nodetype.get("A")[0].resources["subtract_vcpus"] == 0
    assert by_nodetype.get("B")[0].resources["subtract_vcpus"] == 4
    assert by_nodetype.get("A")[0].resources["multiply_vcpus"] == 16
    assert by_nodetype.get("B")[0].resources["multiply_vcpus"] == 32
    assert by_nodetype.get("A")[0].resources["divide_vcpus"] == 1
    assert by_nodetype.get("B")[0].resources["divide_vcpus"] == 2
    assert by_nodetype.get("A")[0].resources["divide_floor_vcpus"] == 2
    assert by_nodetype.get("B")[0].resources["divide_floor_vcpus"] == 5