Example #1
0
def nopgbindings():
    """Mock cluster with two nodearrays ("twocpus"/"fourcpus"), no placement groups."""
    mock = MockClusterBinding()
    mock.add_nodearray("twocpus", {"ncpus": 2})
    mock.add_nodearray("fourcpus", {"ncpus": 4})
    # Both arrays get identical Standard_F2 buckets: 100 max, 100 available.
    for array_name in ("twocpus", "fourcpus"):
        mock.add_bucket(array_name, "Standard_F2", 100, 100)
    return mock
Example #2
0
def test_choice_ordering() -> None:
    """Allocation honors the order of values in a list-valued resource constraint.

    With two 10-node buckets (nodetype "A" and "B"), requesting 15 exclusive
    nodes with nodetype=[first, second] must fully drain the first-listed
    bucket before spilling the remaining 5 nodes into the second.
    """
    bindings = MockClusterBinding()
    bindings.add_nodearray("array-a", {"nodetype": "A"})
    bindings.add_bucket("array-a", "Standard_F4", 10, 10)
    bindings.add_nodearray("array-b", {"nodetype": "B"})
    bindings.add_bucket("array-b", "Standard_F4s", 10, 10)

    register_result_handler(DefaultContextHandler("[test_or_ordering]"))
    # Run both orderings so the test is not sensitive to bucket enumeration order.
    for ordering in [["A", "B"], ["B", "A"]]:
        node_mgr = _node_mgr(bindings)
        hi, lo = node_mgr.get_buckets()

        # Normalize: `hi` is the bucket listed first in `ordering` (preferred).
        if hi.resources["nodetype"] != ordering[0]:
            hi, lo = lo, hi

        assert hi.available_count == 10
        assert lo.available_count == 10
        result = node_mgr.allocate(
            {
                "nodetype": ordering,
                "exclusive": True,
            },
            node_count=15,  # noqa: E231
        )
        # Preferred bucket fully drained (10); remainder (5) spills to the other.
        assert hi.available_count == 0
        assert lo.available_count == 5
        assert result

        by_array = partition(result.nodes, lambda n: n.resources["nodetype"])
        assert len(by_array[ordering[0]]) == 10
        assert len(by_array[ordering[1]]) == 5
Example #3
0
    def _bindings() -> MockClusterBinding:
        """Reuse the previous calculator's bindings, else build fresh hpc/htc mocks."""
        if previous_dcalc:
            return previous_dcalc.node_mgr.cluster_bindings

        fresh = MockClusterBinding()
        fresh.add_nodearray("hpc", {}, max_placement_group_size=5)
        fresh.add_bucket("hpc", "Standard_F4", 100, 100)
        fresh.add_nodearray("htc", {}, max_count=10)
        fresh.add_bucket("htc", "Standard_F4", 10, 10)
        return fresh
Example #4
0
def mixedbindings():
    """Mock cluster with one placement-group bucket ("hpc") and one plain bucket ("htc")."""
    mock = MockClusterBinding()
    mock.add_nodearray("hpc", {"ncpus": 2})
    mock.add_bucket("hpc", "Standard_F2", 100, 100, placement_groups=["pg0"])
    mock.add_nodearray("htc", {"ncpus": 4})
    mock.add_bucket("htc", "Standard_F2", 100, 100)
    return mock
Example #5
0
def test_delete_internally(bindings: MockClusterBinding) -> None:
    """Deleting a node marks it Terminating and removes it from get_nodes()."""
    bindings.add_node("htc-1", "htc")
    node_mgr = _node_mgr(bindings)

    current = node_mgr.get_nodes()
    assert len(current) == 1
    target = current[0]
    assert target.name == "htc-1"

    result = node_mgr.delete([target])
    assert result

    deleted = result.nodes
    assert len(deleted) == 1
    assert deleted[0].name == "htc-1"
    assert deleted[0].state == "Terminating"

    # The manager no longer reports the deleted node.
    assert not node_mgr.get_nodes()
Example #6
0
def test_custom_node_attrs_and_node_config() -> None:
    """Exercise node_attribute_overrides["Configuration"] vs software_configuration.

    Existing nodes reject attribute overrides (TypeError); newly allocated
    nodes surface "Configuration" overrides through software_configuration
    and become read-only once marked as existing.
    """
    b = MockClusterBinding()
    b.add_nodearray("htc", {},
                    software_configuration={"myscheduler": {
                        "A": 1
                    }})
    b.add_bucket("htc", "Standard_F2", 10, 10)
    b.add_node("htc-1", "htc")
    node_mgr = new_node_manager({"_mock_bindings": b})
    (existing_node, ) = node_mgr.get_nodes()

    # Nodes that already exist in the cluster reject attribute overrides.
    try:
        existing_node.node_attribute_overrides["willfail"] = 123
        assert False
    except TypeError:
        pass

    result = node_mgr.allocate({"exclusive": True}, node_count=2)
    assert result
    # Pick the one freshly created (not-yet-existing) node from the allocation.
    (node, ) = [n for n in result.nodes if not n.exists]

    # Overrides placed under "Configuration" show up in software_configuration.
    assert node.software_configuration.get("test_thing") is None
    node.node_attribute_overrides["Configuration"] = {"test_thing": "is set"}
    assert node.software_configuration.get("test_thing") == "is set"
    # A direct write may raise TypeError; either way the value must not stick.
    try:
        node.software_configuration["willfail"] = 123
        assert not node.software_configuration.get("willfail")
    except TypeError:
        pass

    # we won't handle dict merges here.
    assert node.software_configuration.get("myscheduler") == {"A": 1}

    node.node_attribute_overrides["Configuration"] = {"myscheduler": {"B": 2}}
    assert node.software_configuration.get("myscheduler") == {"B": 2}

    # if you want to add to the existing software_configuration, use
    # the node.software_configuration
    # NOTE(review): "myscsheduler" looks like a typo of "myscheduler" — as
    # written this stores under a dead key and only the replacement dict
    # assigned below matters. Confirm intent before fixing.
    node.node_attribute_overrides["Configuration"][
        "myscsheduler"] = node.software_configuration.get("myscheduler", {})
    node.node_attribute_overrides["Configuration"]["myscheduler"]["B"] = 2

    node.node_attribute_overrides["Configuration"] = {
        "myscheduler": {
            "A": 1,
            "B": 2
        }
    }

    # Writes succeed while the node is still local-only...
    node.software_configuration["willsucceed"] = 123
    node.exists = True
    # ...but the config becomes read-only once the node is marked existing.
    try:
        node.software_configuration["willfail"] = 123
        assert False
    except TypeError:
        pass
Example #7
0
def test_no_buckets():
    """A demand calculator with zero buckets rejects jobs with NoBucketsDefined."""
    empty_mgr = NodeManager(MockClusterBinding(), [])
    calc = DemandCalculator(
        empty_mgr, NullNodeHistory(), singleton_lock=util.NullSingletonLock()
    )

    outcome = calc._add_job(Job("1", {"ncpus": 2}))
    assert not outcome
    assert outcome.status == "NoBucketsDefined"
Example #8
0
def test_vm_family_limit(bindings: MockClusterBinding) -> None:
    """Allocation across two VM sizes of one family is capped by the family quota.

    Two buckets share a 120-core family quota: Standard_F4 (20 nodes * 4 cores
    = 80 cores) plus Standard_F2 (the remaining 40 cores / 2 = 20 nodes), so a
    request for 100 nodes can only yield 40.

    NOTE: the ``bindings`` fixture argument is deliberately ignored — this
    test needs its own family-quota configuration.
    """
    bindings = MockClusterBinding("clusty")
    bindings.add_nodearray("htc", {"nodetype": "A"})
    # Standard_F4: max_count caps at 20 nodes, consuming 80 of 120 quota cores.
    bindings.add_bucket(
        "htc",
        "Standard_F4",
        available_count=20,
        max_count=20,
        family_quota_count=30,
        family_quota_core_count=120,
        family_consumed_core_count=0,
    )
    # Standard_F2: the remaining 40 quota cores allow 20 more 2-core nodes.
    bindings.add_bucket(
        "htc",
        "Standard_F2",
        available_count=20,
        max_count=20,
        family_quota_count=30,
        family_quota_core_count=120,
        family_consumed_core_count=0,
    )
    nm = _node_mgr(bindings)
    result = nm.allocate({}, node_count=100, all_or_nothing=False)
    # 20 F4 + 20 F2 nodes exhaust the 120-core family quota.
    assert len(result.nodes) == 40
def _bindings():
    """Two customer_htc_flag nodearrays: 2-slot ("htc2") and 4-slot ("htc4")."""
    mock = MockClusterBinding()
    for array_name, slot_count in (("htc2", 2), ("htc4", 4)):
        mock.add_nodearray(
            array_name,
            {"slots": slot_count, "customer_htc_flag": True},
        )
    mock.add_bucket("htc2", "Standard_F2", 10, 8)
    mock.add_bucket("htc4", "Standard_F4", 5, 4)
    return mock
Example #10
0
def _bindings() -> MockClusterBinding:
    """Mock cluster "clusty": an "htc" F4s bucket and an "hpc" F8s bucket.

    Both buckets share the same 80-core family and regional quotas; the
    per-bucket quota node counts derive from each VM's core size.
    """
    mock = MockClusterBinding("clusty")
    mock.add_nodearray(
        "htc",
        {"nodetype": "A", "pcpus": 2},
        software_configuration={"custom_int": 100, "custom_str": "abc"},
    )
    # Common quota settings: nothing consumed, 80 cores for family and region.
    shared_quotas = dict(
        family_consumed_core_count=0,
        family_quota_core_count=80,
        regional_consumed_core_count=0,
        regional_quota_core_count=80,
    )
    mock.add_bucket(
        "htc",
        "Standard_F4s",
        max_count=20,
        available_count=10,
        family_quota_count=80 // 4,
        regional_quota_count=80 // 4,
        **shared_quotas,
    )

    mock.add_nodearray("hpc", {"nodetype": "B", "pcpus": 4})
    mock.add_bucket(
        "hpc",
        "Standard_F8s",
        max_count=20,
        available_count=10,
        family_quota_count=80 // 8,
        regional_quota_count=80 // 8,
        **shared_quotas,
    )

    return mock
Example #11
0
def test_one_bucket_match_mpi():
    """An MPI job that fits the single pg-enabled bucket lands both nodes there."""
    mock = MockClusterBinding()
    mock.add_nodearray("twocpus", {"ncpus": 2})
    mock.add_bucket("twocpus", "Standard_F2", 100, 100, placement_groups=["pg0"])
    calc = _new_dc(mock)
    outcome = calc._add_job(_mpi_job(nodes=2, resources={"ncpus": 2}))
    _assert_success(outcome, ["twocpus", "twocpus"])
Example #12
0
def test_one_bucket_no_match_mpi() -> None:
    """An MPI job asking for more ncpus than the bucket offers is rejected."""
    mock = MockClusterBinding()
    mock.add_nodearray("twocpus", {"ncpus": 2})
    mock.add_bucket("twocpus", "Standard_F2", 100, 100)
    calc = _new_dc(mock)
    outcome = calc._add_job(_mpi_job(resources={"ncpus": 3}))
    assert not outcome
Example #13
0
def test_one_bucket_no_match_htc():
    """An HTC job needing more ncpus than any node has reports insufficient resources."""
    mock = MockClusterBinding()
    mock.add_nodearray("twocpus", {"ncpus": 2})
    mock.add_bucket("twocpus", "Standard_F2", 100, 100)
    calc = _new_dc(mock)
    outcome = calc._add_job(_htc_job(resources={"ncpus": 3}))
    _assert_insufficient_resource(outcome)
Example #14
0
def test_one_bucket_match_htc_reuse():
    """Two sequential 1-cpu HTC jobs both succeed on the same 2-cpu nodearray."""
    mock = MockClusterBinding()
    mock.add_nodearray("twocpus", {"ncpus": 2})
    mock.add_bucket("twocpus", "Standard_F2", 100, 100)
    calc = _new_dc(mock)

    for job_name in ("1", "2"):
        outcome = calc._add_job(_htc_job(job_name=job_name, resources={"ncpus": 1}))
        _assert_success(outcome, ["twocpus"])
Example #15
0
    def next_dc(existing_nodes: List[Node]) -> DemandCalculator:
        """Build a DemandCalculator over num_arrays x num_buckets southcentralus buckets.

        VM sizes are picked from the region's catalog via `vm_indices`.
        """
        mock = MockClusterBinding()
        region_sizes = list(vm_sizes.VM_SIZES["southcentralus"].keys())

        for array_idx in range(num_arrays):
            array_name = "nodearray{}".format(array_idx)
            mock.add_nodearray(array_name, {}, location="southcentralus")
            for bucket_idx in range(num_buckets):
                size_idx = vm_indices[array_idx * num_buckets + bucket_idx]
                mock.add_bucket(
                    array_name,
                    region_sizes[size_idx],
                    max_count=10,
                    available_count=10,
                )

        return _new_dc(mock)
Example #16
0
    def next_node_mgr(existing_nodes: List[Node]) -> NodeManager:
        """Build a NodeManager over num_arrays x num_buckets southcentralus buckets.

        VM sizes come straight from `vm_size_choices`.
        """
        mock = MockClusterBinding()

        for array_idx in range(num_arrays):
            array_name = "nodearray{}".format(array_idx)
            mock.add_nodearray(array_name, {}, location="southcentralus")
            for bucket_idx in range(num_buckets):
                chosen_size = vm_size_choices[array_idx * num_buckets + bucket_idx]
                mock.add_bucket(
                    array_name,
                    chosen_size,
                    max_count=10,
                    available_count=10,
                )

        return _node_mgr(mock)
Example #17
0
def test_basic() -> None:
    """Bucket decrement/rollback/commit bookkeeping against available_count."""
    mock = MockClusterBinding()
    mock.add_nodearray("hpc", {"ncpus": "node.vcpu_count"})
    mock.add_bucket("hpc", "Standard_F4", max_count=100, available_count=100)
    mgr = new_node_manager({"_mock_bindings": mock})
    bucket = mgr.get_buckets()[0]

    assert bucket.available_count == 100
    # An uncommitted decrement is undone by rollback.
    bucket.decrement(5)
    assert bucket.available_count == 95
    bucket.rollback()
    assert bucket.available_count == 100
    # A committed decrement sticks...
    bucket.decrement(5)
    assert bucket.available_count == 95
    bucket.commit()
    assert bucket.available_count == 95
    # ...and rollback only restores to the last commit point.
    bucket.decrement(5)
    assert bucket.available_count == 90
    bucket.rollback()
    assert bucket.available_count == 95
Example #18
0
def test_mock_bindings2() -> None:
    """Family quota math across two same-family buckets plus a second region.

    The two westus2 Standard_E* buckets share a family: 160 quota cores with
    144 (72*2) consumed leaves 16 cores, i.e. 8 two-core E2_v3 nodes or 4
    four-core E4_v3 nodes. Allocating one exclusive E4_v3 must ripple through
    every related limit while leaving the eastus bucket untouched.
    """
    bindings = MockClusterBinding()
    bindings.add_nodearray("w", {}, location="westus2", max_count=8)
    bindings.add_bucket(
        "w",
        "Standard_E2_v3",
        max_count=80,
        available_count=8,
        family_consumed_core_count=72 * 2,
        family_quota_core_count=160,
        family_quota_count=80,
    )
    bindings.add_bucket(
        "w",
        "Standard_E4_v3",
        max_count=40,
        available_count=4,
        family_consumed_core_count=72 * 2,
        family_quota_core_count=160,
        family_quota_count=80,
    )
    # Different family (D), so no shared-quota interaction with the E buckets.
    bindings.add_bucket("w",
                        "Standard_D8s_v3",
                        max_count=80,
                        available_count=8)

    bindings.add_nodearray("e", {}, location="eastus")
    bindings.add_bucket("e", "Standard_E2_v3", max_count=20, available_count=4)
    node_mgr = _node_mgr(bindings)
    by_size = partition_single(node_mgr.get_buckets(), lambda b:
                               (b.location, b.vm_size))

    # Initial availability matches the configured counts.
    assert by_size[("westus2", "Standard_E2_v3")].available_count == 8
    assert by_size[("westus2",
                    "Standard_E2_v3")].limits.nodearray_available_count == 8
    assert by_size[("westus2", "Standard_E2_v3")].limits.family_max_count == 80
    assert by_size[("westus2", "Standard_E4_v3")].available_count == 4
    assert by_size[("westus2", "Standard_D8s_v3")].available_count == 8
    assert by_size[("eastus", "Standard_E2_v3")].available_count == 4

    result = node_mgr.allocate(
        {
            "node.vm_size": "Standard_E4_v3",
            "exclusive": True,
            "node.location": "westus2",
        },
        node_count=1,
    )

    assert result, "\n".join(result.reasons)

    # One 4-core E4_v3 consumed: the "w" nodearray loses a node slot (8 -> 7),
    # the shared E family loses 4 cores (E2: 8 -> 6, E4: 4 -> 3), the D8s
    # bucket only loses the nodearray slot, and eastus is unaffected.
    assert by_size[("westus2",
                    "Standard_E2_v3")].limits.nodearray_available_count == 7
    assert by_size[("westus2", "Standard_E2_v3")].available_count == 6
    assert by_size[("westus2", "Standard_E4_v3")].available_count == 3
    assert by_size[("westus2", "Standard_D8s_v3")].available_count == 7
    assert by_size[("eastus", "Standard_E2_v3")].available_count == 4
Example #19
0
def test_overscaling_error() -> None:
    """Recreating a node manager after bootup must not over-allocate.

    Phase 1 creates 3 "slots" nodes (one E16 + two E2s) and 10 exclusive
    D16s nodes, then boots them. Phase 2 rebuilds a manager from the same
    bindings, sees the 13 nodes as existing, and must satisfy the same
    requests without creating any new nodes.
    """
    bindings = MockClusterBinding()
    bindings.add_nodearray("htc", {})

    bindings.add_bucket("htc", "Standard_D16s_v3", 10, 10)
    bindings.add_bucket("htc", "Standard_E16_v3", 10, 1)
    bindings.add_bucket("htc",
                        "Standard_E2s_v3",
                        max_count=80,
                        available_count=4)

    node_mgr = _node_mgr(bindings)
    node_mgr.set_system_default_resources()
    # 20 slots spread across the preferred E16 bucket (only 1 available)
    # plus E2s nodes for the remainder — yields 3 nodes total.
    result = node_mgr.allocate(
        {
            "ncpus": 1,
            "node.vm_size": ["Standard_E16_v3", "Standard_E2s_v3"]
        },
        slot_count=20,
        assignment_id="slots",
    )
    assert result
    assert len(result.nodes) == 3

    result = node_mgr.allocate(
        {
            "exclusive": True,
            "node.vm_size": "Standard_D16s_v3"
        },
        node_count=10,
        assignment_id="nodes",
    )
    assert result

    by_size = partition(node_mgr.new_nodes, lambda b: b.vm_size)

    assert "Standard_E2s_v3" in by_size
    assert len(by_size["Standard_E2s_v3"]) == 2
    assert len(by_size["Standard_E16_v3"]) == 1
    assert len(by_size["Standard_D16s_v3"]) == 10

    for node in node_mgr.get_nodes():
        print(node.name, node.vm_size, node.assignments)

    node_mgr.bootup()

    # recreate it - the bindings 'remembers' that we already created nodes
    node_mgr = _node_mgr(bindings)
    assert len(node_mgr.get_nodes()) == 13

    result = node_mgr.allocate({"ncpus": 1},
                               slot_count=100,
                               assignment_id="slots")
    assert result

    result = node_mgr.allocate(
        {
            "exclusive": True,
            "node.vm_size": "Standard_D16s_v3"
        },
        node_count=10,
        assignment_id="nodes",
    )
    assert result
    print()
    print()
    for node in node_mgr.get_nodes():
        print(node.name, node.vm_size, node.assignments)

    # Everything was satisfied by the 13 pre-existing nodes — no scale-up.
    assert len(node_mgr.new_nodes) == 0
    assert len(node_mgr.get_nodes()) == 13

    by_size = partition(node_mgr.get_nodes(), lambda b: b.vm_size)

    assert "Standard_E2s_v3" in by_size
    assert len(by_size["Standard_E2s_v3"]) == 2
    assert len(by_size["Standard_E16_v3"]) == 1
    assert len(by_size["Standard_D16s_v3"]) == 10
Example #20
0
def test_empty_cluster() -> None:
    """A cluster with no nodearrays boots fine but cannot allocate anything."""
    mgr = _node_mgr(MockClusterBinding("clusty"))
    assert mgr.bootup()

    outcome = mgr.allocate({}, node_count=1)
    assert not outcome
def test_family_and_spots() -> None:
    """Spot nodearrays substitute regional quota for family quota.

    On-demand buckets are limited by their VM family quota, while the spot
    ("htcspot") nodearray carries a zero family quota and falls back to the
    regional quota. Also checks placement-group sizing and cross-bucket
    nodearray limits.
    """
    bindings = MockClusterBinding("clusty")
    bindings.add_nodearray("htc", {},
                           spot=False,
                           max_count=10,
                           max_core_count=400)
    bindings.add_nodearray("hpc", {}, spot=False, max_placement_group_size=7)
    bindings.add_bucket(
        "htc",
        "Standard_F4s",
        max_count=20,
        available_count=10,
        family_consumed_core_count=40,
        family_quota_core_count=80,
        family_quota_count=20,
        regional_consumed_core_count=45,
        regional_quota_core_count=100,
        regional_quota_count=25,
    )

    bindings.add_bucket(
        "htc",
        "Standard_D4s_v3",
        max_count=20,
        available_count=10,
        family_consumed_core_count=40,
        family_quota_core_count=80,
        family_quota_count=20,
        regional_consumed_core_count=45,
        regional_quota_core_count=100,
        regional_quota_count=25,
    )

    bindings.add_bucket(
        "hpc",
        "Standard_D4s_v3",
        max_count=20,
        available_count=10,
        family_consumed_core_count=40,
        family_quota_core_count=80,
        family_quota_count=20,
        regional_consumed_core_count=45,
        regional_quota_core_count=100,
        regional_quota_count=25,
    )

    # Same nodearray/VM size as above, but pinned to placement group "123".
    bindings.add_bucket(
        "hpc",
        "Standard_D4s_v3",
        max_count=20,
        available_count=10,
        family_consumed_core_count=40,
        family_quota_core_count=80,
        family_quota_count=20,
        regional_consumed_core_count=45,
        regional_quota_core_count=100,
        regional_quota_count=25,
        placement_groups=["123"],
    )

    # Spot nodearray: family quota zeroed out, only regional quota applies.
    bindings.add_nodearray("htcspot", {}, spot=True)
    bindings.add_bucket(
        "htcspot",
        "Standard_F4s",
        max_count=20,
        available_count=10,
        family_consumed_core_count=0,
        family_quota_core_count=0,
        family_quota_count=0,
        regional_consumed_core_count=45,
        regional_quota_core_count=100,
        regional_quota_count=25,
    )

    node_mgr = new_node_manager({"_mock_bindings": bindings})
    # NOTE(review): partition keys here are (nodearray, vm_size) tuples and
    # values are lists of buckets, so Dict[str, NodeBucket] is not accurate —
    # tighten to Dict[Tuple[str, str], List[NodeBucket]] if Tuple is imported.
    by_key: Dict[str,
                 NodeBucket] = partition(node_mgr.get_buckets(), lambda b:
                                         (b.nodearray, b.vm_size))
    htc = by_key[("htc", "Standard_F4s")][0]
    htc2 = by_key[("htc", "Standard_D4s_v3")][0]
    htcspot = by_key[("htcspot", "Standard_F4s")][0]
    hpcs = by_key[("hpc", "Standard_D4s_v3")]
    hpc_pg = [x for x in hpcs if x.placement_group][0]

    # ondemand instances use actual family quota
    assert htc.limits.family_max_count == 20
    assert htc2.limits.family_max_count == 20
    assert htc.limits.family_available_count == 10
    assert htc2.limits.family_available_count == 10

    # spot instances replace family with regional
    # regional: quota_count 25; (100 - 45) remaining cores / 4 per node -> 13.
    assert htcspot.limits.family_max_count == 25
    assert htcspot.limits.family_available_count == 13

    assert node_mgr.allocate(
        {
            "node.nodearray": "htc",
            "node.vm_size": "Standard_F4s"
        },
        node_count=1)
    # ondemand instances use actual family quota
    assert htc.limits.family_max_count == 20
    assert htc2.limits.family_max_count == 20
    assert htc.limits.family_available_count == 9
    assert htc2.limits.family_available_count == 10
    assert htc.limits.nodearray_available_count == 9
    assert htc2.limits.nodearray_available_count == 9
    assert htc.available_count == 9
    # nodearray limit affects htc2 since max_count=10
    assert htc2.available_count == 9

    # now the regional is affected by our allocation
    assert htcspot.limits.family_max_count == 25
    assert htcspot.limits.family_available_count == 13 - 1

    # Placement-group bucket is capped by hpc's max_placement_group_size=7.
    assert hpc_pg.available_count == 7
Example #22
0
def test_mock_bindings(bindings: MockClusterBinding) -> None:
    """Family accounting on the fixture buckets, then placement-group bucket math.

    The second half builds its own "haspgs" cluster to verify that pg buckets
    are capped by max_placement_group_size and floored by the overall
    available count as nodes are added.
    """
    ctx = register_result_handler(DefaultContextHandler("[test]"))
    hpc, htc = _node_mgr(bindings).get_buckets()
    # Bucket order is not guaranteed; normalize so `hpc` really is "hpc".
    if hpc.nodearray != "hpc":
        hpc, htc = htc, hpc
    assert hpc.nodearray == "hpc"
    assert htc.nodearray == "htc"

    assert hpc.family_available_count == 10
    assert hpc.available_count == 10

    assert hpc.family_available_count == 10
    assert htc.family_available_count == 20

    # An uncommitted decrement only affects the local bucket; once committed,
    # the shared family accounting is reflected in the htc bucket as well.
    hpc.decrement(1)
    assert hpc.family_available_count == 9
    assert htc.family_available_count == 20
    hpc.commit()
    assert hpc.family_available_count == 9
    assert htc.family_available_count == 18

    # Incrementing back and committing restores both views.
    hpc.increment(1)
    hpc.commit()
    assert hpc.family_available_count == 10
    assert htc.family_available_count == 20

    ctx.set_context("[failure]")
    nm = _node_mgr(bindings)

    b = MockClusterBinding()
    b.add_nodearray("haspgs", {}, max_placement_group_size=20)
    b.add_bucket(
        "haspgs",
        "Standard_F4",
        100,
        100,
        placement_groups=["pg0", "pg1"],
    )
    # make sure we take the max_placement_group_size (20) into account
    # and that we have the non-pg and 2 pg buckets.
    nm = _node_mgr(b)
    no_pg, pg0, pg1 = sorted(nm.get_buckets(),
                             key=lambda b: b.placement_group or "")
    assert no_pg.available_count == 100
    assert pg0.available_count == 20
    assert pg1.available_count == 20

    # let's add a node to pg0 (100 - 1, 20 - 1, 20)
    b.add_node("haspgs-pg0-1", "haspgs", "Standard_F4", placement_group="pg0")

    nm = _node_mgr(b)
    no_pg, pg0, pg1 = sorted(nm.get_buckets(),
                             key=lambda b: b.placement_group or "")
    assert no_pg.available_count == 99
    assert pg0.available_count == 19
    assert pg1.available_count == 20

    # let's add a node to pg1 (100 - 2, 20 - 1, 20 - 1)
    b.add_node("haspgs-pg1-1", "haspgs", "Standard_F4", placement_group="pg1")

    nm = _node_mgr(b)
    no_pg, pg0, pg1 = sorted(nm.get_buckets(),
                             key=lambda b: b.placement_group or "")
    assert no_pg.available_count == 98
    assert pg0.available_count == 19
    assert pg1.available_count == 19

    # let's add 90 htc nodes so that our pg available counts are floored
    # by the overall available_count
    for i in range(90):
        b.add_node("haspgs-{}".format(i + 1), "haspgs", "Standard_F4")

    nm = _node_mgr(b)
    no_pg, pg0, pg1 = sorted(nm.get_buckets(),
                             key=lambda b: b.placement_group or "")
    assert no_pg.available_count == 8
    assert pg0.available_count == 8
    assert pg1.available_count == 8

    # lastly, add another node to a pg and see that all of avail go down
    b.add_node("haspgs-pg1-2", "haspgs", "Standard_F4", placement_group="pg1")
    nm = _node_mgr(b)
    no_pg, pg0, pg1 = sorted(nm.get_buckets(),
                             key=lambda b: b.placement_group or "")
    assert no_pg.available_count == 7
    assert pg0.available_count == 7
    assert pg1.available_count == 7
Example #23
0
def test_mock_bindings3() -> None:
    """Smoke test: a single westus2 bucket builds a node manager without error."""
    mock = MockClusterBinding()
    mock.add_nodearray("w", {}, location="westus2", max_count=8)
    mock.add_bucket("w", "Standard_E2_v3", max_count=80, available_count=8)
    _node_mgr(mock)