Example #1
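These examples exercise the SchedulerNode, Job, and constraint APIs from the hpc.autoscale package (cyclecloud-scalelib). The snippets omit their imports; a typical import block is sketched below, with the caveat that the exact module paths are assumptions and should be checked against the installed version.

# Assumed import paths (hpc.autoscale / cyclecloud-scalelib); verify against
# the version you have installed before running these tests.
from hpc.autoscale.job.job import Job
from hpc.autoscale.node.node import Node, QUERYABLE_PROPERTIES
from hpc.autoscale.node.schedulernode import SchedulerNode
from hpc.autoscale.node.constraints import (
    ExclusiveNode,
    MinResourcePerNode,
    Never,
    NodePropertyConstraint,
    NodeResourceConstraint,
    Or,
    XOr,
    get_constraint,
    get_constraints,
    minimum_space,
)
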
def test_clone() -> None:
    orig = SchedulerNode("lnx0", {"ncpus": 4})
    orig.metadata["exists_in_both"] = True
    new = orig.clone()
    assert new.available["ncpus"] == 4
    assert new.resources["ncpus"] == 4
    new.available["ncpus"] -= 1
    assert new.available["ncpus"] == 3
    assert orig.available["ncpus"] == 4

    job = Job("1", {"ncpus": 2})
    new.decrement(job._constraints, assignment_id=job.name)
    assert new.available["ncpus"] == 1
    assert orig.available["ncpus"] == 4
    assert new.assignments == set(["1"])
    assert orig.assignments == set()

    orig.metadata["exists_in_orig"] = True
    new.metadata["exists_in_new"] = True

    assert orig.metadata["exists_in_both"] is True
    assert "exists_in_new" not in orig.metadata
    assert orig.metadata["exists_in_orig"] is True

    assert new.metadata["exists_in_both"] is True
    assert new.metadata["exists_in_new"] is True
    assert "exists_in_orig" not in new.metadata
def test_node_resource_constraint() -> None:
    assert (NodeResourceConstraint("blah", "A").to_dict() ==
            get_constraint({"blah": ["A"]}).to_dict())
    c = get_constraint({"blah": ["A"]})
    assert isinstance(c, NodeResourceConstraint)
    assert -1 == c.minimum_space(SchedulerNode(""))
    assert c.do_decrement(SchedulerNode(""))
    assert not c.satisfied_by_node(SchedulerNode("no-blah-define"))
    assert not c.satisfied_by_node(
        SchedulerNode("wrong-blah-define", {"blah": "B"}))
    assert c.satisfied_by_node(
        SchedulerNode("wrong-blah-define", {"blah": "A"}))
def test_or() -> None:
    assert (Or(NodeResourceConstraint("blah", "A"),
               NodeResourceConstraint("blah", "B")).to_dict() ==
            get_constraint({"or": [{"blah": ["A"]}, {"blah": ["B"]}]}).to_dict())

    or_expr = {"or": [{"blah": ["A"]}, {"blah": ["B"]}]}
    assert isinstance(get_constraint(or_expr), Or)
    c = get_constraint({"node.vcpu_count": 2})
    assert -1 == c.minimum_space(SchedulerNode(""))
    assert c.do_decrement(SchedulerNode(""))

def test_job_excl() -> None:
    s = SchedulerNode("")
    # typical exclusive behavior - one task per job per node
    job_excl = get_constraint({"exclusive": True})
    assert job_excl.job_exclusive
    assert isinstance(job_excl, ExclusiveNode)
    assert job_excl.satisfied_by_node(s)
    assert -1 == job_excl.minimum_space(s)
    assert job_excl.do_decrement(s)

    s.assign("1")
    job_excl.assignment_id = "1"
    # can't put the same jobid on the same node twice
    assert not job_excl.satisfied_by_node(s)
    assert not job_excl.do_decrement(s)
    assert s.closed
    assert 0 == job_excl.minimum_space(s)

def test_min_resource_per_node() -> None:
    assert (MinResourcePerNode("pcpus", 2).to_dict() ==
            get_constraint({"pcpus": 2}).to_dict())

    c = get_constraint({"pcpus": 2})
    assert isinstance(c, MinResourcePerNode)
    assert 0 == c.minimum_space(SchedulerNode(""))
    try:
        assert not c.do_decrement(SchedulerNode(""))
        assert False
    except RuntimeError:
        pass

    s = SchedulerNode("has-pcpus", {"pcpus": 4})
    assert s.available["pcpus"] == 4

    assert c.do_decrement(s)
    assert s.available["pcpus"] == 2
    assert s.resources["pcpus"] == 4
    assert not c.satisfied_by_node(SchedulerNode("no-blah-define"))
    assert not c.satisfied_by_node(
        SchedulerNode("wrong-blah-define", {"pcpus": 1}))
    assert c.satisfied_by_node(SchedulerNode("min-blah-define", {"pcpus": 2}))
    assert c.satisfied_by_node(
        SchedulerNode("more-blah-define", {"pcpus": 100}))
def test_never() -> None:
    c = Never("my message")
    node = SchedulerNode("test", {"memgb": 4.0})
    assert not c.satisfied_by_node(node)
    assert c.satisfied_by_node(node).reasons == ["my message"]

    c = get_constraint({"never": "my other message"})
    assert isinstance(c, Never)
    assert c.message == "my other message"
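
Never takes a node out of consideration while preserving the reason: the result of satisfied_by_node is falsy but still carries the message in its reasons list, as the test above asserts. A short sketch of surfacing that reason (the node name and message here are made up for illustration):

c = get_constraint({"never": "node drained for maintenance"})
result = c.satisfied_by_node(SchedulerNode("drained001", {"ncpus": 8}))
if not result:
    # the falsy result still explains why the node was rejected
    print(result.reasons)  # ['node drained for maintenance']
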
def test_node_property_constraint() -> None:
    assert (NodePropertyConstraint("vcpu_count", 2).to_dict() ==
            get_constraint({"node.vcpu_count": 2}).to_dict())
    assert isinstance(get_constraint({"node.vcpu_count": 2}),
                      NodePropertyConstraint)
    for attr in dir(Node):
        # skip private/upper-cased attributes; only lower-cased public
        # names are candidate queryable node properties
        if not attr[0].islower():
            continue
        try:
            get_constraint({"node.{}".format(attr): 2})
        except ValueError:
            assert attr not in QUERYABLE_PROPERTIES

    c = get_constraint({"node.vcpu_count": 2})
    assert -1 == c.minimum_space(SchedulerNode(""))
    assert c.do_decrement(SchedulerNode(""))

def test_task_excl() -> None:
    s = SchedulerNode("")

    # now to test task exclusive, where multiple tasks from the same
    # job can run on the same machine
    task_excl = get_constraint({"exclusive_task": True})
    assert not task_excl.job_exclusive
    assert isinstance(task_excl, ExclusiveNode)
    assert task_excl.satisfied_by_node(s)
    assert -1 == task_excl.minimum_space(s)
    assert task_excl.do_decrement(s)

    s.assign("1")
    task_excl.assignment_id = "1"
    assert task_excl.satisfied_by_node(s)
    assert task_excl.do_decrement(s)

    assert s.closed

    assert -1 == task_excl.minimum_space(s)
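
Read together, test_job_excl and test_task_excl isolate the one behavioural difference between exclusive and exclusive_task: after the first task of job "1" lands, the job-exclusive constraint rejects further tasks of that same job, while the task-exclusive constraint keeps accepting them. A compact restatement of that contrast, mirroring the call order of the two tests:

node_a = SchedulerNode("")
job_excl = get_constraint({"exclusive": True})
job_excl.do_decrement(node_a)
node_a.assign("1")
job_excl.assignment_id = "1"
print(bool(job_excl.satisfied_by_node(node_a)))   # False: one task per job per node

node_b = SchedulerNode("")
task_excl = get_constraint({"exclusive_task": True})
task_excl.do_decrement(node_b)
node_b.assign("1")
task_excl.assignment_id = "1"
print(bool(task_excl.satisfied_by_node(node_b)))  # True: same job can add more tasks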

Example #9
def onprem_burst_demand() -> None:
    onprem001 = SchedulerNode("onprem001",
                              resources={
                                  "onprem": True,
                                  "nodetype": "A",
                                  "ncpus": 16
                              })
    onprem002 = SchedulerNode("onprem002",
                              resources={
                                  "onprem": True,
                                  "nodetype": "A",
                                  "ncpus": 32
                              })

    # onprem002 already has 10 cores occupied
    onprem002.available["ncpus"] -= 10

    dcalc = new_demand_calculator(CONFIG,
                                  existing_nodes=[onprem001, onprem002])
    dcalc.node_mgr.add_default_resource({"node.nodearray": ["htc", "htcspot"]},
                                        "nodetype", "A")
    assert [b for b in dcalc.node_mgr.get_buckets()
            if b.nodearray == "htc"][0].resources["nodetype"] == "A"
    dcalc.node_mgr.add_default_resource({}, "nodetype", "B")

    assert [b for b in dcalc.node_mgr.get_buckets()
            if b.nodearray == "htc"][0].resources["nodetype"] == "A"
    # we want 50 ncpus, but only 38 are available on-premises, so we need to
    # burst 12 more cores.
    dcalc.add_job(Job("tc-100", {"nodetype": "A", "ncpus": 1}, iterations=50))

    demand_result = dcalc.finish()

    if not DRY_RUN:
        dcalc.bootup()

    # also note we can add defaults to the column by adding a :, like
    # onprem:False, as this is only defined on the onprem nodes and not
    # on the Azure nodes.
    print_demand(["name", "job_ids", "nodetype", "onprem:False", "*ncpus"],
                 demand_result)
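
The 12-core figure in the comment follows directly from the available counts: onprem001 contributes 16 cores and onprem002 contributes 32 - 10 = 22, leaving 38 on-premises cores against the 50 requested. A hypothetical helper that derives the burst size from the same SchedulerNode data:

def cores_to_burst(requested: int, onprem_nodes: list) -> int:
    # Hypothetical helper: cores that cannot be satisfied on-premises.
    available = sum(n.available["ncpus"] for n in onprem_nodes)
    return max(0, requested - available)

nodes = [SchedulerNode("onprem001", {"onprem": True, "nodetype": "A", "ncpus": 16}),
         SchedulerNode("onprem002", {"onprem": True, "nodetype": "A", "ncpus": 32})]
nodes[1].available["ncpus"] -= 10   # 10 cores already occupied on onprem002
print(cores_to_burst(50, nodes))    # 12 == 50 - (16 + 22)
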
def test_xor() -> None:
    assert (XOr(NodeResourceConstraint("blah", "A"),
                NodeResourceConstraint("blah", "B")).to_dict() ==
            get_constraint({"xor": [{"blah": ["A"]}, {"blah": ["B"]}]}).to_dict())

    xor_expr = {"xor": [{"blah": ["A"]}, {"blah": ["B"]}]}
    assert isinstance(get_constraint(xor_expr), XOr)

    c = XOr(NodeResourceConstraint("blah", "A"),
            NodeResourceConstraint("blah", "B"))
    assert not c.satisfied_by_node(SchedulerNode(""))
    assert not c.satisfied_by_node(SchedulerNode("", {"blah": ["A", "B"]}))
    assert c.satisfied_by_node(SchedulerNode("", {"blah": "A"}))
    assert c.satisfied_by_node(SchedulerNode("", {"blah": "B"}))
    assert c.do_decrement(SchedulerNode("", {"blah": "A"}))

def test_minimum_space() -> None:
    c = MinResourcePerNode("pcpus", 1)
    assert 1 == c.minimum_space(SchedulerNode("", {"pcpus": 1}))
    assert 2 == c.minimum_space(SchedulerNode("", {"pcpus": 2}))
    snode = SchedulerNode("", {"pcpus": 2})
    assert -1 == ExclusiveNode(assignment_id="1").minimum_space(snode)
    snode.assign("1")
    assert 0 == ExclusiveNode(assignment_id="1").minimum_space(snode)

Example #12
def test_placement_group() -> None:
    node = SchedulerNode("", {})
    node.exists = False

    node.placement_group = ""
    assert node.placement_group is None

    node.placement_group = "a"
    assert node.placement_group == "a"

    node.placement_group = "0"
    assert node.placement_group == "0"
    try:
        node.placement_group = "."
    except Exception:
        pass

    assert node.placement_group == "0"
    node.set_placement_group_escaped(".")
    assert node.placement_group == "_"

    node.exists = True
    try:
        node.placement_group = "123"
    except Exception:
        pass
    # changing the placement group of an existing node must be rejected,
    # so the escaped value set above survives either way
    assert node.placement_group == "_"

def test_memory() -> None:
    c = get_constraints([{"memgb": 1}])
    node = SchedulerNode("test", {"memgb": 4.0})
    m = minimum_space(c, node)
    assert isinstance(m, int)
    assert m == 4
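
Here minimum_space is the module-level helper that evaluates a list of constraints against a node: a 1 GB-per-task requirement on a 4 GB node leaves room for 4 tasks. A hedged sketch of the bottleneck behaviour with a second requirement added; this assumes the helper reports the tightest constraint, which matches its use above but is not asserted there:

constraints = get_constraints([{"memgb": 1}, {"pcpus": 2}])
node = SchedulerNode("test2", {"memgb": 4.0, "pcpus": 4})
# 4 GB / 1 GB allows 4 tasks, but 4 pcpus / 2 pcpus allows only 2,
# so the tighter constraint should win (assumption, see note above).
print(minimum_space(constraints, node))  # expected: 2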

Example #14
def snodes():
    return [SchedulerNode("ip-010A0005", {"slots": 4})]