def test_users() -> None:
    ge_env = common_ge_env()
    hpc_q = ge_env.queues["hpc.q"]
    # make sure common_ge_env didn't add users
    assert not hpc_q.user_lists
    assert not hpc_q.xuser_lists

    # this may seem odd, but this is how these become expressed
    # i.e. this hostgroup allows users ryan and ben, but then excludes ben,
    # so effectively only ryan can run here
    hpc_q.user_lists["@hpc.q_rr0"] = ["ryan", "ben"]
    hpc_q.xuser_lists["@hpc.q_rr0"] = ["ben"]
    hg = ge_env.hostgroups["@hpc.q_rr0"]
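    # bind the queue and hostgroup together; the final argument is the sequence number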
    bh = BoundHostgroup(hpc_q, hg, 0)

    # no user, ben, and random should never succeed
    assert isinstance(bh.make_constraint(ge_env, user=None), Never)
    assert isinstance(bh.make_constraint(ge_env, user="ben"), Never)
    assert isinstance(bh.make_constraint(ge_env, user="random"), Never)

    # ok, the real constraint - user==ryan
    user_cons = bh.make_constraint(ge_env, user="ryan")
    assert isinstance(user_cons, And)

    node = SchedulerNode("tux")
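    # poke the name-mangled private field so the node reports nodearray "hpc"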
    node._Node__nodearray = "hpc"
    assert user_cons.satisfied_by_node(node)
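    # nested helper (its enclosing test is not shown in this fragment):
    # builds a node with an optional pcpu resource, registers a numeric
    # pcpu complex, sets queue- and hostgroup-level quotas, and applies
    # them with process_quotas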
    def run_test(
        ctype: str,
        node_pcpu: Optional[N],
        hg_pcpu: N,
        q_default_pcpu: N,
        complex_default: Optional[N],
    ) -> SchedulerNode:
        cast = float if ctype == "DOUBLE" else int
        node_res = {}
        if node_pcpu is not None:
            node_res["pcpu"] = cast(node_pcpu)
            node_res["p"] = cast(node_pcpu)

        node = SchedulerNode("tux", node_res)
        ge_env = common_ge_env()

        q = ge_env.queues["hpc.q"]
        complex_default_str = (
            "NONE" if complex_default is None else str(complex_default)
        )
        ge_env.complexes["pcpu"] = Complex(
            "pcpu", "p", ctype, "<=", True, True, complex_default_str, 0
        )

        q.complex_values[None] = {"pcpu": cast(q_default_pcpu)}
        q.complex_values["@hpc.q"] = {"pcpu": cast(hg_pcpu)}

        assert node.available.get("pcpu") == node_pcpu
        process_quotas(node, ge_env.complexes, ["@hpc.q"], [q])
        return node
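
    # a minimal, hypothetical invocation sketch (the enclosing test's real
    # argument values are not shown in this fragment; these are assumptions):
    #
    #   node = run_test(
    #       "INT", node_pcpu=8, hg_pcpu=6, q_default_pcpu=4, complex_default=None
    #   )
    #   # expect the hostgroup quota (6) to bound the node's 8 pcpus
    #   assert node.available.get("hpc.q@pcpu") == 6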
def test_projects() -> None:
    ge_env = common_ge_env()
    hpc_q = ge_env.queues["hpc.q"]
    # make sure common_ge_env didn't add projects
    assert not hpc_q.projects
    assert not hpc_q.xprojects

    # this may seem odd, but this is how these become expressed
    # i.e. this hostgroup allows projects prj1 and prj2, but then excludes
    # prj2, so effectively only prj1 can run here
    hpc_q.projects["@hpc.q_rr0"] = ["prj1", "prj2"]
    hpc_q.xprojects["@hpc.q_rr0"] = ["prj2"]
    hg = ge_env.hostgroups["@hpc.q_rr0"]
    bh = BoundHostgroup(hpc_q, hg, 0)

    # no project, prj2, and random should never succeed
    # yes - GridEngine will NOT schedule a job if a project is not defined
    assert isinstance(bh.make_constraint(ge_env, project=None), Never)
    assert isinstance(bh.make_constraint(ge_env, project="prj2"), Never)
    assert isinstance(bh.make_constraint(ge_env, project="random"), Never)

    # ok, the real constraint - project==prj1
    prj1_cons = bh.make_constraint(ge_env, project="prj1")
    assert isinstance(prj1_cons, And)

    node = SchedulerNode("tux")
    node._Node__nodearray = "hpc"
    assert prj1_cons.satisfied_by_node(node)
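    # nested helper (enclosing test not shown): same pattern as the pcpu
    # variant above, but exercising a BOOL-typed "lic" complex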
    def run_test(
        ctype: str,
        node_lic: Optional[bool],
        hg_lic: bool,
        q_default_lic: bool,
        complex_default: Optional[bool],
    ) -> SchedulerNode:
        node_res = {}
        if node_lic is not None:
            node_res["lic"] = node_lic
            node_res["l"] = node_lic

        node = SchedulerNode("tux", node_res)
        ge_env = common_ge_env()

        q = ge_env.queues["hpc.q"]
        complex_default_str = (
            "NONE" if complex_default is None else str(complex_default)
        )
        ge_env.complexes["lic"] = Complex(
            "lic", "l", ctype, "<=", True, True, complex_default_str, 0
        )

        q.complex_values[None] = {"lic": q_default_lic}
        q.complex_values["@hpc.q"] = {"lic": hg_lic}

        assert node.available.get("lic") == node_lic
        process_quotas(node, ge_env.complexes, ["@hpc.q"], [q])
        return node
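
    # hypothetical invocation sketch (argument values are assumptions):
    #
    #   node = run_test("BOOL", node_lic=True, hg_lic=True,
    #                   q_default_lic=False, complex_default=None)
    #   assert node.available.get("lic") is True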
def test_initialize() -> None:
    ge_env = common_ge_env()

    # ok - make sure we propagate an unknown error
    ge_env.qbin.qconf = mock.MagicMock(
        ["-sce", "ccnodeid"],
        side_effect=CalledProcessError(1,
                                       cmd=["-sce", "ccnodeid"],
                                       output="Unknown error".encode()),
    )
    ge_driver = GridEngineDriver({}, ge_env)
    try:
        ge_driver.initialize_environment()
    except CalledProcessError as e:
        assert e.stdout.decode() == "Unknown error"
    ge_env.qbin.qconf.assert_has_calls(
        [mock.call(["-sss"]), mock.call(["-sc"])])

    # read_only mode: a missing ccnodeid complex is tolerated and nothing is created
    ge_env.qbin.qconf = mock.MagicMock(
        ["-sc"],
        return_value="",
    )
    ge_driver = GridEngineDriver({"read_only": True}, ge_env)
    ge_driver.initialize_environment()

    # now it does exist
    ge_env.qbin.qconf = mock.MagicMock(return_value="ccnodeid ccnodeid ...")
    ge_driver = GridEngineDriver({}, ge_env)
    ge_driver.initialize_environment()

    # ge_env.qbin.qconf.assert_called_once()

    # TODO I can't figure out how to make this throw
    # an exception on the first call but not the next.
    # now it does not exist, so it will be created

    class FakeQConf:
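        # scripted stand-in for qconf: expects -sss, then -sc (complex is
        # missing), then -Ace to create it, in exactly that order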
        def __init__(self) -> None:
            self.call_count = 0

        def __call__(self, args):  # type: ignore
            self.call_count += 1
            if args == ["-sss"]:
                assert self.call_count == 1
                return ""
            elif args == ["-sc"]:
                assert self.call_count == 2
                return ""
            elif args[0] == "-Ace":
                assert self.call_count == 3
                return ""
            else:
                raise AssertionError("Unexpected call {}".format(args))

    ge_env.qbin.qconf = FakeQConf()
    ge_driver = GridEngineDriver({}, ge_env)
    ge_driver.initialize_environment()
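# demonstrates quota-bound consumables: one node-level pcpu pool is viewed
# through per-queue quotas, and decrementing via one queue also lowers the
# shared total and floors the other queues' views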
def test_quota_bound_resource_number() -> None:
    ge_env = common_ge_env()
    hpcq = ge_env.queues["hpc.q"]
    htcq = ge_env.queues["htc.q"]
    hpcq.complex_values[None] = {"pcpu": 6}
    htcq.complex_values[None] = {"pcpu": 4}

    node = SchedulerNode("tux", resources={"pcpu": 8})

    node.available["hpc.q@pcpu"] = 6
    node.available["htc.q@pcpu"] = 4

    c1 = make_quota_bound_consumable_constraint("pcpu", 1, hpcq, ge_env, ["@hpc.q"])
    c2 = make_quota_bound_consumable_constraint("pcpu", 2, htcq, ge_env, ["@htc.q"])
    # imagine the node has 8 pcpus, but hpc.q limits it to 6, and htc.q to 4
    assert node.available["pcpu"] == 8

    assert node.available["hpc.q@pcpu"] == 6
    assert node.available["htc.q@pcpu"] == 4

    # the total amount and hpc.q are decremented, htc.q untouched
    assert c1.satisfied_by_node(node)
    assert c1.do_decrement(node)
    assert node.available["pcpu"] == 7
    assert node.available["hpc.q@pcpu"] == 5
    assert node.available["htc.q@pcpu"] == 4

    # the total amount and htc.q are decremented, hpc.q untouched
    assert c2.satisfied_by_node(node)
    assert c2.do_decrement(node)
    assert node.available["pcpu"] == 5
    assert node.available["hpc.q@pcpu"] == 5
    assert node.available["htc.q@pcpu"] == 2

    # the total amount and htc.q are decremented, hpc.q is floored
    # to the total amount
    assert c2.satisfied_by_node(node)
    assert c2.do_decrement(node)
    assert node.available["pcpu"] == 3
    assert node.available["hpc.q@pcpu"] == 3
    assert node.available["htc.q@pcpu"] == 0

    # take out the remaining amount
    assert not c2.satisfied_by_node(node)
    for _ in range(3):
        assert c1.satisfied_by_node(node)
        assert c1.do_decrement(node)

    assert not c1.satisfied_by_node(node)
    assert node.available["pcpu"] == 0
    assert node.available["hpc.q@pcpu"] == 0
    assert node.available["htc.q@pcpu"] == 0
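    # nested helper (enclosing test not shown): string-complex variant;
    # CSTRING complexes compare case-insensitively, so values are lowercased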
    def run_test(
        ctype: str,
        node_lic: Optional[str],
        hg_lic: str,
        q_default_lic: str,
        complex_default: Optional[str],
    ) -> SchedulerNode:
        def cast(x: Optional[str]) -> Optional[str]:
            if x is None:
                return None
            if ctype == "CSTRING":
                return x.lower()
            return x

        node_res = {}
        if node_lic is not None:
            node_res["lic"] = cast(node_lic)
            node_res["l"] = cast(node_lic)

        node = SchedulerNode("tux", node_res)
        ge_env = common_ge_env()

        q = ge_env.queues["hpc.q"]
        complex_default_str = (
            "NONE" if complex_default is None else str(complex_default)
        )
        ge_env.complexes["lic"] = Complex(
            "lic", "l", ctype, "<=", True, True, complex_default_str, 0
        )

        q.complex_values[None] = {"lic": cast(q_default_lic)}
        q.complex_values["@hpc.q"] = {"lic": cast(hg_lic)}

        assert node.available.get("lic") == node_lic
        process_quotas(node, ge_env.complexes, ["@hpc.q"], [q])
        return node
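
    # hypothetical invocation sketch (argument values are assumptions):
    #
    #   node = run_test("CSTRING", node_lic="abc", hg_lic="ABC",
    #                   q_default_lic="abc", complex_default=None)
    #   # CSTRING values are lowercased, so "ABC" is stored as "abc"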
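# preprocess_config fills in defaults (per-hostgroup placement groups, the
# slots/s default-resource pair, ccnodeid in relevant_complexes) while
# leaving user-supplied settings untouched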
def test_preprocess_configs() -> None:
    ge_env = common_ge_env()

    d = driver.new_driver({}, ge_env)

    # If the user did not define any placement groups, define the defaults.
    pgs = [
        b.name.replace(".", "_").replace("@", "")
        for b in ge_env.queues["hpc.q"].bound_hostgroups.values()
    ]

    assert {
        "nodearrays": {
            "default": {
                "placement_groups": pgs
            }
        },
        "default_resources": [],
    } == d.preprocess_config({})

    # if they did define defaults, make no changes
    custom_config = {
        "nodearrays": {
            "default": {
                "placement_groups": ["hpc_q_mpi_CUSTOM"]
            }
        },
        "default_resources": [],
    }
    assert custom_config == d.preprocess_config(custom_config)

    # ensure that defining either the shortcut ("s") or the long form
    # ("slots") of a resource also creates its counterpart for the user.
    assert {
        "nodearrays": {
            "default": {
                "placement_groups": pgs
            }
        },
        "default_resources": [
            {
                "name": "s",
                "select": {},
                "value": 1
            },
            {
                "name": "slots",
                "select": {},
                "value": 1
            },
        ],
    } == d.preprocess_config(
        {"default_resources": [{
            "name": "s",
            "select": {},
            "value": 1
        }]})

    assert {
        "nodearrays": {
            "default": {
                "placement_groups": pgs
            }
        },
        "default_resources": [
            {
                "name": "slots",
                "select": {},
                "value": 1
            },
            {
                "name": "s",
                "select": {},
                "value": 1
            },
        ],
    } == d.preprocess_config(
        {"default_resources": [{
            "name": "slots",
            "select": {},
            "value": 1
        }]})

    # ensure that ccnodeid is appended to relevant_complexes by default
    # note: if relevant_complexes is not defined, then every complex is 'relevant'
    # so no need to add it (and it would in fact break things)
    assert {
        "nodearrays": {
            "default": {
                "placement_groups": pgs
            }
        },
        "default_resources": [
            {
                "name": "slots",
                "select": {},
                "value": 1
            },
            {
                "name": "s",
                "select": {},
                "value": 1
            },
        ],
        "gridengine": {
            "relevant_complexes": ["slots", "ccnodeid"]
        },
    } == d.preprocess_config({
        "default_resources": [{
            "name": "slots",
            "select": {},
            "value": 1
        }],
        "gridengine": {
            "relevant_complexes": ["slots"]
        },
    })
def test_custom_parser() -> None:
    ge_env = common_ge_env()
    qc = driver.HostgroupConstraint
    hg = Hostgroup("@htc_q_mpipg0", {"node.nodearray": "htc"})
    bhg = BoundHostgroup(ge_env.queues["htc.q"], hg, 0)
    q = qc(bhg, bhg.name.replace("@", "pg0"))
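    # sanity check that the constraint's dict form is JSON-serializable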
    json_dump(q.to_dict())
    expected_dict: Dict[str, Optional[Any]] = {
        "hostgroup-and-pg": {
            "hostgroup": "@htc_q_mpipg0",
            "user": None,
            "project": None,
            "placement-group": "pg0htc_q_mpipg0",
            "seq-no": 0,
            "constraints": [{
                "nodearray": ["htc"]
            }],
        }
    }
    assert q.to_dict() == expected_dict
    parsed = constraints.get_constraint(q.to_dict())
    assert parsed.hostgroups_set == q.hostgroups_set
    assert parsed.hostgroups_sorted == q.hostgroups_sorted
    assert parsed.placement_group == q.placement_group

    q = qc("htc.q", ["@htc.q", "@another"], None)
    expected_dict = {
        "hostgroups-and-pg": {
            "hostgroups": ["@another",
                           "@htc.q"],  # sort the hostgroups for consistency
            "placement-group": None,
        }
    }
    assert q.to_dict() == expected_dict
    parsed = constraints.get_constraint(q.to_dict())
    assert parsed.hostgroups_set == q.hostgroups_set
    assert parsed.hostgroups_sorted == q.hostgroups_sorted
    assert parsed.placement_group == q.placement_group

    node = SchedulerNode(
        "tux",
        {"_gridengine_hostgroups": q.hostgroups_sorted},
    )
    assert q.satisfied_by_node(node)
    assert q.do_decrement(node)

    node = SchedulerNode(
        "tux",
        {"_gridengine_hostgroups": q.hostgroups_sorted},
    )
    node.placement_group = "pg0"
    assert not q.satisfied_by_node(node)

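    # an existing node with no hostgroup resource cannot satisfy the
    # constraint, but a non-existent (prospective) node can, since the
    # hostgroups are assigned to it on decrement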
    node = SchedulerNode("tux", {})
    node.exists = True
    assert not q.satisfied_by_node(node)

    node.exists = False
    assert q.satisfied_by_node(node)
    assert q.do_decrement(node)
    assert node.available["_gridengine_hostgroups"] == q.hostgroups_sorted
    assert node.software_configuration["gridengine_hostgroups"] == " ".join(
        q.hostgroups_sorted)
def test_hostgroup_constraint() -> None:
    ge_env = common_ge_env()
    hostgroup = Hostgroup("@htc.q", {}, members=["tux42"])
    bound = BoundHostgroup(ge_env.queues["htc.q"], hostgroup, 0)
    cons = driver.HostgroupConstraint(bound, "pg1")

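    # helper: build a highmem node, optionally assigning its placement
    # group and hostgroup membership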
    def new_node(
        pg: Optional[str] = None,
        hostname: str = "tux1",
        hostgroup: Optional[Hostgroup] = None,
    ) -> SchedulerNode:
        node = SchedulerNode(hostname, {"slot_type": "highmem"})

        if pg:
            node.placement_group = pg

        if hostgroup:
            util.add_node_to_hostgroup(node, hostgroup)

        return node

    # node is not in a pg
    result = cons.satisfied_by_node(new_node(None, "tux42", hostgroup))
    assert not result
    assert result.status == "WrongPlacementGroup"
    # wrong pg
    result = cons.satisfied_by_node(new_node("pg2", "tux42", hostgroup))
    assert not result
    assert result.status == "WrongPlacementGroup"
    # not in the hostgroup
    result = cons.satisfied_by_node(new_node("pg1", "tux42"))
    assert not result
    assert result.status == "WrongHostgroup"
    # happy path
    assert cons.satisfied_by_node(new_node("pg1", "tux42", hostgroup))

    # reject this because the node's slot_type (highmem) does not match lowmem
    hostgroup = Hostgroup("@hg1", {"slot_type": "lowmem"}, members=["tux42"])
    bound = BoundHostgroup(ge_env.queues["htc.q"], hostgroup, 0)
    cons = driver.HostgroupConstraint(bound, "pg1")
    result = cons.satisfied_by_node(new_node("pg1", "tux42", hostgroup))
    assert not result
    assert result.status == "InvalidOption"

    hostgroup = Hostgroup("@hg1", {"slot_type": "highmem"}, members=["tux42"])
    bound = BoundHostgroup(ge_env.queues["htc.q"], hostgroup, 0)
    cons = driver.HostgroupConstraint(bound, "pg1")
    assert cons.satisfied_by_node(new_node("pg1", "tux42", hostgroup))

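    # XOr requires exactly one of the per-placement-group constraints to
    # match; a node in pg4 matches none of pg1-pg3 and is rejected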
    cons_list = []
    for pg in ["pg1", "pg2", "pg3"]:
        hostgroup = Hostgroup("@" + pg, {"slot_type": "highmem"},
                              members=["tux42"])
        bound = BoundHostgroup(ge_env.queues["htc.q"], hostgroup, 0)
        cons = driver.HostgroupConstraint(bound, pg)
        cons_list.append(cons)

    pg1_hostgroup = Hostgroup("@pg1", {"slot_type": "highmem"},
                              members=["tux42"])
    assert XOr(*cons_list).satisfied_by_node(
        new_node("pg1", "tux42", pg1_hostgroup))
    pg2_hostgroup = Hostgroup("@pg2", {"slot_type": "highmem"},
                              members=["tux42"])
    assert XOr(*cons_list).satisfied_by_node(
        new_node("pg2", "tux42", pg2_hostgroup))
    pg3_hostgroup = Hostgroup("@pg3", {"slot_type": "highmem"},
                              members=["tux42"])
    assert XOr(*cons_list).satisfied_by_node(
        new_node("pg3", "tux42", pg3_hostgroup))
    pg4_hostgroup = Hostgroup("@pg4", {"slot_type": "highmem"},
                              members=["tux42"])
    assert not XOr(*cons_list).satisfied_by_node(
        new_node("pg4", "tux42", pg4_hostgroup))