async def test_smoke(self):
        """
        Fetches config, state and maintenances for node 0, builds a NodeView
        from the pieces that reference that node, and validates the result.
        """
        ni = 0
        async with MockAdminAPI() as client:
            nodes_config_resp = await client.getNodesConfig(
                NodesFilter(node=NodeID(node_index=ni))
            )
            nodes_state_resp = await client.getNodesState(
                NodesStateRequest(filter=NodesFilter(node=NodeID(node_index=ni)))
            )
            maintenances_resp = await client.getMaintenances(MaintenancesFilter())

        nc = nodes_config_resp.nodes[0]
        ns = nodes_state_resp.states[0]
        # A maintenance is relevant to node `ni` if it touches it either as a
        # shard or as a sequencer node.
        mnt_ids = {
            mnt.group_id
            for mnt in maintenances_resp.maintenances
            if any(s.node.node_index == ni for s in mnt.shards)
            or any(n.node_index == ni for n in mnt.sequencer_nodes)
        }
        # Deterministic ordering by group_id for the comparison below.
        mnts = tuple(
            sorted(
                (m for m in maintenances_resp.maintenances if m.group_id in mnt_ids),
                key=operator.attrgetter("group_id"),
            )
        )

        nv = NodeView(node_config=nc, node_state=ns, maintenances=mnts)

        self._validate(nv, nc, ns, mnts)
示例#2
0
    async def test_smoke(self):
        """
        Applies a combined shard + sequencer maintenance to node 0, then
        builds a NodeView for that node from concurrently fetched
        config/state/maintenances and validates it.
        """
        ni = 0
        async with MockAdminAPI() as client:
            cv = await get_cluster_view(client)
            # Fix: the return value of apply_maintenance() used to be bound
            # to ``maintenances_resp`` and then immediately clobbered by the
            # gather() unpack below — a dead, misleading store.  The result
            # is now discarded explicitly.
            await apply_maintenance(
                client=client,
                shards=[
                    ShardID(
                        node=cv.get_node_view_by_node_index(0).node_id, shard_index=1
                    )
                ],
                sequencer_nodes=[cv.get_node_view_by_node_index(0).node_id],
            )
            # Fetch the three views of the cluster concurrently.
            (
                nodes_config_resp,
                nodes_state_resp,
                maintenances_resp,
            ) = await asyncio.gather(
                client.getNodesConfig(NodesFilter(node=NodeID(node_index=ni))),
                client.getNodesState(
                    NodesStateRequest(filter=NodesFilter(node=NodeID(node_index=ni)))
                ),
                client.getMaintenances(MaintenancesFilter()),
            )

        # Single node expected for node_index == ni; IndexError here would
        # mean the mock returned nothing for the filter.
        nc = [n for n in nodes_config_resp.nodes if n.node_index == ni][0]
        ns = [n for n in nodes_state_resp.states if n.node_index == ni][0]
        # Collect ids of maintenances that touch node `ni` as a shard or as a
        # sequencer node.
        mnt_ids = set()
        for mnt in maintenances_resp.maintenances:
            for s in mnt.shards:
                if s.node.node_index == ni:
                    mnt_ids.add(mnt.group_id)
            for n in mnt.sequencer_nodes:
                if n.node_index == ni:
                    mnt_ids.add(mnt.group_id)
        # Deterministic ordering by group_id for the comparison below.
        mnts = tuple(
            sorted(
                (
                    mnt
                    for mnt in maintenances_resp.maintenances
                    if mnt.group_id in mnt_ids
                ),
                key=operator.attrgetter("group_id"),
            )
        )

        nv = NodeView(node_config=nc, node_state=ns, maintenances=mnts)

        self._validate(nv, nc, ns, mnts)
示例#3
0
async def get_nodes_config(
    client: AdminAPI, req: Optional[NodesFilter] = None
) -> NodesConfigResponse:
    """
    Wrapper for getNodesConfig() Thrift method

    When no filter is given (or a falsy one), an empty NodesFilter is used,
    i.e. the config for all nodes is requested.
    """
    request = req or NodesFilter()
    return await client.getNodesConfig(request)
示例#4
0
 async def test_smoke(self):
     """
     Applies two maintenances (combined shard+sequencer on node 0, whole
     node on node 1) and validates the refreshed cluster view.
     """
     async with MockAdminAPI() as client:
         cv = await get_cluster_view(client)
         node0_id = cv.get_node_view_by_node_index(0).node_id
         # Combined maintenance: one shard and one sequencer on node 0.
         await apply_maintenance(
             client=client,
             shards=[ShardID(node=node0_id, shard_index=1)],
             sequencer_nodes=[node0_id],
         )
         # Whole-node maintenance on node 1.
         await apply_maintenance(
             client=client,
             node_ids=[cv.get_node_id(node_index=1)],
             user="******",
             reason="whatever",
         )
         # Refresh everything concurrently after the mutations.
         cv, nc_resp, ns_resp, mnts_resp = await asyncio.gather(
             get_cluster_view(client),
             client.getNodesConfig(NodesFilter()),
             client.getNodesState(NodesStateRequest()),
             client.getMaintenances(MaintenancesFilter()),
         )
     self._validate(
         cv, nc_resp.nodes, ns_resp.states, tuple(mnts_resp.maintenances)
     )
 async def test_mismatch(self):
     """
     NodeView must raise ValueError when its config and state describe
     different nodes.
     """
     async with MockAdminAPI() as client:
         # Deliberately inconsistent: config for node 0, state for node 1.
         nc_resp, ns_resp, mnts_resp = await asyncio.gather(
             client.getNodesConfig(NodesFilter(node=NodeID(node_index=0))),
             client.getNodesState(
                 NodesStateRequest(filter=NodesFilter(node=NodeID(node_index=1)))
             ),
             client.getMaintenances(MaintenancesFilter()),
         )
     with self.assertRaises(ValueError):
         NodeView(
             node_config=nc_resp.nodes[0],
             node_state=ns_resp.states[0],
             maintenances=mnts_resp.maintenances,
         )
示例#6
0
    async def smoke(self, client) -> None:
        """
        Exercises apply_maintenance() in every supported shape — combined,
        storage-only, sequencer-only and whole-node — then validates the
        refreshed cluster view against the raw Thrift responses.
        """
        cv = await get_cluster_view(client)
        # Two separate calls on purpose: get_all_node_views() is consumed
        # once per comprehension.
        storage_views = [v for v in cv.get_all_node_views() if v.is_storage]
        sequencer_views = [v for v in cv.get_all_node_views() if v.is_sequencer]

        # Combined maintenance: storage shard + sequencer.
        await apply_maintenance(
            client=client,
            shards=[ShardID(node=storage_views[0].node_id, shard_index=1)],
            sequencer_nodes=[sequencer_views[0].node_id],
        )

        # Storage-only maintenance.
        await apply_maintenance(
            client=client,
            shards=[ShardID(node=storage_views[1].node_id, shard_index=1)],
        )

        # Sequencer-only maintenance.
        await apply_maintenance(
            client=client, sequencer_nodes=[sequencer_views[2].node_id]
        )

        # Whole-node maintenance on two nodes at once.
        await apply_maintenance(
            client=client,
            node_ids=[storage_views[3].node_id, sequencer_views[4].node_id],
            user="******",
            reason="whatever",
        )

        # Refresh everything concurrently after the mutations.
        cv, nc_resp, ns_resp, mnts_resp = await asyncio.gather(
            get_cluster_view(client),
            client.getNodesConfig(NodesFilter()),
            client.getNodesState(NodesStateRequest()),
            client.getMaintenances(MaintenancesFilter()),
        )
        self._validate(
            cv, nc_resp.nodes, ns_resp.states, tuple(mnts_resp.maintenances)
        )
示例#7
0
async def get_node_by_name(client: AdminAPI, name: str) -> Node:
    """
    Looks up a single Node by its name via the AdminAPI.

    Raises:
        logdevice.admin.exceptions.types.NodeNotReady: if node client is
            connected to is not ready yet to process request
        thrift.py3.TransportError: if there's network error while
            communicating with Thrift
        ldops.exceptions.NodeNotFoundError: if there's no such node from
            point of view of AdminAPI provider
    """
    name_filter = NodesFilter(node=NodeID(name=name))
    resp: NodesConfigResponse = await admin_api.get_nodes_config(
        client=client, req=name_filter
    )
    if not resp.nodes:
        raise NodeNotFoundError(f"Node not found: name=`{name}'")

    # AdminAPI guarantees node names are unique, so the first (and only)
    # entry is the match.
    return _get_node_by_node_config(resp.nodes[0])
示例#8
0
    async def shrink(self, node_indexes: typing.List[int]):
        """
        Shrinks the cluster by removing nodes from the NodesConfig. This
        operation requires that the removed nodes are empty (storage state:
        NONE) and dead.
        """
        # Build one filter per requested node index up front.
        node_filters = [
            NodesFilter(node=NodeID(node_index=idx)) for idx in node_indexes
        ]

        ctx = nubia.context.get_context()
        async with ctx.get_cluster_admin_client() as client:
            try:
                await client.removeNodes(
                    request=RemoveNodesRequest(node_filters=node_filters),
                    rpc_options=RpcOptions(timeout=60),
                )
                termcolor.cprint("Successfully removed the nodes", "green")
            except Exception as e:
                # CLI boundary: report the failure in red and signal a
                # non-zero exit instead of propagating the exception.
                termcolor.cprint(str(e), "red")
                return 1