def add_new_node_and_label_it(machineset_name):
    """
    Add a new node and label it

    Args:
        machineset_name (str): Name of the machine set

    """
    # Get the initial nodes list
    initial_nodes = get_worker_nodes()
    log.info(f"Current available worker nodes are {initial_nodes}")

    # Get the machineset replica count
    machineset_replica_count = machine.get_replica_count(machineset_name)

    # Increase its replica count
    machine.add_node(machineset_name, count=machineset_replica_count + 1)
    log.info(
        f"Increased {machineset_name} replica count "
        f"to {machineset_replica_count + 1}"
    )

    # Wait for the new node to come to ready state
    log.info("Waiting for the new node to be in ready state")
    machine.wait_for_new_node_to_be_ready(machineset_name)

    # Get the node name of the newly spun node
    nodes_after_new_spun_node = get_worker_nodes()
    new_spun_node = list(set(nodes_after_new_spun_node) - set(initial_nodes))
    log.info(f"New spun node is {new_spun_node}")

    # Label it
    node_obj = ocp.OCP(kind='node')
    node_obj.add_label(
        resource_name=new_spun_node[0],
        label=constants.OPERATOR_NODE_LABEL
    )
    log.info(f"Successfully labeled {new_spun_node} with OCS storage label")
def add_new_node_and_label_it(
    machineset_name, num_nodes=1, mark_for_ocs_label=True
):
    """
    Add a new node for ipi and label it

    Args:
        machineset_name (str): Name of the machine set
        num_nodes (int): Number of nodes to add
        mark_for_ocs_label (bool): True to label the new node(s) with the
            OCS storage label

        eg: add_new_node_and_label_it("new-tdesala-zlqzn-worker-us-east-2a")

    Returns:
        list: new spun nodes

    """
    # Get the initial nodes list
    initial_nodes = tests.helpers.get_worker_nodes()
    log.info(f"Current available worker nodes are {initial_nodes}")

    # Get the machineset replica count
    machineset_replica_count = machine.get_replica_count(machineset_name)
    log.info(
        f"{machineset_name} has replica count: {machineset_replica_count}"
    )

    # Increase its replica count
    log.info(f"Increasing the replica count by {num_nodes}")
    machine.add_node(machineset_name, count=machineset_replica_count + num_nodes)
    log.info(
        f"{machineset_name} now has replica "
        f"count: {machineset_replica_count + num_nodes}"
    )

    # Wait for the new nodes to come to ready state
    log.info("Waiting for the new node(s) to be in ready state")
    machine.wait_for_new_node_to_be_ready(machineset_name)

    # Get the node names of the newly spun nodes
    nodes_after_new_spun_node = tests.helpers.get_worker_nodes()
    new_spun_nodes = list(
        set(nodes_after_new_spun_node) - set(initial_nodes)
    )
    log.info(f"New spun nodes: {new_spun_nodes}")

    # Label them
    if mark_for_ocs_label:
        node_obj = ocp.OCP(kind='node')
        for new_spun_node in new_spun_nodes:
            node_obj.add_label(
                resource_name=new_spun_node,
                label=constants.OPERATOR_NODE_LABEL
            )
            log.info(
                f"Successfully labeled {new_spun_node} with OCS storage label"
            )
    return new_spun_nodes
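# Hedged usage sketch (illustrative, not part of the original source): scale
# an existing machineset by two workers and label them for OCS, reusing the
# helpers shown above. `example_scale_and_label_workers` is a hypothetical
# name; the sketch assumes an AWS IPI cluster and the `machine`,
# `tests.helpers` and `random` imports used elsewhere in this module.
def example_scale_and_label_workers():
    # Pick any existing worker and resolve the machineset that backs it
    worker = random.choice(tests.helpers.get_worker_nodes())
    machine_name = machine.get_machine_from_node_name(worker)
    machineset_name = machine.get_machineset_from_machine_name(machine_name)
    # Add two nodes and mark them with the OCS storage label
    return add_new_node_and_label_it(
        machineset_name, num_nodes=2, mark_for_ocs_label=True
    )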
def add_worker_based_on_pods_count_per_node(
    node_count, expected_count, role_type=None, machineset_name=None
):
    """
    Evaluate the number of pods running on each node and add new nodes
    accordingly.

    Args:
        node_count (int): Additional nodes to be added
        expected_count (int): Expected pod count on one node
        role_type (str): Role type of the nodes getting added
        machineset_name (list): Machineset names to add more nodes to,
            if required

    Returns:
        bool: True if nodes were added, False otherwise

    """
    # Check the running pod count on each node
    if (
        config.ENV_DATA["deployment_type"] == "ipi"
        and config.ENV_DATA["platform"].lower() == "aws"
    ):
        app_nodes = node.get_nodes(node_type=role_type)
        pod_count_dict = node.get_running_pod_count_from_node(node_type=role_type)
        high_count_nodes, less_count_nodes = [], []
        for node_obj in app_nodes:
            count = pod_count_dict[f"{node_obj.name}"]
            if count >= expected_count:
                high_count_nodes.append(node_obj.name)
            else:
                less_count_nodes.append(node_obj.name)
        if len(less_count_nodes) <= 1:
            for name in machineset_name:
                count = machine.get_replica_count(machine_set=name)
                machine.add_node(machine_set=name, count=(count + node_count))
                machine.wait_for_new_node_to_be_ready(name)
            return True
        else:
            logging.info(
                f"Enough pods can be created with available nodes {pod_count_dict}"
            )
            return False
    elif (
        config.ENV_DATA["deployment_type"] == "upi"
        and config.ENV_DATA["platform"].lower() in ("vsphere", "baremetal", "azure")
    ):
        raise UnsupportedPlatformError("Unsupported Platform to add worker")
def add_worker_based_on_cpu_utilization(
    node_count, expected_percent, role_type=None, machineset_name=None
):
    """
    Evaluate the CPU utilization of the nodes and add new nodes if required.

    Args:
        node_count (int): Additional nodes to be added
        expected_percent (int): Expected utilization percent
        role_type (str): Role type of the nodes getting added
        machineset_name (list): Machineset names to add more nodes to,
            if required

    Returns:
        bool: True if nodes were added, False otherwise

    """
    # Check the CPU utilization of each node
    if (
        config.ENV_DATA["deployment_type"] == "ipi"
        and config.ENV_DATA["platform"].lower() == "aws"
    ):
        app_nodes = node.get_nodes(node_type=role_type)
        uti_dict = node.get_node_resource_utilization_from_oc_describe(
            node_type=role_type
        )
        uti_high_nodes, uti_less_nodes = [], []
        for node_obj in app_nodes:
            utilization_percent = uti_dict[f"{node_obj.name}"]["cpu"]
            if utilization_percent > expected_percent:
                uti_high_nodes.append(node_obj.name)
            else:
                uti_less_nodes.append(node_obj.name)
        if len(uti_less_nodes) <= 1:
            for name in machineset_name:
                count = machine.get_replica_count(machine_set=name)
                machine.add_node(machine_set=name, count=(count + node_count))
                machine.wait_for_new_node_to_be_ready(name)
            return True
        else:
            logging.info(f"Enough resource available for more pod creation {uti_dict}")
            return False
    elif (
        config.ENV_DATA["deployment_type"] == "upi"
        and config.ENV_DATA["platform"].lower() in ("vsphere", "baremetal", "azure")
    ):
        raise UnsupportedPlatformError("Unsupported Platform to add worker")
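# Hedged usage sketch (illustrative, not from the original source): the two
# capacity helpers above can be combined so that workers are added when either
# the per-node pod count or the CPU utilization crosses a threshold. The
# function name, role type "app", thresholds and machineset list below are
# assumptions for the example.
def example_scale_out_if_needed(app_machinesets):
    added_for_pods = add_worker_based_on_pods_count_per_node(
        node_count=1,
        expected_count=240,
        role_type="app",
        machineset_name=app_machinesets,
    )
    added_for_cpu = add_worker_based_on_cpu_utilization(
        node_count=1,
        expected_percent=80,
        role_type="app",
        machineset_name=app_machinesets,
    )
    return added_for_pods or added_for_cpu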
def delete_and_create_osd_node_ipi(osd_node_name):
    """
    Unschedule, drain and delete an osd node, and create a new osd node.
    At the end of the function there should be the same number of osd nodes
    as there was at the beginning, and ceph health should be OK.
    This function is for any IPI platform.

    Args:
        osd_node_name (str): the name of the osd node

    Returns:
        str: The new node name

    """
    log.info("Going to unschedule, drain and delete %s node", osd_node_name)
    # Unscheduling node
    unschedule_nodes([osd_node_name])
    # Draining node
    drain_nodes([osd_node_name])

    log.info("Getting machine name from specified node name")
    machine_name = machine.get_machine_from_node_name(osd_node_name)
    machine_type = machine.get_machine_type(machine_name)
    log.info(f"Node {osd_node_name} associated machine is {machine_name}")

    log.info(
        f"Deleting machine {machine_name} and waiting for new machine to come up"
    )
    machine.delete_machine_and_check_state_of_new_spinned_machine(machine_name)
    new_machine_list = machine.get_machines(machine_type=machine_type)
    for machines in new_machine_list:
        # Trimming is done to get just the machine name
        # eg: machine_name: prsurve-40-ocs-43-kbrvf-worker-us-east-2b-nlgkr
        # After trimming: prsurve-40-ocs-43-kbrvf-worker-us-east-2b
        if re.match(machines.name[:-6], machine_name):
            new_machine_name = machines.name
    machineset_name = machine.get_machineset_from_machine_name(
        new_machine_name
    )
    log.info("Waiting for new worker node to be in ready state")
    machine.wait_for_new_node_to_be_ready(machineset_name)
    new_node_name = get_node_from_machine_name(new_machine_name)
    if not is_node_labeled(new_node_name):
        log.info("Adding ocs label to newly created worker node")
        node_obj = ocp.OCP(kind="node")
        node_obj.add_label(
            resource_name=new_node_name,
            label=constants.OPERATOR_NODE_LABEL
        )
        log.info(
            f"Successfully labeled {new_node_name} with OCS storage label"
        )

    return new_node_name
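# Hedged usage sketch (illustrative): pick a random OSD node, as the
# proactive replacement test further below does, and replace it via the IPI
# helper above. Assumes the `pod` module and `random` import used elsewhere
# in this code; the function name is hypothetical.
def example_replace_random_osd_node():
    osd_pods_obj = pod.get_osd_pods()
    osd_node_name = pod.get_pod_node(random.choice(osd_pods_obj)).name
    # The helper drains and deletes the node, waits for the replacement
    # machine and returns the new node's name
    return delete_and_create_osd_node_ipi(osd_node_name)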
def deploy_ocp(self, log_cli_level="DEBUG"):
    """
    Deployment specific to OCP cluster on this platform

    Args:
        log_cli_level (str): openshift installer's log level
            (default: "DEBUG")

    """
    super(AWSIPI, self).deploy_ocp(log_cli_level)
    if config.DEPLOYMENT.get("infra_nodes"):
        num_nodes = config.ENV_DATA.get("infra_replicas", 3)
        ms_list = machine.create_ocs_infra_nodes(num_nodes)
        for node in ms_list:
            machine.wait_for_new_node_to_be_ready(node)
    if config.DEPLOYMENT.get("host_network"):
        self.host_network_update()
def factory(additional_nodes=3):
    """
    Args:
        additional_nodes (int): Number of additional nodes to be added
            (default=3).

    """
    log.info("Creating machineset")
    machineset_name.append(
        machine.create_custom_machineset(instance_type="m5.4xlarge", zone="a")
    )
    machine.wait_for_new_node_to_be_ready(machineset_name[0])

    log.info(
        f"Adding {additional_nodes} more nodes to machineset {machineset_name[0]}"
    )
    node.add_new_node_and_label_it(
        machineset_name=machineset_name[0],
        num_nodes=additional_nodes,
        mark_for_ocs_label=False,
    )
    machine.wait_for_new_node_to_be_ready(machineset_name[0])
def check_and_add_enough_worker(worker_count):
    """
    Check whether there are enough workers available to scale app pods.
    If there are not enough workers, workers will be added on the supported
    platforms. The function also adds the scale label to the respective
    worker nodes.

    Args:
        worker_count (int): Expected worker count to be present in the setup

    Returns:
        bool: True if there are enough workers, else raise an exception

    """
    # Check whether to use OCS workers for scaling app pods.
    # If so, continue to label the workers with the scale label.
    worker_list = node.get_worker_nodes()
    ocs_worker_list = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    scale_worker = machine.get_labeled_nodes(constants.SCALE_LABEL)
    if config.RUN.get("use_ocs_worker_for_scale"):
        if not scale_worker:
            helpers.label_worker_node(
                node_list=worker_list,
                label_key="scale-label",
                label_value="app-scale",
            )
    else:
        if not scale_worker:
            for node_item in ocs_worker_list:
                worker_list.remove(node_item)
            if worker_list:
                helpers.label_worker_node(
                    node_list=worker_list,
                    label_key="scale-label",
                    label_value="app-scale",
                )
    scale_worker_list = machine.get_labeled_nodes(constants.SCALE_LABEL)
    logging.info(f"Existing scale workers: {scale_worker_list}")

    # Check if there are enough nodes to continue scaling app pods
    if len(scale_worker_list) >= worker_count:
        logging.info(
            f"Setup has expected worker count {worker_count} "
            "to continue scale of pods"
        )
        return True
    else:
        logging.info(
            "There are not enough workers in the setup, will add enough "
            "workers for the automation supported platforms"
        )
        # Add enough workers for AWS
        if (
            config.ENV_DATA["deployment_type"] == "ipi"
            and config.ENV_DATA["platform"].lower() == "aws"
        ):
            # Create a machineset for app worker nodes in each AWS zone.
            # Each zone will have one app worker node.
            ms_name = list()
            labels = [("node-role.kubernetes.io/app", "app-scale")]
            for obj in machine.get_machineset_objs():
                if "app" in obj.name:
                    ms_name.append(obj.name)
            if not ms_name:
                if len(machine.get_machineset_objs()) == 3:
                    for zone in ["a", "b", "c"]:
                        ms_name.append(
                            machine.create_custom_machineset(
                                instance_type="m5.4xlarge",
                                labels=labels,
                                zone=zone,
                            )
                        )
                else:
                    ms_name.append(
                        machine.create_custom_machineset(
                            instance_type="m5.4xlarge",
                            labels=labels,
                            zone="a",
                        )
                    )
                for ms in ms_name:
                    machine.wait_for_new_node_to_be_ready(ms)
            if len(ms_name) == 3:
                exp_count = int(worker_count / 3)
            else:
                exp_count = worker_count
            for name in ms_name:
                machine.add_node(machine_set=name, count=exp_count)
            for ms in ms_name:
                machine.wait_for_new_node_to_be_ready(ms)
            worker_list = node.get_worker_nodes()
            ocs_worker_list = machine.get_labeled_nodes(
                constants.OPERATOR_NODE_LABEL
            )
            scale_label_worker = machine.get_labeled_nodes(
                constants.SCALE_LABEL
            )
            ocs_worker_list.extend(scale_label_worker)
            final_list = list(dict.fromkeys(ocs_worker_list))
            for node_item in final_list:
                if node_item in worker_list:
                    worker_list.remove(node_item)
            if worker_list:
                helpers.label_worker_node(
                    node_list=worker_list,
                    label_key="scale-label",
                    label_value="app-scale",
                )
            return True
        elif (
            config.ENV_DATA["deployment_type"] == "upi"
            and config.ENV_DATA["platform"].lower() in ("vsphere", "baremetal", "azure")
        ):
            raise UnsupportedPlatformError(
                "Unsupported Platform to add worker"
            )
        else:
            raise UnavailableResourceException(
                "There are not enough worker nodes to continue app pod scaling"
            )
def label_nodes(request, with_ocs):
    """
    Fixture to label the node(s) that will run the application pod.
    Those are all worker nodes that do not run the OCS cluster.
    """
    m_set = ''  # this will hold the machineset name, if one is added

    def teardown():
        ceph_health_check()
        if with_ocs:
            return
        if m_set != '':
            log.info(f'Destroy {m_set}')
            machine.delete_custom_machineset(m_set)
        else:
            log.info('Clear label from worker (application) nodes')
            # Getting all application nodes
            app_nodes = machine.get_labeled_nodes(constants.APP_NODE_LABEL)
            log.debug(f'The application nodes are : {app_nodes}')
            helpers.remove_label_from_worker_node(
                app_nodes, constants.VDBENCH_NODE_LABEL
            )

    request.addfinalizer(teardown)

    if with_ocs:
        return

    # Add the label to the worker nodes.
    # Getting all OCS nodes (to verify the app pod will not run on them)
    ocs_nodes = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    worker_nodes = helpers.get_worker_nodes()
    # Getting the list of free nodes
    free_nodes = list(set(worker_nodes) - set(ocs_nodes))

    if not free_nodes:
        # No free nodes - creating a new machineset for the application pods
        log.info('Adding new machineset, with worker for application pod')
        m_set = machine.create_custom_machineset(
            label=constants.APP_NODE_LABEL
        )
        machine.wait_for_new_node_to_be_ready(m_set)
        free_nodes = machine.get_labeled_nodes(
            f'node-role.kubernetes.io/app={constants.APP_NODE_LABEL}'
        )
        # TODO: implement this for VMware as well.

    log.info('Adding the app-node label to non-OCS workers')
    log.debug(f'The worker nodes are : {worker_nodes}')
    log.debug(f'The OCS nodes are : {ocs_nodes}')
    log.debug(f'The free nodes are : {free_nodes}')
    assert free_nodes, (
        'Did not find any worker to run on, please deploy another worker'
    )
    helpers.label_worker_node(
        free_nodes, constants.APP_NODE_LABEL, constants.VDBENCH_NODE_LABEL
    )
    return
def test_node_replacement_reactive_aws_ipi(
    self,
    nodes,
    pvc_factory,
    pod_factory,
    dc_pod_factory,
    failure,
    interface,
    bucket_factory,
    rgw_bucket_factory,
):
    """
    Knip-894 Node replacement - AWS-IPI-Reactive
    """
    # Get worker nodes
    initial_nodes = get_worker_nodes()

    # Get OSD running nodes
    osd_running_nodes = get_osd_running_nodes()
    log.info(f"OSDs are running on nodes {osd_running_nodes}")

    # Label osd nodes with fedora app
    label_worker_node(osd_running_nodes, label_key="dc", label_value="fedora")

    # Create DC app pods
    log.info("Creating DC based app pods")
    if interface == "rbd":
        interface = constants.CEPHBLOCKPOOL
    elif interface == "cephfs":
        interface = constants.CEPHFILESYSTEM
    dc_pod_obj = []
    for i in range(2):
        dc_pod = dc_pod_factory(interface=interface, node_selector={"dc": "fedora"})
        pod.run_io_in_bg(dc_pod, fedora_dc=True)
        dc_pod_obj.append(dc_pod)

    # Get the nodes where the app pods are running
    dc_pod_node_name = get_app_pod_running_nodes(dc_pod_obj)
    log.info(f"DC app pod running nodes are {dc_pod_node_name}")

    # Get the nodes where both an OSD and an app pod are running
    common_nodes = get_both_osd_and_app_pod_running_node(
        osd_running_nodes, dc_pod_node_name
    )
    log.info(f"Both OSD and app pods are running on nodes {common_nodes}")

    # Get the machine name using the node name
    machine_name = machine.get_machine_from_node_name(common_nodes[0])
    log.info(f"{common_nodes[0]} associated machine is {machine_name}")

    # Get the machineset name using the machine name
    machineset_name = machine.get_machineset_from_machine_name(machine_name)
    log.info(f"{common_nodes[0]} associated machineset is {machineset_name}")

    # Get the failure node obj
    failure_node_obj = get_node_objs(node_names=[common_nodes[0]])

    # Induce failure on the selected failure node
    log.info(f"Inducing failure on node {failure_node_obj[0].name}")
    if failure == "power off":
        # Power off the AWS worker node instance
        nodes.stop_nodes(failure_node_obj, wait=True)
        log.info(f"Successfully powered off node: {failure_node_obj[0].name}")
    elif failure == "network failure":
        # Induce a network failure
        node_network_failure([failure_node_obj[0].name])

    # Add an annotation to the failed node's machine
    annotation = "machine.openshift.io/exclude-node-draining=''"
    machine.add_annotation_to_machine(
        annotation=annotation, machine_name=machine_name
    )

    # Delete the machine
    machine.delete_machine(machine_name)
    log.info(f"Successfully deleted machine {machine_name}")

    # Wait for the new machine to spin up
    log.info("Waiting for the new node to be in ready state")
    machine.wait_for_new_node_to_be_ready(machineset_name)

    # Get the node name of the newly spun node
    nodes_after_new_spun_node = get_worker_nodes()
    new_spun_node = list(set(nodes_after_new_spun_node) - set(initial_nodes))
    log.info(f"New spun node is {new_spun_node}")

    # Label it
    node_obj = ocp.OCP(kind="node")
    node_obj.add_label(
        resource_name=new_spun_node[0], label=constants.OPERATOR_NODE_LABEL
    )
    log.info(f"Successfully labeled {new_spun_node} with OCS storage label")

    # DC app pods on the failed node will get automatically created on
    # another running node. Wait for all dc app pods to reach running state.
    pod.wait_for_dc_app_pods_to_reach_running_state(dc_pod_obj, timeout=1200)
    log.info("All the dc pods reached running state")
    pod.wait_for_storage_pods()

    # Check basic cluster functionality by creating resources
    # (pools, storageclasses, PVCs, pods - both CephFS and RBD),
    # run IO and delete the resources
    self.sanity_helpers.create_resources(
        pvc_factory, pod_factory, bucket_factory, rgw_bucket_factory
    )
    self.sanity_helpers.delete_resources()

    # Perform cluster and Ceph health checks
    self.sanity_helpers.health_check()
def add_worker_node(instance_type=None):
    global ms_name
    ms_name = list()
    worker_list = node.get_worker_nodes()
    ocs_worker_list = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    scale_worker = machine.get_labeled_nodes(constants.SCALE_LABEL)
    if config.RUN.get("use_ocs_worker_for_scale"):
        if not scale_worker:
            helpers.label_worker_node(
                node_list=worker_list,
                label_key="scale-label",
                label_value="app-scale",
            )
    else:
        if not scale_worker:
            for node_item in ocs_worker_list:
                worker_list.remove(node_item)
            if worker_list:
                helpers.label_worker_node(
                    node_list=worker_list,
                    label_key="scale-label",
                    label_value="app-scale",
                )
    scale_worker_list = machine.get_labeled_nodes(constants.SCALE_LABEL)
    logging.info(f"Existing scale workers: {scale_worker_list}")

    if (
        config.ENV_DATA["deployment_type"] == "ipi"
        and config.ENV_DATA["platform"].lower() == "aws"
    ):
        log.info("Adding worker nodes on the current cluster")
        labels = [("node-role.kubernetes.io/app", "app-scale")]
        # Create a machineset for app worker nodes in each zone
        for obj in machine.get_machineset_objs():
            if "app" in obj.name:
                ms_name.append(obj.name)
        # Default to m5.4xlarge when no instance type was given
        if instance_type is None:
            instance_type = "m5.4xlarge"
        if not ms_name:
            if len(machine.get_machineset_objs()) == 3:
                for zone in ["a", "b", "c"]:
                    ms_name.append(
                        machine.create_custom_machineset(
                            instance_type=instance_type,
                            labels=labels,
                            zone=zone,
                        )
                    )
            else:
                ms_name.append(
                    machine.create_custom_machineset(
                        instance_type=instance_type,
                        labels=labels,
                        zone="a",
                    )
                )
            for ms in ms_name:
                machine.wait_for_new_node_to_be_ready(ms)
        worker_list = node.get_worker_nodes()
        ocs_worker_list = machine.get_labeled_nodes(
            constants.OPERATOR_NODE_LABEL
        )
        scale_label_worker = machine.get_labeled_nodes(constants.SCALE_LABEL)
        ocs_worker_list.extend(scale_label_worker)
        final_list = list(dict.fromkeys(ocs_worker_list))
        for node_item in final_list:
            if node_item in worker_list:
                worker_list.remove(node_item)
        if worker_list:
            helpers.label_worker_node(
                node_list=worker_list,
                label_key="scale-label",
                label_value="app-scale",
            )
        return True
    elif (
        config.ENV_DATA["deployment_type"] == "upi"
        and config.ENV_DATA["platform"].lower() in ("vsphere", "baremetal")
    ):
        log.info("Running scale test on existing worker nodes.")
    elif (
        config.ENV_DATA["deployment_type"] == "upi"
        and config.ENV_DATA["platform"].lower() == "azure"
    ):
        raise UnsupportedPlatformError("Unsupported Platform")
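# Hedged usage sketch (illustrative): add app-scale workers with a custom
# instance type before kicking off a scale run; the helper above falls back
# to m5.4xlarge when no type is passed. The function name and instance type
# below are assumptions for the example.
def example_add_scale_workers():
    add_worker_node(instance_type="m5.8xlarge")
    return machine.get_labeled_nodes(constants.SCALE_LABEL)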
def test_nodereplacement_proactive(self, pvc_factory, pod_factory, dc_pod_factory):
    """
    Knip-894 Node Replacement proactive
    """
    # Get worker nodes
    worker_node_list = get_worker_nodes()
    log.info(f"Current available worker nodes are {worker_node_list}")

    osd_pods_obj = pod.get_osd_pods()
    osd_node_name = pod.get_pod_node(random.choice(osd_pods_obj)).name
    log.info(f"Selected OSD node is {osd_node_name}")

    log.info("Creating dc pods backed with rbd pvc and running io in bg")
    for worker_node in worker_node_list:
        if worker_node != osd_node_name:
            rbd_dc_pod = dc_pod_factory(
                interface=constants.CEPHBLOCKPOOL, node_name=worker_node, size=20
            )
            pod.run_io_in_bg(rbd_dc_pod, expect_to_fail=False, fedora_dc=True)

    log.info("Creating dc pods backed with cephfs pvc and running io in bg")
    for worker_node in worker_node_list:
        if worker_node != osd_node_name:
            cephfs_dc_pod = dc_pod_factory(
                interface=constants.CEPHFILESYSTEM, node_name=worker_node, size=20
            )
            pod.run_io_in_bg(cephfs_dc_pod, expect_to_fail=False, fedora_dc=True)

    # Unscheduling node
    node.unschedule_nodes([osd_node_name])
    # Draining node
    node.drain_nodes([osd_node_name])

    log.info("Getting machine name from specified node name")
    machine_name = machine.get_machine_from_node_name(osd_node_name)
    log.info(f"Node {osd_node_name} associated machine is {machine_name}")

    log.info(
        f"Deleting machine {machine_name} and waiting for new machine to come up"
    )
    machine.delete_machine_and_check_state_of_new_spinned_machine(machine_name)
    new_machine_list = machine.get_machines()
    for machines in new_machine_list:
        # Trimming is done to get just the machine name
        # eg: machine_name: prsurve-40-ocs-43-kbrvf-worker-us-east-2b-nlgkr
        # After trimming: prsurve-40-ocs-43-kbrvf-worker-us-east-2b
        if re.match(machines.name[:-6], machine_name):
            new_machine_name = machines.name
    machineset_name = machine.get_machineset_from_machine_name(
        new_machine_name
    )
    log.info("Waiting for new worker node to be in ready state")
    machine.wait_for_new_node_to_be_ready(machineset_name)
    new_node_name = node.get_node_from_machine_name(new_machine_name)
    log.info("Adding ocs label to newly created worker node")
    node_obj = ocp.OCP(kind='node')
    node_obj.add_label(
        resource_name=new_node_name, label=constants.OPERATOR_NODE_LABEL
    )
    log.info(f"Successfully labeled {new_node_name} with OCS storage label")

    # Creating resources
    log.info("Creating resources using sanity helpers")
    self.sanity_helpers.create_resources(pvc_factory, pod_factory)
    # Deleting resources
    self.sanity_helpers.delete_resources()

    # Verify everything is running fine
    log.info("Verifying all resources are running and match the expected result")
    self.sanity_helpers.health_check()
def identify_and_add_nodes(self, scenario, num_of_nodes):
    """
    Fetches info about the worker nodes and adds nodes (if required)

    Args:
        scenario (str): Scenario of app pods running on OCS or dedicated
            nodes (eg., 'colocated', 'dedicated')
        num_of_nodes (int): number of nodes required for running the test

    Returns:
        tuple: tuple containing:
            list: list of OCS node names
            list: list of non-OCS node names

    """
    nodes_to_add = 0
    initial_worker_nodes = helpers.get_worker_nodes()
    ocs_nodes = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    non_ocs_nodes = list(set(initial_worker_nodes) - set(ocs_nodes))

    if 'colocated' in scenario and len(ocs_nodes) < num_of_nodes:
        nodes_to_add = num_of_nodes - len(initial_worker_nodes)

    if 'dedicated' in scenario and len(non_ocs_nodes) < num_of_nodes:
        nodes_to_add = num_of_nodes - len(non_ocs_nodes)

    if nodes_to_add > 0:
        logger.info(f"{nodes_to_add} extra worker nodes needed")

        if config.ENV_DATA['deployment_type'] == 'ipi':
            machine_name = machine.get_machine_from_node_name(
                random.choice(initial_worker_nodes)
            )
            machineset_name = machine.get_machineset_from_machine_name(
                machine_name
            )
            machineset_replica_count = machine.get_replica_count(
                machineset_name
            )
            machine.add_node(
                machineset_name,
                count=machineset_replica_count + nodes_to_add
            )
            logger.info("Waiting for the new node(s) to be in ready state")
            machine.wait_for_new_node_to_be_ready(machineset_name)
        else:
            # TODO: Add the required number of nodes instead of skipping
            # https://github.com/red-hat-storage/ocs-ci/issues/1291
            pytest.skip("Add node not implemented for UPI, github issue #1291")

        new_worker_nodes = helpers.get_worker_nodes()
        new_nodes_added = list(set(new_worker_nodes) - set(initial_worker_nodes))
        assert len(new_nodes_added) > 0, 'Extra nodes not added in the cluster'
        non_ocs_nodes += new_nodes_added

    if 'colocated' in scenario and len(ocs_nodes) < num_of_nodes:
        logger.info('Adding OCS storage label to non-OCS workers')
        node_obj = ocp.OCP(kind=constants.NODE)
        nodes_to_label = non_ocs_nodes[0:(num_of_nodes - len(ocs_nodes))]
        for node_name in nodes_to_label:
            node_obj.add_label(
                resource_name=node_name,
                label=constants.OPERATOR_NODE_LABEL
            )
            ocs_nodes.append(node_name)
        non_ocs_nodes = list(set(non_ocs_nodes) - set(ocs_nodes))

    logger.info(f"The OCS nodes are : {ocs_nodes}")
    logger.info(f"The non-OCS nodes are: {non_ocs_nodes}")
    return ocs_nodes, non_ocs_nodes
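# Hedged usage sketch (illustrative): a test would call the method above to
# guarantee enough nodes for its scenario before scheduling app pods. The
# function name, scenario string and node count below are assumptions for
# the example.
def example_prepare_dedicated_nodes(test_instance):
    # Ensure three non-OCS workers exist; nodes are added and/or labeled
    # as needed and both node lists are returned
    ocs_nodes, non_ocs_nodes = test_instance.identify_and_add_nodes(
        scenario='dedicated', num_of_nodes=3
    )
    return non_ocs_nodes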
def test_node_replacement_reactive_aws_ipi(
    self, nodes, pvc_factory, pod_factory, dc_pod_factory, failure, interface
):
    """
    Knip-894 Node replacement - AWS-IPI-Reactive
    """
    # Get worker nodes
    initial_nodes = get_worker_nodes()

    # Get OSD running nodes
    osd_running_nodes = get_osd_running_nodes()
    log.info(f"OSDs are running on nodes {osd_running_nodes}")

    # Label osd nodes with fedora app
    label_worker_node(osd_running_nodes, label_key='dc', label_value='fedora')

    # Create DC app pods
    log.info("Creating DC based app pods")
    if interface == 'rbd':
        interface = constants.CEPHBLOCKPOOL
    elif interface == 'cephfs':
        interface = constants.CEPHFILESYSTEM
    dc_pod_obj = []
    for i in range(2):
        dc_pod = dc_pod_factory(
            interface=interface, node_selector={'dc': 'fedora'}
        )
        pod.run_io_in_bg(dc_pod, fedora_dc=True)
        dc_pod_obj.append(dc_pod)

    # Get the nodes where the app pods are running
    dc_pod_node_name = get_app_pod_running_nodes(dc_pod_obj)
    log.info(f"DC app pod running nodes are {dc_pod_node_name}")

    # Get the nodes where both an OSD and an app pod are running
    common_nodes = get_both_osd_and_app_pod_running_node(
        osd_running_nodes, dc_pod_node_name
    )
    log.info(f"Both OSD and app pods are running on nodes {common_nodes}")

    # Get the machine name using the node name
    machine_name = machine.get_machine_from_node_name(common_nodes[0])
    log.info(f"{common_nodes[0]} associated machine is {machine_name}")

    # Get the machineset name using the machine name
    machineset_name = machine.get_machineset_from_machine_name(
        machine_name
    )
    log.info(
        f"{common_nodes[0]} associated machineset is {machineset_name}"
    )

    # Get the failure node obj
    failure_node_obj = get_node_objs(node_names=[common_nodes[0]])

    # Induce failure on the selected failure node
    log.info(f"Inducing failure on node {failure_node_obj[0].name}")
    if failure == "power off":
        # Power off the AWS worker node instance
        nodes.stop_nodes(failure_node_obj, wait=True)
        log.info(f"Successfully powered off node: {failure_node_obj[0].name}")
    elif failure == "network failure":
        # Induce a network failure
        node_network_failure([failure_node_obj[0].name])

    # Add an annotation to the failed node's machine
    annotation = "machine.openshift.io/exclude-node-draining=''"
    machine.add_annotation_to_machine(
        annotation=annotation, machine_name=machine_name
    )

    # Delete the machine
    machine.delete_machine(machine_name)
    log.info(f"Successfully deleted machine {machine_name}")

    # Wait for the new machine to spin up
    log.info("Waiting for the new node to be in ready state")
    machine.wait_for_new_node_to_be_ready(machineset_name)

    # Get the node name of the newly spun node
    nodes_after_new_spun_node = get_worker_nodes()
    new_spun_node = list(
        set(nodes_after_new_spun_node) - set(initial_nodes)
    )
    log.info(f"New spun node is {new_spun_node}")

    # Label it
    node_obj = ocp.OCP(kind='node')
    node_obj.add_label(
        resource_name=new_spun_node[0],
        label=constants.OPERATOR_NODE_LABEL
    )
    log.info(
        f"Successfully labeled {new_spun_node} with OCS storage label"
    )

    # DC app pods on the failed node will get automatically created on
    # another running node. Wait for all dc app pods to reach running state.
    pod.wait_for_dc_app_pods_to_reach_running_state(
        dc_pod_obj, timeout=1200
    )
    log.info("All the dc pods reached running state")

    # Check the status of all OCS pods; they should be in running state
    all_pod_obj = pod.get_all_pods(
        namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    for pod_obj in all_pod_obj:
        if (
            '-1-deploy' not in pod_obj.name
            and 'ocs-deviceset' not in pod_obj.name
        ):
            try:
                helpers.wait_for_resource_state(
                    resource=pod_obj,
                    state=constants.STATUS_RUNNING,
                    timeout=1800
                )
            except ResourceWrongStatusException:
                # The 'rook-ceph-crashcollector' pod on the failed node gets
                # stuck in Pending state. BZ 1810014 tracks it.
                # As a workaround, ignore the 'rook-ceph-crashcollector' pod
                # health check and delete its deployment so that the pod
                # disappears. Will revert this WA once the BZ is fixed.
                if 'rook-ceph-crashcollector' in pod_obj.name:
                    ocp_obj = ocp.OCP(
                        namespace=defaults.ROOK_CLUSTER_NAMESPACE
                    )
                    pod_name = pod_obj.name
                    deployment_name = '-'.join(pod_name.split("-")[:-2])
                    command = f"delete deployment {deployment_name}"
                    ocp_obj.exec_oc_cmd(command=command)
                    log.info(f"Deleted deployment for pod {pod_obj.name}")

    # Check basic cluster functionality by creating resources
    # (pools, storageclasses, PVCs, pods - both CephFS and RBD),
    # run IO and delete the resources
    self.sanity_helpers.create_resources(pvc_factory, pod_factory)
    self.sanity_helpers.delete_resources()

    # Perform cluster and Ceph health checks
    self.sanity_helpers.health_check()
def identify_and_add_nodes(self, scenario, num_of_nodes):
    """
    Fetches info about the worker nodes and adds nodes (if required)

    Args:
        scenario (str): Scenario of app pods running on OCS or dedicated
            nodes (eg., 'colocated', 'dedicated')
        num_of_nodes (int): number of nodes required for running the test

    Returns:
        tuple: tuple containing:
            list: list of OCS node names
            list: list of non-OCS node names

    """
    nodes_to_add = 0
    initial_worker_nodes = node.get_worker_nodes()
    ocs_nodes = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    non_ocs_nodes = list(set(initial_worker_nodes) - set(ocs_nodes))

    if "colocated" in scenario and len(ocs_nodes) < num_of_nodes:
        nodes_to_add = num_of_nodes - len(initial_worker_nodes)

    if "dedicated" in scenario and len(non_ocs_nodes) < num_of_nodes:
        nodes_to_add = num_of_nodes - len(non_ocs_nodes)

    if nodes_to_add > 0:
        logger.info(f"{nodes_to_add} extra worker nodes needed")

        if config.ENV_DATA["deployment_type"] == "ipi":
            machine_name = machine.get_machine_from_node_name(
                random.choice(initial_worker_nodes)
            )
            machineset_name = machine.get_machineset_from_machine_name(
                machine_name
            )
            machineset_replica_count = machine.get_replica_count(
                machineset_name
            )
            machine.add_node(
                machineset_name,
                count=machineset_replica_count + nodes_to_add
            )
            logger.info("Waiting for the new node(s) to be in ready state")
            machine.wait_for_new_node_to_be_ready(machineset_name)
        else:
            if (
                config.ENV_DATA.get("platform").lower()
                == constants.VSPHERE_PLATFORM
            ):
                pytest.skip(
                    "Skipping add node in VSPHERE due to "
                    "https://bugzilla.redhat.com/show_bug.cgi?id=1844521"
                )
            is_rhel = config.ENV_DATA.get("rhel_workers") or config.ENV_DATA.get(
                "rhel_user"
            )
            node_type = constants.RHEL_OS if is_rhel else constants.RHCOS
            node.add_new_node_and_label_upi(
                node_type=node_type,
                num_nodes=nodes_to_add,
                mark_for_ocs_label=False,
            )

        new_worker_nodes = node.get_worker_nodes()
        new_nodes_added = list(set(new_worker_nodes) - set(initial_worker_nodes))
        assert (
            len(new_nodes_added) == nodes_to_add
        ), "Extra nodes not added in the cluster"
        non_ocs_nodes += new_nodes_added

    if "colocated" in scenario and len(ocs_nodes) < num_of_nodes:
        logger.info("Adding OCS storage label to non-OCS workers")
        node_obj = ocp.OCP(kind=constants.NODE)
        nodes_to_label = non_ocs_nodes[0:(num_of_nodes - len(ocs_nodes))]
        for node_name in nodes_to_label:
            node_obj.add_label(
                resource_name=node_name, label=constants.OPERATOR_NODE_LABEL
            )
            ocs_nodes.append(node_name)
        non_ocs_nodes = list(set(non_ocs_nodes) - set(ocs_nodes))

    logger.info(f"The OCS nodes are : {ocs_nodes}")
    logger.info(f"The non-OCS nodes are: {non_ocs_nodes}")
    return ocs_nodes, non_ocs_nodes
def add_worker_node(instance_type=None):
    global ms_name
    ms_name = list()
    worker_list = helpers.get_worker_nodes()
    ocs_worker_list = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
    scale_worker = machine.get_labeled_nodes(constants.SCALE_LABEL)
    if config.RUN.get('use_ocs_worker_for_scale'):
        if not scale_worker:
            helpers.label_worker_node(
                node_list=worker_list,
                label_key='scale-label',
                label_value='app-scale'
            )
    else:
        if not scale_worker:
            for node_item in ocs_worker_list:
                worker_list.remove(node_item)
            if worker_list:
                helpers.label_worker_node(
                    node_list=worker_list,
                    label_key='scale-label',
                    label_value='app-scale'
                )
    scale_worker_list = machine.get_labeled_nodes(constants.SCALE_LABEL)
    logging.info(f"Existing scale workers: {scale_worker_list}")

    if (
        config.ENV_DATA['deployment_type'] == 'ipi'
        and config.ENV_DATA['platform'].lower() == 'aws'
    ):
        log.info("Adding worker nodes on the current cluster")
        # Create a machineset for app worker nodes in each zone
        for obj in machine.get_machineset_objs():
            if 'app' in obj.name:
                ms_name.append(obj.name)
        # Default to m5.4xlarge when no instance type was given
        if instance_type is None:
            instance_type = 'm5.4xlarge'
        if not ms_name:
            if len(machine.get_machineset_objs()) == 3:
                for zone in ['a', 'b', 'c']:
                    ms_name.append(
                        machine.create_custom_machineset(
                            instance_type=instance_type, zone=zone
                        )
                    )
            else:
                ms_name.append(
                    machine.create_custom_machineset(
                        instance_type=instance_type, zone='a'
                    )
                )
            for ms in ms_name:
                machine.wait_for_new_node_to_be_ready(ms)
        worker_list = helpers.get_worker_nodes()
        ocs_worker_list = machine.get_labeled_nodes(
            constants.OPERATOR_NODE_LABEL
        )
        scale_label_worker = machine.get_labeled_nodes(constants.SCALE_LABEL)
        ocs_worker_list.extend(scale_label_worker)
        final_list = list(dict.fromkeys(ocs_worker_list))
        for node_item in final_list:
            if node_item in worker_list:
                worker_list.remove(node_item)
        if worker_list:
            helpers.label_worker_node(
                node_list=worker_list,
                label_key='scale-label',
                label_value='app-scale'
            )
        return True
    elif (
        config.ENV_DATA['deployment_type'] == 'upi'
        and config.ENV_DATA['platform'].lower() in ('vsphere', 'baremetal')
    ):
        log.info('Running pgsql on existing worker nodes')
    elif (
        config.ENV_DATA['deployment_type'] == 'upi'
        and config.ENV_DATA['platform'].lower() == 'azure'
    ):
        raise UnsupportedPlatformError("Unsupported Platform")