def post_ocp_deploy(self):
    """
    Run the post-OCP-deployment configuration steps for the cluster.
    """
    # The three setup helpers are independent cluster-wide settings;
    # they are invoked in the original order via a simple dispatch tuple.
    post_deploy_steps = (
        set_selinux_permissions,
        set_registry_to_managed_state,
        add_stage_cert,
    )
    for step in post_deploy_steps:
        step()
def post_ocp_deploy(self):
    """
    Run the post-OCP-deployment configuration steps: verify node creation,
    apply cluster-wide settings, and optionally enable huge pages.
    """
    # Make sure every requested node actually joined the cluster first.
    verify_all_nodes_created()
    # Cluster-wide setup helpers, invoked in the original order.
    for step in (set_selinux_permissions, set_registry_to_managed_state, add_stage_cert):
        step()
    # Huge pages are only enabled when the environment config asks for them.
    huge_pages_requested = config.ENV_DATA.get("huge_pages")
    if huge_pages_requested:
        enable_huge_pages()
def post_ocp_deploy(self):
    """
    Run the post-OCP-deployment configuration steps: optional custom
    ingress certificate, node verification, cluster-wide settings, and
    optional huge pages / dummy zone labels.
    """
    deployment_cfg = config.DEPLOYMENT
    env_cfg = config.ENV_DATA

    # Install a custom ingress SSL certificate first, when configured.
    if deployment_cfg.get("use_custom_ingress_ssl_cert"):
        configure_custom_ingress_cert()

    # Make sure every requested node actually joined the cluster.
    verify_all_nodes_created()

    # Cluster-wide setup helpers, invoked in the original order.
    set_selinux_permissions()
    set_registry_to_managed_state()
    add_stage_cert()

    # Optional tuning/labeling steps, each gated by its own config flag.
    if env_cfg.get("huge_pages"):
        enable_huge_pages()
    if deployment_cfg.get("dummy_zone_node_labels"):
        create_dummy_zone_labels()
def add_new_node_and_label_upi(node_type, num_nodes, mark_for_ocs_label=True, node_conf=None):
    """
    Add new node(s) for an aws/vmware UPI platform and optionally label them.

    Args:
        node_type (str): Type of node, RHEL or RHCOS
        num_nodes (int): Number of nodes to add
        mark_for_ocs_label (bool): True to apply the OCS storage label to
            the newly created nodes
        node_conf (dict): The node configurations

    Returns:
        list: Names of the newly created worker nodes

    """
    node_conf = node_conf or {}
    initial_nodes = tests.helpers.get_worker_nodes()
    # Imported here (not at module level) — presumably to avoid a circular
    # import; kept function-scoped as in the original.
    from ocs_ci.ocs.platform_nodes import PlatformNodesFactory
    plt = PlatformNodesFactory()
    node_util = plt.get_nodes_platform()
    node_util.create_and_attach_nodes_to_cluster(node_conf, node_type, num_nodes)
    # Poll until the expected number of worker nodes appears in the cluster
    # (up to 600 seconds, checking every 6 seconds).
    for sample in TimeoutSampler(
        timeout=600, sleep=6, func=tests.helpers.get_worker_nodes
    ):
        if len(sample) == len(initial_nodes) + num_nodes:
            break
    nodes_after_exp = tests.helpers.get_worker_nodes()
    # Reuse the list we just fetched instead of querying the cluster again.
    wait_for_nodes_status(
        node_names=nodes_after_exp, status=constants.NODE_READY
    )
    new_spun_nodes = list(set(nodes_after_exp) - set(initial_nodes))
    if node_type == constants.RHEL_OS:
        # RHEL workers need SELinux permissions adjusted before use.
        set_selinux_permissions(workers=new_spun_nodes)
    if mark_for_ocs_label:
        node_obj = ocp.OCP(kind='node')
        for new_spun_node in new_spun_nodes:
            node_obj.add_label(
                resource_name=new_spun_node, label=constants.OPERATOR_NODE_LABEL
            )
            logging.info(
                f"Successfully labeled {new_spun_node} with OCS storage label"
            )
    # Return the node names: callers (e.g. delete_and_create_osd_node_vsphere_upi)
    # index into this result, so returning a bare bool was a bug.
    return new_spun_nodes
def delete_and_create_osd_node_vsphere_upi(osd_node_name, use_existing_node=False):
    """
    Unschedule, drain and delete osd node, and creating a new osd node.
    At the end of the function there should be the same number of osd nodes as
    it was in the beginning, and also ceph health should be OK.
    This function is for vSphere UPI.

    Args:
        osd_node_name (str): the name of the osd node
        use_existing_node (bool): If False, create a new node and label it.
            If True, use an existing node to replace the deleted node
            and label it.

    Returns:
        str: The new node name

    """
    # Remove the target node from the cluster first.
    node_to_remove = get_node_objs(node_names=[osd_node_name])[0]
    remove_nodes([node_to_remove])

    log.info(f"name of deleted node = {osd_node_name}")

    # Worker OS determines how the replacement node must be prepared.
    node_type = (
        constants.RHEL_OS if config.ENV_DATA.get("rhel_workers") else constants.RHCOS
    )

    if not use_existing_node:
        # Spin up a brand-new labeled worker to take the deleted node's place.
        log.info("Preparing to create a new node...")
        return add_new_node_and_label_upi(node_type, 1)[0]

    # Otherwise promote an existing non-OCS worker into the OCS cluster.
    spare_node = get_worker_nodes_not_in_ocs()[0]
    log.info(
        f"Preparing to replace the node {osd_node_name} "
        f"with an existing node {spare_node.name}"
    )
    if node_type == constants.RHEL_OS:
        set_selinux_permissions(workers=[spare_node])
    label_nodes([spare_node])
    return spare_node.name