def add_nodes(self, count: int, ec2_user_data: str = "", dc_idx: int = 0,
              enable_auto_bootstrap: bool = False) -> List[BasePodContainer]:
    """Provision ``count`` new pods and wire up host-side access to them.

    Delegates pod creation to the parent class, exposes each new pod's Scylla
    ports through a per-pod Service, installs local iptables redirect rules
    (and registers their removal at interpreter exit), then refreshes the
    nodes' own redirect rules.

    Returns the list of newly created node objects.
    """
    added = super().add_nodes(count=count, ec2_user_data=ec2_user_data,
                              dc_idx=dc_idx, enable_auto_bootstrap=enable_auto_bootstrap)
    for new_node in added:
        # The same statefulset pod-name expression serves as both the Service
        # labels and its selector.
        pod_match = f"statefulset.kubernetes.io/pod-name={new_node.name}"
        KubernetesOps.expose_pod_ports(self.k8s_cluster, new_node.name,
                                       ports=SCYLLA_POD_EXPOSED_PORTS,
                                       labels=pod_match, selector=pod_match,
                                       namespace=self.namespace)
    insert_rules = "\n".join(self.hydra_iptables_redirect_rules(nodes=added))
    LOCALRUNNER.run_shell_script(cmd=insert_rules, sudo=True)
    # Ensure the redirect rules are removed again when the process exits.
    delete_rules = "\n".join(self.hydra_iptables_redirect_rules(command="D", nodes=added))
    atexit.register(LOCALRUNNER.run_shell_script, cmd=delete_rules, sudo=True)
    self.update_nodes_iptables_redirect_rules(nodes=added)
    return added
def add_nodes(self, count: int, ec2_user_data: str = "", dc_idx: int = 0, rack: int = 0,
              enable_auto_bootstrap: bool = False) -> List[MinikubeScyllaPodContainer]:
    """Provision ``count`` new pods in the given rack and expose them locally.

    Delegates pod creation to the parent class, exposes each new pod's Scylla
    ports through a per-pod Service, then installs the hydra and per-node
    iptables redirect rules for the new pods.

    Returns the list of newly created node objects.
    """
    added = super().add_nodes(count=count, ec2_user_data=ec2_user_data,
                              dc_idx=dc_idx, rack=rack,
                              enable_auto_bootstrap=enable_auto_bootstrap)
    for new_node in added:
        # One statefulset pod-name expression doubles as labels and selector.
        pod_match = f"statefulset.kubernetes.io/pod-name={new_node.name}"
        KubernetesOps.expose_pod_ports(self.k8s_cluster, new_node.name,
                                       ports=SCYLLA_POD_EXPOSED_PORTS,
                                       labels=pod_match, selector=pod_match,
                                       namespace=self.namespace)
    self.add_hydra_iptables_rules(nodes=added)
    self.update_nodes_iptables_redirect_rules(nodes=added)
    return added
def terminate_node(self, node: BasePodContainer) -> None:
    """Tear down ``node``; only the most recently added node may be removed.

    Deletes the node's local iptables redirect rules and its exposing Service
    before delegating the actual termination to the parent class.
    """
    assert self.nodes[-1] == node, "Can withdraw the last node only"
    self.update_nodes_iptables_redirect_rules(command="D", nodes=[node])
    KubernetesOps.unexpose_pod_ports(self.k8s_cluster, node.name,
                                     namespace=self.namespace)
    super().terminate_node(node)
def destroy(self):
    """Remove this pod's host-side redirect rules and Service, then destroy it."""
    owner = self.parent_cluster
    owner.update_nodes_iptables_redirect_rules(command="D", nodes=[self])
    KubernetesOps.unexpose_pod_ports(owner.k8s_cluster, self.name,
                                     namespace=owner.namespace)
    super().destroy()
def send_files(self, src, dst, delete_dst=False, preserve_symlinks=False, verbose=False, timeout=300):
    """Copy a local file into the pod.

    ``delete_dst``, ``preserve_symlinks`` and ``verbose`` are accepted for
    interface compatibility with other remoters but are not acted upon here.
    ``timeout`` bounds the copy in seconds; it was previously hard-coded to
    300 — made a parameter for consistency with ``receive_files``, keeping
    the same default so existing callers are unaffected.

    Always returns True (failures surface as exceptions from copy_file).
    """
    KubernetesOps.copy_file(self.kluster, src, f"{self.namespace}/{self.pod}:{dst}",
                            container=self.container, timeout=timeout)
    return True
def receive_files(self, src, dst, delete_dst=False, preserve_perm=True, preserve_symlinks=False, timeout=300):
    """Copy a file out of the pod to a local path.

    ``delete_dst``, ``preserve_perm`` and ``preserve_symlinks`` are accepted
    for interface compatibility with other remoters but are not acted upon
    here. ``timeout`` bounds the copy in seconds.

    Always returns True (failures surface as exceptions from copy_file).
    """
    # Fix: pass the kluster object as the first argument, as send_files does —
    # previously `self` (this remoter) was passed instead of `self.kluster`.
    KubernetesOps.copy_file(self.kluster, f"{self.namespace}/{self.pod}:{src}", dst,
                            container=self.container, timeout=timeout)
    return True
def __init__(self, context: Context) -> None:
    """Set up reader state: a core-v1 API client built from the context's
    kluster API client, an RLock guarding websocket use, and no process yet."""
    super().__init__(context)
    api_client = context.k8s_kluster.get_api_client()
    self._k8s_core_v1_api = KubernetesOps.core_v1_api(api_client)
    self._ws_lock = threading.RLock()
    self.process = None
def _logger_cmd(self) -> str:
    """Build a shell command that appends a warning to the target log file
    when pods without any allowed label are scheduled on Scylla-pool nodes.

    Returns '' when checking is disabled, the Scylla pool is not registered,
    or no mis-scheduled pods are found.
    """
    allowed_labels = self._cluster.allowed_labels_on_scylla_node
    if not allowed_labels:
        return ''
    pool_name = self._cluster.SCYLLA_POOL_NAME
    if pool_name not in self._cluster.pools:
        self._log.warning(
            "'%s' pool is not registered. Can not get node names to check pods scheduling",
            pool_name)
        return ''
    scylla_node_names = [item.metadata.name
                         for item in self._cluster.pools[pool_name].nodes.items]
    offenders = []
    try:
        for pod in KubernetesOps.list_pods(self._cluster):
            if pod.spec.node_name not in scylla_node_names:
                continue
            # A pod is fine if any of its labels is in the allowed set.
            pod_labels = pod.metadata.labels.items()
            if not any(label in pod_labels for label in allowed_labels):
                offenders.append(f"{pod.metadata.name} ({pod.spec.node_name} node)")
    except Exception as details:  # pylint: disable=broad-except
        self._log.warning("Failed to get pods list: %s", str(details))
    if not offenders:
        return ''
    joined_info = ', '.join(offenders)
    message = f"{self.WRONG_SCHEDULED_PODS_MESSAGE}: {joined_info}"
    return f"echo \"I`date -u +\"%m%d %H:%M:%S\"` {message}\" >> {self._target_log_file} 2>&1"
def _logger_cmd(self) -> str:
    """Build a shell command that appends a warning to the target log file
    when pods without any allowed label are scheduled on Scylla-pool nodes.

    Uses a field selector to restrict the pod listing to Scylla-pool nodes.
    Returns '' when checking is disabled, the Scylla pool is not registered,
    or no mis-scheduled pods are found.
    """
    if not self._cluster.allowed_labels_on_scylla_node:
        return ''
    wrong_scheduled_pods_on_scylla_node = []
    if self._cluster.SCYLLA_POOL_NAME not in self._cluster.pools:
        # Guard against KeyError when the pool has not been registered yet
        # (the sibling implementation handles this the same way).
        self._log.warning(
            "'%s' pool is not registered. Can not get node names to check pods scheduling",
            self._cluster.SCYLLA_POOL_NAME)
        return ''
    node_names = [
        node.metadata.name
        for node in self._cluster.pools[self._cluster.SCYLLA_POOL_NAME].nodes.items
    ]
    try:
        for pod in KubernetesOps.list_pods(
                self._cluster,
                field_selector=f"spec.nodeName in ({', '.join(node_names)})"):
            for key, value in self._cluster.allowed_labels_on_scylla_node:
                if (key, value) in pod.metadata.labels.items():
                    break
            else:
                # Fix: V1Pod has no `nodeName` attribute — the node name lives
                # at `pod.spec.node_name` (the old code raised AttributeError,
                # silently swallowed by the broad except below).
                wrong_scheduled_pods_on_scylla_node.append(
                    f"{pod.metadata.name} ({pod.spec.node_name} node)")
    except Exception as details:  # pylint: disable=broad-except
        self._log.debug("Failed to get pods list: %s", str(details))
    if not wrong_scheduled_pods_on_scylla_node:
        return ''
    joined_info = ', '.join(wrong_scheduled_pods_on_scylla_node)
    message = f"{self.WRONG_SCHEDULED_PODS_MESSAGE}: {joined_info}"
    return f"echo \"I`date -u +\"%m%d %H:%M:%S\"` {message}\" >> {self._target_log_file} 2>&1"
def add_nodes(self, count: int, ec2_user_data: str = "", dc_idx: int = 0,
              enable_auto_bootstrap: bool = False) -> List[BasePodContainer]:
    """Register the already-running pods of this cluster as node objects.

    Pods are created externally; this discovers them in the cluster namespace
    and wraps each in a node. ``count`` must equal the number of pods found —
    this method cannot create or remove pods.
    """
    discovered = KubernetesOps.list_pods(self, namespace=self.namespace)
    self.log.debug("Numbers of pods: %s", len(discovered))
    assert count == len(discovered), "You can't alter number of pods here"
    new_nodes = []
    for idx, pod in enumerate(discovered):
        wrapped = self._create_node(idx, pod.metadata.name)
        new_nodes.append(wrapped)
        self.nodes.append(wrapped)
    return new_nodes
def __init__(self, pod: str, container: Optional[str] = None,
             namespace: str = "default",
             k8s_server_url: Optional[str] = None,
             k8s_configuration: Optional[k8s.client.Configuration] = None) -> None:
    """Bind this remoter to a pod (and optional container) in a namespace.

    Either an explicit ``k8s_configuration`` is supplied (in which case
    ``k8s_server_url`` is ignored and the server URL is taken from the
    configuration's host), or one is created from ``k8s_server_url``.
    """
    self.pod = pod
    self.container = container
    self.namespace = namespace
    if k8s_configuration is not None:
        if k8s_server_url is not None:
            self.log.warning("`k8s_configuration' is not None, `k8s_server_url' parameter ignored")
        self.k8s_server_url = k8s_configuration.host
    else:
        # Set the URL attribute before handing `self` to
        # create_k8s_configuration, which receives this object.
        self.k8s_server_url = k8s_server_url
        k8s_configuration = KubernetesOps.create_k8s_configuration(self)
    self.k8s_configuration = k8s_configuration
    super().__init__(hostname=f"{pod}/{container}")
def __init__(self, context: Context) -> None:
    """Set up reader state: a core-v1 API client built from the context's
    config, an RLock guarding websocket use, and no process yet."""
    super().__init__(context)
    self._k8s_core_v1_api = KubernetesOps.core_v1_api(context.config)
    self._ws_lock = threading.RLock()
    self.process = None
def _k8s_scylla_cluster_api(self) -> Resource:
    """Dynamic-client resource handle for the ScyllaCluster custom resource."""
    target = self.k8s_cluster
    return KubernetesOps.dynamic_api(target,
                                     api_version=SCYLLA_API_VERSION,
                                     kind=SCYLLA_CLUSTER_RESOURCE_KIND)
def _k8s_core_v1_api(self):
    """Core v1 API client for this cluster's kubernetes cluster."""
    target = self.k8s_cluster
    return KubernetesOps.core_v1_api(target)
def _loadbalancer_service(self):
    """This pod's ``<name>-loadbalancer`` Service, or None if it doesn't exist."""
    matches = KubernetesOps.list_services(
        self.parent_cluster,
        namespace=self.parent_cluster.namespace,
        field_selector=f"metadata.name={self.name}-loadbalancer")
    if not matches:
        return None
    return matches[0]
def _pod_status(self):
    """The status object of this node's pod, or None if the pod is not found."""
    matches = KubernetesOps.list_pods(
        self.parent_cluster,
        namespace=self.parent_cluster.namespace,
        field_selector=f"metadata.name={self.name}")
    if not matches:
        return None
    return matches[0].status
def helm(self, *args, **kwargs):
    """Run a helm command via KubernetesOps, using the last node's remoter."""
    node_remoter = self.nodes[-1].remoter
    return KubernetesOps.helm(self, *args, **kwargs, remoter=node_remoter)