def _init_remoter(self, ssh_login_info):
        """Create a kubectl-exec based remoter for this pod and install sudo.

        ``ssh_login_info`` is accepted for interface compatibility with other
        node types but is unused: commands reach the pod through the k8s API
        server rather than over SSH.
        """
        parent = self.parent_cluster
        self.remoter = KubernetesCmdRunner(
            pod=self.name,
            container=parent.container,
            namespace=parent.namespace,
            k8s_server_url=parent.k8s_cluster.k8s_server_url)

        # TODO: refactor our commands to use sudo in more organized way and remove `sudo' dependency
        self.remoter.run("yum install -y sudo && yum clean all")
示例#2
0
    def test_run_in_mainthread(  # pylint: disable=too-many-arguments
            self, remoter_type, host: str, stmt: str, verbose: bool, ignore_status: bool, new_session: bool, retry: int,
            timeout: Union[float, None]):
        """Run ``stmt`` twice on one remoter (reconnecting in between) and
        compare both outcomes against a local execution of the same command."""
        run_options = {
            'verbose': verbose,
            'ignore_status': ignore_status,
            'new_session': new_session,
            'retry': retry,
            'timeout': timeout}

        def run_catching(runner):
            # A raised exception is captured as the result so it can be
            # compared against the local outcome by _compare_results().
            try:
                return runner.run(stmt, **run_options)
            except Exception as exc:  # pylint: disable=broad-except
                return exc

        expected = run_catching(LocalCmdRunner())

        if issubclass(remoter_type, (RemoteCmdRunner, RemoteLibSSH2CmdRunner)):
            remoter = remoter_type(hostname=host, user=getpass.getuser(), key_file=self.key_file)
        else:
            remoter = KubernetesCmdRunner(
                FakeKluster('http://127.0.0.1:8001'),
                pod='sct-cluster-gce-minikube-0', container="scylla", namespace="scylla")

        first = run_catching(remoter)
        remoter._reconnect()  # pylint: disable=protected-access
        second = run_catching(remoter)
        remoter.stop()

        self._compare_results(expected, first, stmt=stmt, kwargs=run_options)
        self._compare_results(expected, second, stmt=stmt, kwargs=run_options)
示例#3
0
    def test_create_and_run_in_separate_thread(  # pylint: disable=too-many-arguments
            self, remoter_type, host: str, stmt: str, verbose: bool, ignore_status: bool,
            new_session: bool, retry: int, timeout: Union[float, None]):
        """Create a remoter in the main thread, run ``stmt`` from several
        worker threads, and compare every threaded result with a local run."""
        kwargs = {
            'verbose': verbose,
            'ignore_status': ignore_status,
            'new_session': new_session,
            'retry': retry,
            'timeout': timeout}
        # BUGFIX: was `repr({stmt: stmt, **kwargs})` — the command string was
        # used as the dict key instead of the literal 'stmt' key, so the log
        # line was keyed by the command text itself.
        self.log.info(repr({'stmt': stmt, **kwargs}))
        try:
            expected = LocalCmdRunner().run(stmt, **kwargs)
        except Exception as exc:  # pylint: disable=broad-except
            expected = exc

        # Paramiko fails too often when it is invoked like that, that is why it is not in the test

        if issubclass(remoter_type, (RemoteCmdRunner, RemoteLibSSH2CmdRunner)):
            remoter = remoter_type(hostname=host, user=getpass.getuser(), key_file=self.key_file)
        else:
            remoter = KubernetesCmdRunner(
                FakeKluster('http://127.0.0.1:8001'),
                pod='sct-cluster-gce-minikube-0', container="scylla", namespace="scylla")

        libssh2_thread_results = []

        # Three concurrent threads share one remoter instance; each appends
        # its result (or exception) to libssh2_thread_results.
        self._run_parallel(
            3,
            thread_body=self._create_and_run_in_separate_thread,
            args=(remoter, stmt, kwargs, libssh2_thread_results),
            kwargs={})

        for libssh2_result in libssh2_thread_results:
            self.log.error(str(libssh2_result))
            self._compare_results(expected, libssh2_result, stmt=stmt, kwargs=kwargs)
示例#4
0
 def _create_and_run_in_same_thread(remoter_type, host, key_file, stmt, kwargs, paramiko_thread_results):
     """Thread body: build a remoter, run ``stmt`` twice (reconnecting in
     between), appending each outcome — result or exception — to
     ``paramiko_thread_results``, then stop the remoter."""
     if issubclass(remoter_type, (RemoteCmdRunner, RemoteLibSSH2CmdRunner)):
         runner = remoter_type(hostname=host, user=getpass.getuser(), key_file=key_file)
     else:
         runner = KubernetesCmdRunner(
             FakeKluster('http://127.0.0.1:8001'),
             pod='sct-cluster-gce-minikube-0', container="scylla", namespace="scylla")
     for attempt in range(2):
         if attempt:
             # Force a fresh session before the second run.
             runner._reconnect()  # pylint: disable=protected-access
         try:
             outcome = runner.run(stmt, **kwargs)
         except Exception as exc:  # pylint: disable=broad-except
             outcome = exc
         paramiko_thread_results.append(outcome)
     runner.stop()
示例#5
0
 def _init_remoter(self, ssh_login_info):
     """Create a kubectl-exec based remoter for this pod.

     ``ssh_login_info`` is unused: commands travel through the k8s API
     server, not SSH.
     """
     parent = self.parent_cluster
     self.remoter = KubernetesCmdRunner(pod=self.name,
                                        container=parent.container,
                                        namespace=parent.namespace,
                                        k8s_server_url=parent.k8s_cluster.k8s_server_url)
示例#6
0
class BasePodContainer(cluster.BaseNode):
    """A Kubernetes pod exposed through the generic ``BaseNode`` interface.

    Commands run inside the pod's container via kubectl-exec (no SSH), and
    node metadata (IP addresses, container/service status) is resolved
    through the Kubernetes API.
    """

    def __init__(self,
                 name: str,
                 parent_cluster: "PodCluster",
                 node_prefix: str = "node",
                 node_index: int = 1,
                 base_logdir: Optional[str] = None,
                 dc_idx: int = 0):
        # Store the pod ordinal before base-class init so `tags` can use it.
        self.node_index = node_index
        super().__init__(name=name,
                         parent_cluster=parent_cluster,
                         base_logdir=base_logdir,
                         node_prefix=node_prefix,
                         dc_idx=dc_idx)

    @staticmethod
    def is_docker() -> bool:
        # For the rest of the framework a pod behaves like a container.
        return True

    @property
    def tags(self) -> Dict[str, str]:
        # BUGFIX: restored the `@property` decorator.  The base-class `tags`
        # is accessed as a mapping (`**super().tags` below), so without the
        # decorator this override would expose a bound method where callers
        # expect a dict.
        return {
            **super().tags,
            "NodeIndex": str(self.node_index),
        }

    def _init_remoter(self, ssh_login_info):
        # `ssh_login_info` is unused: commands go through the k8s API server.
        self.remoter = KubernetesCmdRunner(
            pod=self.name,
            container=self.parent_cluster.container,
            namespace=self.parent_cluster.namespace,
            k8s_server_url=self.parent_cluster.k8s_cluster.k8s_server_url)

    def _init_port_mapping(self):
        # Ports are exposed via k8s services; no manual mapping required.
        pass

    @property
    def system_log(self):
        # Pod logs are streamed into this file by the journal thread.
        return os.path.join(self.logdir, "system.log")

    @property
    def region(self):
        return self.parent_cluster.k8s_cluster.datacenter[
            0]  # TODO: find the node and return it's region.

    def start_journal_thread(self):
        # Stream the pod's logs (via `kubectl logs`) into `system_log`.
        self._journal_thread = get_system_logging_thread(
            logs_transport="kubectl",
            node=self,
            target_log_file=self.system_log)
        if self._journal_thread:
            self.log.info("Use %s as logging daemon",
                          type(self._journal_thread).__name__)
            self._journal_thread.start()
        else:
            TestFrameworkEvent(
                source=self.__class__.__name__,
                source_method="start_journal_thread",
                message="Got no logging daemon by unknown reason").publish()

    def check_spot_termination(self):
        # Pods are not spot/preemptible instances; nothing to check.
        pass

    @property
    def scylla_listen_address(self):
        # None-safe: returns None when the pod cannot be found.
        pod_status = self._pod_status
        return pod_status and pod_status.pod_ip

    @property
    def _pod_status(self):
        # Status of the pod named after this node, or None when absent.
        pods = KubernetesOps.list_pods(
            self.parent_cluster,
            namespace=self.parent_cluster.namespace,
            field_selector=f"metadata.name={self.name}")
        return pods[0].status if pods else None

    @property
    def _cluster_ip_service(self):
        # ClusterIP service carrying this node's name, or None.
        services = KubernetesOps.list_services(
            self.parent_cluster,
            namespace=self.parent_cluster.namespace,
            field_selector=f"metadata.name={self.name}")
        return services[0] if services else None

    @property
    def _loadbalancer_service(self):
        # LoadBalancer service exposing this node externally, or None.
        services = KubernetesOps.list_services(
            self.parent_cluster,
            namespace=self.parent_cluster.namespace,
            field_selector=f"metadata.name={self.name}-loadbalancer")
        return services[0] if services else None

    @property
    def _container_status(self):
        # Status entry of this node's Scylla container, or None.
        pod_status = self._pod_status
        if pod_status:
            return next((x for x in pod_status.container_statuses
                         if x.name == self.parent_cluster.container), None)
        return None

    def _refresh_instance_state(self):
        # Returns (public_ips, private_ips) as expected by the base class;
        # entries may be None while the service/pod is not available yet.
        cluster_ip_service = self._cluster_ip_service
        cluster_ip = cluster_ip_service and cluster_ip_service.spec.cluster_ip
        pod_status = self._pod_status
        return (
            [
                cluster_ip,
            ],
            [
                cluster_ip,
                pod_status and pod_status.pod_ip,
            ],
        )

    def start_scylla_server(self,
                            verify_up=True,
                            verify_down=False,
                            timeout=300,
                            verify_up_timeout=300):
        if verify_down:
            self.wait_db_down(timeout=timeout)
        self.remoter.run("supervisorctl start scylla", timeout=timeout)
        if verify_up:
            self.wait_db_up(timeout=verify_up_timeout)

    @cluster.log_run_info
    def start_scylla(self, verify_up=True, verify_down=False, timeout=300):
        self.start_scylla_server(verify_up=verify_up,
                                 verify_down=verify_down,
                                 timeout=timeout)

    def stop_scylla_server(self,
                           verify_up=False,
                           verify_down=True,
                           timeout=300,
                           ignore_status=False):
        # `ignore_status` is kept for interface compatibility but unused.
        if verify_up:
            self.wait_db_up(timeout=timeout)
        self.remoter.run('supervisorctl stop scylla', timeout=timeout)
        if verify_down:
            self.wait_db_down(timeout=timeout)

    @cluster.log_run_info
    def stop_scylla(self, verify_up=False, verify_down=True, timeout=300):
        self.stop_scylla_server(verify_up=verify_up,
                                verify_down=verify_down,
                                timeout=timeout)

    def restart_scylla_server(self,
                              verify_up_before=False,
                              verify_up_after=True,
                              timeout=300,
                              ignore_status=False):
        # `ignore_status` is kept for interface compatibility but unused.
        if verify_up_before:
            self.wait_db_up(timeout=timeout)
        self.remoter.run("supervisorctl restart scylla", timeout=timeout)
        if verify_up_after:
            self.wait_db_up(timeout=timeout)

    @cluster.log_run_info
    def restart_scylla(self,
                       verify_up_before=False,
                       verify_up_after=True,
                       timeout=300):
        self.restart_scylla_server(verify_up_before=verify_up_before,
                                   verify_up_after=verify_up_after,
                                   timeout=timeout)

    @property
    def image(self) -> str:
        # Image currently running this node's Scylla container.
        return self._container_status.image

    def iptables_node_redirect_rules(
            self,
            dest_ip: str,
            iptables_bin: str = IPTABLES_BIN,
            command: IptablesChainCommand = "A") -> List[str]:
        # One redirect rule per load-balancer port: node_port on `dest_ip`
        # is forwarded to the matching target_port on the ClusterIP.
        to_ip = self._cluster_ip_service.spec.cluster_ip
        return [
            iptables_port_redirect_rule(iptables_bin=iptables_bin,
                                        command=command,
                                        to_ip=to_ip,
                                        to_port=p.target_port,
                                        dest_ip=dest_ip,
                                        dest_port=p.node_port)
            for p in self._loadbalancer_service.spec.ports
        ]

    @property
    def ipv6_ip_address(self):
        raise NotImplementedError()

    def restart(self):
        raise NotImplementedError(
            "Not implemented yet")  # TODO: implement this method.

    @contextlib.contextmanager
    def remote_scylla_yaml(self,
                           path: str = cluster.SCYLLA_YAML_PATH
                           ) -> ContextManager:
        """Update scylla.yaml, k8s way

        Scylla Operator handles scylla.yaml updates using ConfigMap resource and we don't need to update it
        manually on each node.  Just collect all required changes to parent_cluster.scylla_yaml dict and if it
        differs from previous one, set parent_cluster.scylla_yaml_update_required flag.  No actual changes done here.
        Need to do cluster rollout restart.

        More details here: https://github.com/scylladb/scylla-operator/blob/master/docs/generic.md#configure-scylla
        """
        with self.parent_cluster.scylla_yaml_lock:
            scylla_yaml_copy = deepcopy(self.parent_cluster.scylla_yaml)
            yield self.parent_cluster.scylla_yaml
            if scylla_yaml_copy == self.parent_cluster.scylla_yaml:
                LOGGER.debug("%s: scylla.yaml hasn't been changed", self)
                return
            original = yaml.safe_dump(scylla_yaml_copy).splitlines(
                keepends=True)
            changed = yaml.safe_dump(
                self.parent_cluster.scylla_yaml).splitlines(keepends=True)
            diff = "".join(unified_diff(original, changed))
            LOGGER.debug("%s: scylla.yaml requires to be updated with:\n%s",
                         self, diff)
            self.parent_cluster.scylla_yaml_update_required = True
class BasePodContainer(cluster.BaseNode):
    """Kubernetes-pod flavour of ``BaseNode``.

    Commands are executed inside the pod via kubectl-exec and node metadata
    is resolved through the Kubernetes API.

    NOTE(review): this appears to be an earlier/duplicate revision of the
    ``BasePodContainer`` defined earlier in this file; at import time the
    later definition shadows the earlier one.
    """

    def __init__(self, name: str, parent_cluster: "PodCluster", node_prefix: str = "node", node_index: int = 1,
                 base_logdir: Optional[str] = None, dc_idx: int = 0):
        # Store the pod ordinal before base-class init so `tags` can use it.
        self.node_index = node_index
        super().__init__(name=name,
                         parent_cluster=parent_cluster,
                         base_logdir=base_logdir,
                         node_prefix=node_prefix,
                         dc_idx=dc_idx)

    # NOTE(review): `super().tags` is unpacked as a mapping here, which
    # suggests the base-class `tags` is a property; this override lacks
    # `@property`, so `node.tags` on this class yields a bound method —
    # confirm whether the decorator was dropped accidentally.
    def tags(self) -> Dict[str, str]:
        return {**super().tags,
                "NodeIndex": str(self.node_index), }

    def _init_remoter(self, ssh_login_info):
        # `ssh_login_info` is unused: commands go through the k8s API server.
        self.remoter = KubernetesCmdRunner(pod=self.name,
                                           container=self.parent_cluster.container,
                                           namespace=self.parent_cluster.namespace,
                                           k8s_server_url=self.parent_cluster.k8s_cluster.k8s_server_url)

        # TODO: refactor our commands to use sudo in more organized way and remove `sudo' dependency
        self.remoter.run("yum install -y sudo && yum clean all")

    def _init_port_mapping(self):
        # Ports are exposed via k8s services; no manual mapping required.
        pass

    @property
    def system_log(self):
        # Pod logs are streamed into this file by the journal thread.
        return os.path.join(self.logdir, "system.log")

    @property
    def region(self):
        return self.parent_cluster.k8s_cluster.datacenter[0]  # TODO: find the node and return it's region.

    def start_journal_thread(self):
        # Stream the pod's logs (via `kubectl logs`) into `system_log`.
        self._journal_thread = get_system_logging_thread(logs_transport="kubectl",
                                                         node=self,
                                                         target_log_file=self.system_log)
        if self._journal_thread:
            self.log.info("Use %s as logging daemon", type(self._journal_thread).__name__)
            self._journal_thread.start()
        else:
            TestFrameworkEvent(source=self.__class__.__name__,
                               source_method="start_journal_thread",
                               message="Got no logging daemon by unknown reason").publish()

    def check_spot_termination(self):
        # Pods are not spot/preemptible instances; nothing to check.
        pass

    @property
    def scylla_listen_address(self):
        # NOTE(review): `_pod_status` may return None (pod not found), which
        # would raise AttributeError here — consider a None guard.
        return self._pod_status.pod_ip

    @property
    def _pod_status(self):
        # Status of the pod named after this node, or None when absent.
        pods = KubernetesOps.list_pods(self.parent_cluster, namespace=self.parent_cluster.namespace,
                                       field_selector=f"metadata.name={self.name}")
        return pods[0].status if pods else None

    @property
    def _cluster_ip_service(self):
        # ClusterIP service carrying this node's name, or None.
        services = KubernetesOps.list_services(self.parent_cluster, namespace=self.parent_cluster.namespace,
                                               field_selector=f"metadata.name={self.name}")
        return services[0] if services else None

    @property
    def _loadbalancer_service(self):
        # LoadBalancer service exposing this node externally, or None.
        services = KubernetesOps.list_services(self.parent_cluster, namespace=self.parent_cluster.namespace,
                                               field_selector=f"metadata.name={self.name}-loadbalancer")
        return services[0] if services else None

    @property
    def _container_status(self):
        # Status entry of this node's Scylla container, or None.
        pod_status = self._pod_status
        if pod_status:
            return next((x for x in pod_status.container_statuses if x.name == self.parent_cluster.container), None)
        return None

    def _refresh_instance_state(self):
        # Returns (public_ips, private_ips) as expected by the base class.
        # NOTE(review): unlike the sibling revision, this dereferences the
        # service/pod without None checks — it will raise if either is absent.
        return ([self._cluster_ip_service.spec.cluster_ip, ],
                [self._cluster_ip_service.spec.cluster_ip, self._pod_status.pod_ip, ], )

    def start_scylla_server(self, verify_up=True, verify_down=False, timeout=300, verify_up_timeout=300):
        if verify_down:
            self.wait_db_down(timeout=timeout)
        self.remoter.run("supervisorctl start scylla", timeout=timeout)
        if verify_up:
            self.wait_db_up(timeout=verify_up_timeout)

    @cluster.log_run_info
    def start_scylla(self, verify_up=True, verify_down=False, timeout=300):
        self.start_scylla_server(verify_up=verify_up, verify_down=verify_down, timeout=timeout)

    def stop_scylla_server(self, verify_up=False, verify_down=True, timeout=300, ignore_status=False):
        # NOTE(review): `ignore_status` is accepted but never used here.
        if verify_up:
            self.wait_db_up(timeout=timeout)
        # NOTE(review): only the stop command uses `sudo` while start/restart
        # do not — confirm whether the prefix is still required.
        self.remoter.run('sudo supervisorctl stop scylla', timeout=timeout)
        if verify_down:
            self.wait_db_down(timeout=timeout)

    @cluster.log_run_info
    def stop_scylla(self, verify_up=False, verify_down=True, timeout=300):
        self.stop_scylla_server(verify_up=verify_up, verify_down=verify_down, timeout=timeout)

    def restart_scylla_server(self, verify_up_before=False, verify_up_after=True, timeout=300, ignore_status=False):
        # NOTE(review): `ignore_status` is accepted but never used here.
        if verify_up_before:
            self.wait_db_up(timeout=timeout)
        self.remoter.run("supervisorctl restart scylla", timeout=timeout)
        if verify_up_after:
            self.wait_db_up(timeout=timeout)

    @cluster.log_run_info
    def restart_scylla(self, verify_up_before=False, verify_up_after=True, timeout=300):
        self.restart_scylla_server(verify_up_before=verify_up_before, verify_up_after=verify_up_after, timeout=timeout)

    @property
    def image(self) -> str:
        # Image currently running this node's Scylla container.
        return self._container_status.image

    def iptables_node_redirect_rules(self, dest_ip: str) -> str:
        # Build newline-joined iptables rules redirecting each load-balancer
        # node_port on `dest_ip` to the matching target_port on the ClusterIP.
        to_ip = self._cluster_ip_service.spec.cluster_ip
        ports = self._loadbalancer_service.spec.ports
        return "\n".join(iptables_port_redirect_rule(to_ip, p.target_port, dest_ip, p.node_port) for p in ports)

    @property
    def ipv6_ip_address(self):
        # IPv6 addressing is not implemented for k8s pods.
        raise NotImplementedError()

    def restart(self):
        raise NotImplementedError("Not implemented yet")  # TODO: implement this method.