def join_cluster(vapp, template_name, template_revision, target_nodes=None):
    script = "#!/usr/bin/env bash\n" \
             "kubeadm token create\n" \
             "ip route get 1 | awk '{print $NF;exit}'\n"
    node_names = get_node_names(vapp, NodeType.MASTER)
    master_result = execute_script_in_nodes(vapp=vapp,
                                            node_names=node_names,
                                            script=script)
    errors = _get_script_execution_errors(master_result)
    if errors:
        raise ScriptExecutionError(
            f"Join cluster script execution failed on master node "
            f"{node_names}:{errors}")
    init_info = master_result[0][1].content.decode().split()

    node_names = get_node_names(vapp, NodeType.WORKER)
    if target_nodes is not None:
        node_names = [name for name in node_names if name in target_nodes]
    tmp_script_filepath = get_local_script_filepath(template_name,
                                                    template_revision,
                                                    ScriptFile.NODE)
    tmp_script = utils.read_data_file(tmp_script_filepath, logger=LOGGER)
    script = tmp_script.format(token=init_info[0], ip=init_info[1])
    worker_results = execute_script_in_nodes(vapp=vapp,
                                             node_names=node_names,
                                             script=script)
    errors = _get_script_execution_errors(worker_results)
    if errors:
        raise ScriptExecutionError(
            f"Join cluster script execution failed on worker node "
            f"{node_names}:{errors}")
    for result in worker_results:
        if result[0] != 0:
            raise ClusterJoiningError(f"Couldn't join cluster:"
                                      f"\n{result[2].content.decode()}")
Example #2
def init_cluster(config, vapp, template):
    script_filepath = get_local_script_filepath(template['name'],
                                                template['revision'],
                                                ScriptFile.MASTER)
    script = read_data_file(script_filepath, logger=LOGGER)
    nodes = get_nodes(vapp, NodeType.MASTER)
    result = execute_script_in_nodes(config, vapp, template['admin_password'],
                                     script, nodes)
    if result[0][0] != 0:
        raise ClusterInitializationError(
            f"Couldn\'t initialize cluster:\n{result[0][2].content.decode()}")
Example #3
def init_cluster(vapp, template_name, template_revision):
    script_filepath = get_local_script_filepath(template_name,
                                                template_revision,
                                                ScriptFile.MASTER)
    script = utils.read_data_file(script_filepath, logger=LOGGER)
    node_names = get_node_names(vapp, NodeType.MASTER)
    result = execute_script_in_nodes(vapp=vapp,
                                     node_names=node_names,
                                     script=script)
    if result[0][0] != 0:
        raise ClusterInitializationError(
            f"Couldn\'t initialize cluster:\n{result[0][2].content.decode()}")
Example #4
    def _get_init_script(self):
        """Read the initialization script from disk to create temp vApp.

        :return: content of the initialization script.

        :rtype: str
        """
        init_script_filepath = ltm.get_script_filepath(
            self.template_name, self.template_revision, ScriptFile.INIT)
        init_script = read_data_file(
            init_script_filepath, logger=self.logger,
            msg_update_callback=self.msg_update_callback)
        if self.ssh_key is not None:
            init_script += \
                f"mkdir -p /root/.ssh\n" \
                f"echo '{self.ssh_key}' >> /root/.ssh/authorized_keys\n" \
                f"chmod -R go-rwx /root/.ssh"
        return init_script
Example #5
def join_cluster(config, vapp, template, target_nodes=None):
    init_info = get_init_info(config, vapp, template['admin_password'])
    tmp_script_filepath = get_local_script_filepath(template['name'],
                                                    template['revision'],
                                                    ScriptFile.NODE)
    tmp_script = read_data_file(tmp_script_filepath, logger=LOGGER)
    script = tmp_script.format(token=init_info[0], ip=init_info[1])
    if target_nodes is None:
        nodes = get_nodes(vapp, NodeType.WORKER)
    else:
        nodes = []
        for node in vapp.get_all_vms():
            if node.get('name') in target_nodes:
                nodes.append(node)
    results = execute_script_in_nodes(config, vapp, template['admin_password'],
                                      script, nodes)
    for result in results:
        if result[0] != 0:
            raise ClusterJoiningError('Couldn\'t join cluster:\n%s' %
                                      result[2].content.decode())
Example #6
def init_cluster(vapp, template_name, template_revision):
    try:
        script_filepath = get_local_script_filepath(template_name,
                                                    template_revision,
                                                    ScriptFile.MASTER)
        script = utils.read_data_file(script_filepath, logger=LOGGER)
        node_names = get_node_names(vapp, NodeType.MASTER)
        result = execute_script_in_nodes(vapp=vapp,
                                         node_names=node_names,
                                         script=script)
        errors = _get_script_execution_errors(result)
        if errors:
            raise ScriptExecutionError(
                f"Initialize cluster script execution failed on node "
                f"{node_names}:{errors}")
        if result[0][0] != 0:
            raise ClusterInitializationError(
                f"Couldn't initialize cluster:\n{result[0][2].content.decode()}"
            )  # noqa: E501
    except Exception as e:
        LOGGER.error(e, exc_info=True)
        raise ClusterInitializationError(
            f"Couldn't initialize cluster: {str(e)}")
Example #7
    def _customize_vm(self, vapp, vm_name):
        """Customize a vm in a VApp using customization script.

        :param pyvcloud.vcd.vapp.VApp vapp:
        :param str vm_name:

        :raises Exception: if unable to execute the customization script in
            the vm.
        """
        msg = f"Customizing vApp '{self.temp_vapp_name}', vm '{vm_name}'"
        if self.msg_update_callback:
            self.msg_update_callback.general(msg)
        if self.logger:
            self.logger.info(msg)

        cust_script_filepath = get_local_script_filepath(
            self.template_name, self.template_revision, ScriptFile.CUST)
        cust_script = read_data_file(
            cust_script_filepath,
            logger=self.logger,
            msg_update_callback=self.msg_update_callback)

        vs = get_vsphere(self.sys_admin_client,
                         vapp,
                         vm_name,
                         logger=self.logger)
        callback = vgr_callback(
            prepend_msg='Waiting for guest tools, status: "',
            logger=self.logger,
            msg_update_callback=self.msg_update_callback)
        wait_until_tools_ready(vapp, vm_name, vs, callback=callback)
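        # auto-generated guest password assigned at vApp instantiation; used
        # below to authenticate as root inside the guest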
        password_auto = vapp.get_admin_password(vm_name)

        try:
            result = vs.execute_script_in_guest(
                vs.get_vm_by_moid(vapp.get_vm_moid(vm_name)),
                'root',
                password_auto,
                cust_script,
                target_file=None,
                wait_for_completion=True,
                wait_time=10,
                get_output=True,
                delete_script=True,
                callback=vgr_callback(
                    logger=self.logger,
                    msg_update_callback=self.msg_update_callback))
        except Exception as err:
            # TODO() replace raw exception with specific exception
            # unsure all errors execute_script_in_guest can result in
            # Docker TLS handshake timeout can occur when internet is slow
            if self.msg_update_callback:
                self.msg_update_callback.error(
                    "Failed VM customization. Check CSE install log")
            if self.logger:
                self.logger.error(f"Failed VM customization with error: {err}",
                                  exc_info=True)
            raise

        if len(result) > 0:
            msg = f'Result: {result}'
            if self.msg_update_callback:
                self.msg_update_callback.general_no_color(msg)
            if self.logger:
                self.logger.debug(msg)

            result_stdout = result[1].content.decode()
            result_stderr = result[2].content.decode()

            msg = 'stderr:'
            if self.msg_update_callback:
                self.msg_update_callback.general_no_color(msg)
            if self.logger:
                self.logger.debug(msg)
            if len(result_stderr) > 0:
                if self.msg_update_callback:
                    self.msg_update_callback.general_no_color(result_stderr)
                if self.logger:
                    self.logger.debug(result_stderr)

            msg = 'stdout:'
            if self.msg_update_callback:
                self.msg_update_callback.general_no_color(msg)
            if self.logger:
                self.logger.debug(msg)
            if len(result_stdout) > 0:
                if self.msg_update_callback:
                    self.msg_update_callback.general_no_color(result_stdout)
                if self.logger:
                    self.logger.debug(result_stdout)

        if len(result) == 0 or result[0] != 0:
            msg = "Failed VM customization"
            if self.msg_update_callback:
                self.msg_update_callback.error(f"{msg}. Please check logs.")
            if self.logger:
                self.logger.error(
                    f"{msg}\nResult start===\n{result}\n===Result end",
                    exc_info=True)
            # TODO: replace raw exception with specific exception
            raise Exception(f"{msg}; Result: {result}")

        # Do not reboot VM after customization. Reboot will generate a new
        # machine-id, and once we capture the VM, all VMs deployed from the
        # template will have the same machine-id, which can lead to
        # unpredictable behavior

        msg = f"Customized vApp '{self.temp_vapp_name}', vm '{vm_name}'"
        if self.msg_update_callback:
            self.msg_update_callback.general(msg)
        if self.logger:
            self.logger.info(msg)
Example #8
def add_nodes(client,
              num_nodes,
              node_type,
              org,
              vdc,
              vapp,
              catalog_name,
              template,
              network_name,
              num_cpu=None,
              memory_in_mb=None,
              storage_profile=None,
              ssh_key=None):
    specs = []
    try:
        if num_nodes < 1:
            return None

        # DEV NOTE: With api v33.0 and onwards, get_catalog operation will
        # fail for non admin users of an org which is not hosting the catalog,
        # even if the catalog is explicitly shared with the org in question.
        # This happens because for api v33.0 and onwards, the Org XML no
        # longer returns the href to catalogs accessible to the org, and typed
        # queries hide the catalog link from non admin users.
        # As a workaround, we will use a sys admin client to get the href and
        # pass it forward. Do note that the catalog itself can still be
        # accessed by these non admin users, just that they can't find its
        # href on their own.

        sys_admin_client = None
        try:
            sys_admin_client = vcd_utils.get_sys_admin_client()
            org_name = org.get_name()
            org_resource = sys_admin_client.get_org_by_name(org_name)
            org_sa = Org(sys_admin_client, resource=org_resource)
            catalog_item = org_sa.get_catalog_item(
                catalog_name, template[LocalTemplateKey.CATALOG_ITEM_NAME])
            catalog_item_href = catalog_item.Entity.get('href')
        finally:
            if sys_admin_client:
                sys_admin_client.logout()

        source_vapp = VApp(client, href=catalog_item_href)
        source_vm = source_vapp.get_all_vms()[0].get('name')
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)

        cust_script = None
        if ssh_key is not None:
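            # Guest customization runs this script with 'precustomization' or
            # 'postcustomization' as $1; the key is installed only in the
            # post phase.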
            cust_script = \
                "#!/usr/bin/env bash\n" \
                "if [ x$1=x\"postcustomization\" ];\n" \
                "then\n" \
                "mkdir -p /root/.ssh\n" \
                f"echo '{ssh_key}' >> /root/.ssh/authorized_keys\n" \
                "chmod -R go-rwx /root/.ssh\n" \
                "fi"

        for n in range(num_nodes):
            name = None
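            # draw random names until one is unused; vapp.get_vm raises for
            # a VM name that does not exist in the vApp yet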
            while True:
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': network_name,
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)

        task = vapp.add_vms(specs, power_on=False)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()

        if not num_cpu:
            num_cpu = template[LocalTemplateKey.CPU]
        if not memory_in_mb:
            memory_in_mb = template[LocalTemplateKey.MEMORY]
        for spec in specs:
            vm_name = spec['target_vm_name']
            vm_resource = vapp.get_vm(vm_name)
            vm = VM(client, resource=vm_resource)

            task = vm.modify_cpu(num_cpu)
            client.get_task_monitor().wait_for_status(task)

            task = vm.modify_memory(memory_in_mb)
            client.get_task_monitor().wait_for_status(task)

            task = vm.power_on()
            client.get_task_monitor().wait_for_status(task)
            vapp.reload()

            if node_type == NodeType.NFS:
                LOGGER.debug(f"Enabling NFS server on {vm_name}")
                script_filepath = get_local_script_filepath(
                    template[LocalTemplateKey.NAME],
                    template[LocalTemplateKey.REVISION], ScriptFile.NFSD)
                script = utils.read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(vapp=vapp,
                                                       node_names=[vm_name],
                                                       script=script)
                errors = _get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"VM customization script execution failed on node "
                        f"{vm_name}:{errors}")
    except Exception as e:
        # TODO: get details of the exception to determine cause of failure,
        # e.g. not enough resources available.
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))

    vapp.reload()
    return {'task': task, 'specs': specs}
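
A hypothetical call site for the variant above (client, org, vdc, and vapp
are assumed to be pyvcloud objects obtained elsewhere; template is a dict
keyed by LocalTemplateKey; names and sizes are placeholders):

result = add_nodes(client,
                   num_nodes=2,
                   node_type=NodeType.WORKER,
                   org=org,
                   vdc=vdc,
                   vapp=vapp,
                   catalog_name='cse-catalog',
                   template=template,
                   network_name='ovdc-net',
                   num_cpu=2,
                   memory_in_mb=2048,
                   ssh_key=my_public_key)
task = result['task']  # already waited on internally; specs list the new VMs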
Example #9
def add_nodes(client,
              num_nodes,
              node_type,
              org,
              vdc,
              vapp,
              catalog_name,
              template,
              network_name,
              num_cpu=None,
              memory_in_mb=None,
              storage_profile=None,
              ssh_key_filepath=None):
    specs = []
    try:
        if num_nodes < 1:
            return None
        catalog_item = org.get_catalog_item(
            catalog_name, template[LocalTemplateKey.CATALOG_ITEM_NAME])
        source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
        source_vm = source_vapp.get_all_vms()[0].get('name')
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)

        cust_script = None
        if ssh_key_filepath is not None:
            cust_script = \
                "#!/usr/bin/env bash\n" \
                "if [ x$1=x\"postcustomization\" ];\n" \
                "then\n" \
                "mkdir -p /root/.ssh\n" \
                f"echo '{ssh_key_filepath}' >> /root/.ssh/authorized_keys\n" \
                "chmod -R go-rwx /root/.ssh\n" \
                "fi"

        for n in range(num_nodes):
            name = None
            while True:
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': network_name,
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)

        task = vapp.add_vms(specs, power_on=False)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()

        if not num_cpu:
            num_cpu = template[LocalTemplateKey.CPU]
        if not memory_in_mb:
            memory_in_mb = template[LocalTemplateKey.MEMORY]
        for spec in specs:
            vm_name = spec['target_vm_name']
            vm_resource = vapp.get_vm(vm_name)
            vm = VM(client, resource=vm_resource)

            task = vm.modify_cpu(num_cpu)
            client.get_task_monitor().wait_for_status(task)

            task = vm.modify_memory(memory_in_mb)
            client.get_task_monitor().wait_for_status(task)

            task = vm.power_on()
            client.get_task_monitor().wait_for_status(task)
            vapp.reload()

            if node_type == NodeType.NFS:
                LOGGER.debug(f"Enabling NFS server on {vm_name}")
                script_filepath = get_local_script_filepath(
                    template[LocalTemplateKey.NAME],
                    template[LocalTemplateKey.REVISION], ScriptFile.NFSD)
                script = utils.read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(vapp=vapp,
                                                       node_names=[vm_name],
                                                       script=script)
                errors = _get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"VM customization script execution failed on node "
                        f"{vm_name}:{errors}")
    except Exception as e:
        # TODO: get details of the exception to determine cause of failure,
        # e.g. not enough resources available.
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))

    vapp.reload()
    return {'task': task, 'specs': specs}
Example #10
def add_nodes(qty, template, node_type, config, client, org, vdc, vapp,
              req_spec):
    try:
        if qty < 1:
            return None
        specs = []
        catalog_item = org.get_catalog_item(config['broker']['catalog'],
                                            template['catalog_item_name'])
        source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
        source_vm = source_vapp.get_all_vms()[0].get('name')
        storage_profile = req_spec.get(RequestKey.STORAGE_PROFILE_NAME)
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)

        cust_script_common = ''

        cust_script_init = \
"""#!/usr/bin/env bash
if [ x"$1" = x"postcustomization" ];
then
""" # noqa: E128

        cust_script_end = \
"""
fi
"""  # noqa: E128

        ssh_key_filepath = req_spec.get(RequestKey.SSH_KEY_FILEPATH)
        if ssh_key_filepath is not None:
            cust_script_common += \
f"""
mkdir -p /root/.ssh
echo '{ssh_key_filepath}' >> /root/.ssh/authorized_keys
chmod -R go-rwx /root/.ssh
""" # noqa

        if cust_script_common == '':
            cust_script = None
        else:
            cust_script = cust_script_init + cust_script_common + \
                cust_script_end
        for n in range(qty):
            name = None
            while True:
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': req_spec.get(RequestKey.NETWORK_NAME),
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)

        num_cpu = req_spec.get(RequestKey.NUM_CPU)
        mb_memory = req_spec.get(RequestKey.MB_MEMORY)
        configure_hw = bool(num_cpu or mb_memory)
        task = vapp.add_vms(specs, power_on=not configure_hw)
        # TODO(get details of the exception like not enough resources avail)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()
        if configure_hw:
            for spec in specs:
                vm_resource = vapp.get_vm(spec['target_vm_name'])
                if num_cpu:
                    vm = VM(client, resource=vm_resource)
                    task = vm.modify_cpu(num_cpu)
                    client.get_task_monitor().wait_for_status(task)
                if mb_memory:
                    vm = VM(client, resource=vm_resource)
                    task = vm.modify_memory(mb_memory)
                    client.get_task_monitor().wait_for_status(task)
                vm = VM(client, resource=vm_resource)
                task = vm.power_on()
                client.get_task_monitor().wait_for_status(task)
            vapp.reload()

        for spec in specs:
            # password_auto gives each VM its own generated password, so look
            # it up per node rather than reusing the last loop's value
            password = vapp.get_admin_password(spec['target_vm_name'])
            vm_resource = vapp.get_vm(spec['target_vm_name'])
            # reset root's password from the generated one to the template's
            # known admin password
            command = \
                f"/bin/echo \"root:{template['admin_password']}\" | chpasswd"
            nodes = [vm_resource]
            execute_script_in_nodes(config,
                                    vapp,
                                    password,
                                    command,
                                    nodes,
                                    check_tools=True,
                                    wait=False)
            if node_type == NodeType.NFS:
                LOGGER.debug(
                    f"enabling NFS server on {spec['target_vm_name']}")
                script_filepath = get_local_script_filepath(
                    template['name'], template['revision'], ScriptFile.NFSD)
                script = read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(
                    config, vapp, template['admin_password'], script, nodes)
                errors = get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"Script execution failed on node "
                        f"{spec['target_vm_name']}:{errors}")
    except Exception as e:
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))
    return {'task': task, 'specs': specs}
Example #11
def test_0090_install_retain_temp_vapp(config, unregister_cse_before_test):
    """Test install.

    Installation options: '--config', '--template', '--ssh-key',
        '--retain-temp-vapp'.

    Tests that installation:
    - downloads/uploads ova file,
    - creates photon temp vapp,
    - creates k8s templates
    - skips deleting the temp vapp
    - checks that proper packages are installed in the vm in temp vApp

    command: cse install --config cse_test_config.yaml --retain-temp-vapp
        --ssh-key ~/.ssh/id_rsa.pub
    required files: ~/.ssh/id_rsa.pub, cse_test_config.yaml
    expected: cse registered, catalog exists, source ovas exist,
        temp vapps exist, k8s templates exist.
    """
    cmd = f"install --config {env.ACTIVE_CONFIG_FILEPATH} --ssh-key " \
          f"{env.SSH_KEY_FILEPATH} --retain-temp-vapp"
    result = env.CLI_RUNNER.invoke(cli, cmd.split(), catch_exceptions=False)
    assert result.exit_code == 0,\
        testutils.format_command_info('cse', cmd, result.exit_code,
                                      result.output)

    # check that cse was registered correctly
    env.check_cse_registration(config['amqp']['routing_key'],
                               config['amqp']['exchange'])

    vdc = VDC(env.CLIENT, href=env.VDC_HREF)
    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    for template_config in env.TEMPLATE_DEFINITIONS:
        # check that source ova file exists in catalog
        assert env.catalog_item_exists(
            template_config['source_ova_name']), \
            'Source ova file does not exist when it should.'

        # check that k8s templates exist
        catalog_item_name = get_revisioned_template_name(
            template_config['name'], template_config['revision'])
        assert env.catalog_item_exists(catalog_item_name), \
            'k8s template does not exist when it should.'

        # check that temp vapp exists
        temp_vapp_name = testutils.get_temp_vapp_name(template_config['name'])
        try:
            vdc.reload()
            vapp_resource = vdc.get_vapp(temp_vapp_name)
        except EntityNotFoundException:
            assert False, 'vApp does not exist when it should.'

        # ssh into vms to check for installed software
        vapp = VApp(env.CLIENT, resource=vapp_resource)
        # The temp vapp is shut down before the template is captured, so it
        # needs to be powered on again before we can ssh into it.
        task = vapp.power_on()
        env.CLIENT.get_task_monitor().wait_for_success(task)

        # HACK! let the ssh daemon come up
        time.sleep(env.WAIT_INTERVAL)  # 30 seconds

        ip = vapp.get_primary_ip(TEMP_VAPP_VM_NAME)
        try:
            ssh_client.connect(ip, username='root')  # key was added for root
            # run different commands depending on OS
            if 'photon' in temp_vapp_name:
                script_filepath = get_local_script_filepath(
                    template_config['name'], template_config['revision'],
                    ScriptFile.CUST)
                script = read_data_file(script_filepath)
                pattern = r'(kubernetes\S*)'
                packages = re.findall(pattern, script)
                stdin, stdout, stderr = ssh_client.exec_command("rpm -qa")
                # drop the arch suffix; str.strip('.x86_64\n') would treat the
                # argument as a character set and mangle package names
                installed = [line.replace('.x86_64', '').strip()
                             for line in stdout]
                for package in packages:
                    assert package in installed, \
                        f"{package} not found in Photon VM"
            elif 'ubuntu' in temp_vapp_name:
                script_filepath = get_local_script_filepath(
                    template_config['name'], template_config['revision'],
                    ScriptFile.CUST)
                script = read_data_file(script_filepath)
                pattern = r'((kubernetes|docker\S*|kubelet|kubeadm|kubectl)\S*=\S*)'  # noqa: E501
                packages = [tup[0] for tup in re.findall(pattern, script)]
                cmd = "dpkg -l | awk '{print $2\"=\"$3}'"
                stdin, stdout, stderr = ssh_client.exec_command(cmd)
                installed = [line.strip() for line in stdout]
                for package in packages:
                    assert package in installed, \
                        f"{package} not found in Ubuntu VM"
        finally:
            if ssh_client:
                ssh_client.close()
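
For reference, get_revisioned_template_name (used above to derive the catalog
item name) is not shown on this page. A plausible sketch, assuming CSE's
"<name>_rev<revision>" naming convention, would be:

def get_revisioned_template_name(template_name, revision):
    # e.g. ('ubuntu-16.04_k8s', 1) -> 'ubuntu-16.04_k8s_rev1'
    return f"{template_name}_rev{revision}"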