Example #1
    def start(self):
        """Start the process"""
        if not self.user.server.port:
            self.user.server.port = random_port()
            self.db.commit()
        job = yield self.submit_batch_script()

        # We are called with a timeout, and if the timeout expires this function will
        # be interrupted at the next yield, and self.stop() will be called. 
        # So this function should not return unless successful, and if unsuccessful
        # should either raise an Exception or loop forever.
        assert len(self.job_id) > 0
        while True:
            yield self.poll()
            if self.state_isrunning():
                break
            else:
                if self.state_ispending():
                    self.log.debug('Job ' + self.job_id + ' still pending')
                else:
                    self.log.warning('Job ' + self.job_id + ' neither pending nor running.\n' +
                                     self.job_status)
                assert self.state_ispending()
            yield gen.sleep(self.startup_poll_interval)

        self.user.server.ip = self.state_gethost()
        self.db.commit()
        self.log.info("Notebook server job {0} started at {1}:{2}".format(
                        self.job_id, self.user.server.ip, self.user.server.port)
            )
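Every excerpt in this collection obtains its port from a random_port() helper. As context, here is a minimal standalone sketch of how such a helper is commonly implemented (this mirrors what jupyterhub.utils.random_port does: bind a throwaway socket to port 0 so the OS picks a free ephemeral port, then release it):

import socket

def random_port():
    # Ask the OS for a free ephemeral port by binding to port 0,
    # then close the probe socket and return the chosen port number.
    sock = socket.socket()
    sock.bind(('', 0))
    port = sock.getsockname()[1]
    sock.close()
    return port

Note the inherent race: the port is only known to be free at probe time, not when the spawned server later binds it.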
Example #2
    def start(self):
        """
        Submit the job to the queue and wait for it to start
        """
        self.user.server.port = random_port()

        cmd = self.cmd_prefix.copy()
        cmd.extend([
            'qsub', '-b', 'y', '-j', 'y', '-N', 'jupyterhub', '-wd',
            '/home/{}'.format(self.user.name)
        ])
        cmd.extend([sys.executable, '-m', 'jupyterhub.singleuser'])
        cmd.extend(self.get_args())

        self.log.info("SGE: CMD: {}".format(cmd))

        env = self.env.copy()

        self.proc = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE)
        r = self.proc.stdout.read().decode('utf-8')
        self.log.info("SGE: {}".format(r))
        jid = int(r.split('Your job ')[1].split()[0])
        self.jobid = jid

        state = self.qstat_t(jid, 'state')
        while state != 'r':
            time.sleep(2)
            state = self.qstat_t(jid, 'state')
            self.log.info("SGE: Job State: {}".format(state))

        host = self.qstat_t(jid, 'host')
        host = host.split('@')[1].split('.')[0]
        self.log.info("SGE: The single user server"
                      " is running on: {}".format(host))
        self.user.server.ip = host
Example #3
    def start(self):
        """
        Submit the (single-user Jupyter server) job to SGE and wait for it to start.

        Also stores the IP and port of the single-user server in self.user.server.

        NB you can relax the Spawner.start_timeout config value as necessary to
        ensure that the SGE job is given enough time to start.
        """
        self.port = random_port()

        # Open a (Jinja2) template for a batch job
        with open(self.sge_template, 'r') as f:
            # Instantiate the template using the username and
            # some arguments for the single-user Jupyter server process
            batch_job_submission_script = jinja2.Template(f.read()).render(
                working_dir='/home/{}'.format(self.user.name),
                jh_args=' '.join(self.get_args()),
                user_options=self.user_options)

        self.log.info("SGE: batch job sub script: '{}'".format(
            batch_job_submission_script))

        # Ensure command for submitting job run as correct user
        # by prefixing command with sudo -u <username>
        cmd = self.cmd_prefix.copy()
        # Ensure the JupyterHub API token is defined in
        # the worker session
        cmd.extend(['qsub', '-v', ','.join(self.jh_env_vars_for_job)])
        self.log.info("SGE: CMD: {}".format(cmd))

        self.proc = Popen(cmd,
                          stdout=PIPE,
                          stdin=PIPE,
                          stderr=STDOUT,
                          env=self.get_env())
        # Pipe the batch job submission script (filled-in Jinja2 template)
        # into the qsub process's stdin (saves having to create a temporary
        # file and deal with permissions)
        r = self.proc.communicate(input=batch_job_submission_script.encode(
            'utf-8'))[0].decode('utf-8')
        self.log.info("SGE: {}".format(r))
        # Get the ID of the job submitted to SGE
        jid = int(r.split('Your job ')[1].split()[0])
        self.jobid = jid

        # Wait until the worker session has started
        state = self.qstat_t(jid, 'state')
        while state != 'r':
            yield gen.sleep(2.0)
            state = self.qstat_t(jid, 'state')
            self.log.info("SGE: Job State: {}".format(state))

        # Get and store the IP of the host of the worker session
        host = self.qstat_t(jid, 'host')
        host = host.split('@')[1].split('.')[0]
        self.log.info("SGE: The single user server"
                      " is running on: {}".format(host))
        self.host = host
        return (host, self.port)
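Examples #2 and #3 poll the same qstat_t helper, but #2 blocks the Hub's event loop with time.sleep(2) while #3 yields control back with gen.sleep. A minimal sketch of the non-blocking pattern in plain asyncio, assuming qstat_t behaves as in the excerpts above (qstat_t(jid, 'state') returns an SGE state code, with 'r' meaning running):

import asyncio

async def wait_until_running(qstat_t, jid, poll_interval=2.0):
    # Poll the SGE job state without blocking the event loop.
    state = qstat_t(jid, 'state')
    while state != 'r':
        await asyncio.sleep(poll_interval)  # cooperative, unlike time.sleep
        state = qstat_t(jid, 'state')
    return state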
Example #4
    async def start(self):
        self.port = random_port()
        env = self.get_env()
        cmd = ['export %s=%s;' % item for item in env.items()]
        cmd += ['echo host=$(hostname);']

        remote_cmd = self.user_options.get('cmd', ['jupyter-labhub'])
        remote_node = self.user_options.get('loc', ['psana'])[0]
        cmd.extend(remote_cmd)

        cmd += self.get_args()
        cmd += [' > ~/.jhub.log 2>&1 & pid=$!; echo pid=$pid']
        cmd = ' '.join(cmd)
        ret, stdout, stderr = await execute(self.user.name, remote_node, cmd)
        if ret:
            self.log.info('Error in spawning jupyterhub-singleuser %s\n',
                          stderr)
            if 'Permission denied' in stderr:
                self.log.info('Problem with ssh keys\n')
                raise Exception(
                    """It's likely your ssh keys are not initialized correctly. Please follow the instructions here - https://pswww.slac.stanford.edu/errors/JupyterHubCustomErrorPage.html and try again."""
                )

        match = re.search("host=(psana\w+\d+|drp-srcf-\w+\d+)\npid=(\d+)",
                          stdout)

        self.hostname = match.group(1)
        self.pid = int(match.group(2))
        self.log.info('hostname: %s port: %s pid: %d' %
                      (self.hostname, self.port, self.pid))
        return (self.hostname, self.port)
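The execute() coroutine awaited in Example #4 is not part of the excerpt. Purely as a hypothetical sketch of the contract the caller relies on (run the assembled shell command on the remote node and return returncode, stdout, stderr), it could be implemented with asyncio and a plain ssh invocation; the signature and the `ssh user@node` form are assumptions, not the project's actual code:

import asyncio

async def execute(username, node, cmd):
    # Hypothetical helper: run `cmd` on `node` over ssh as `username`.
    proc = await asyncio.create_subprocess_exec(
        'ssh', '{}@{}'.format(username, node), cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, stderr = await proc.communicate()
    return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')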
Example #5
    def start(self):
        """
        Submit the job to the queue and wait for it to start
        """
        self.user.server.port = random_port()

        cmd = self.cmd_prefix.copy()
        cmd.extend(['qsub', '-b', 'y', '-j', 'y',
                    '-N', 'jupyterhub', '-wd', '/home/{}'.format(self.user.name)])
        cmd.extend([sys.executable, '-m', 'jupyterhub.singleuser'])
        cmd.extend(self.get_args())

        self.log.info("SGE: CMD: {}".format(cmd))

        env = self.env.copy()

        self.proc = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE)
        r = self.proc.stdout.read().decode('utf-8')
        self.log.info("SGE: {}".format(r))
        jid = int(r.split('Your job ')[1].split()[0])
        self.jobid = jid

        state = self.qstat_t(jid, 'state')
        while state != 'r':
            time.sleep(2)
            state = self.qstat_t(jid, 'state')
            self.log.info("SGE: Job State: {}".format(state))

        host = self.qstat_t(jid, 'host')
        host = host.split('@')[1].split('.')[0]
        self.log.info("SGE: The single user server"
                      " is running on: {}".format(host))
        self.user.server.ip = host
Example #6
    def start(self):
        """Start the process"""
        self.user.server.port = random_port()
        self.user.server.ip = self.server_url
        cmd = []
        env = self.env.copy()

        cmd.extend(self.cmd)
        cmd.extend(self.get_args())

        self.log.debug("Env: %s", str(env))
        self.channel = paramiko.SSHClient()
        self.channel.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.channel.connect(self.server_url,
                             username=self.server_user,
                             key_filename=self.user_keyfile)
        # self.proc = Popen(cmd, env=env, \
        #    preexec_fn=self.make_preexec_fn(self.user.name),
        #                 )
        self.log.info("Spawning %s", ' '.join(cmd))
        for item in env.items():
            cmd.insert(0, 'export %s="%s";' % item)
        self.pid, stdin, stdout, stderr = execute(self, self.channel,
                                                  ' '.join(cmd),
                                                  self.user.name)
        self.log.info("Process PID is %d" % self.pid)
Example #7
 def start(self):
     self.user.server.ip = self.ip
     self.user.server.port = random_port()
     self.db.commit()
     # only args, not the base command
     reply = yield self.do(action='spawn', args=self.get_args(), env=self.get_env())
     self.pid = reply['pid']
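Example #7 (and several later excerpts) delegates to a do() helper that returns a reply containing the remote pid; the helper itself is not shown anywhere in this collection. Purely as a hypothetical illustration of that contract, such a helper might POST the request to a per-node agent; the URL scheme and the remote_host/agent_port attributes are invented for this sketch:

import json

from tornado import gen
from tornado.httpclient import AsyncHTTPClient

@gen.coroutine
def do(self, action, **kwargs):
    # Hypothetical: forward the request to an agent on the target node and
    # return its decoded JSON reply; for action='spawn' the reply is expected
    # to carry the pid of the launched single-user server.
    client = AsyncHTTPClient()
    response = yield client.fetch(
        'http://{}:{}/{}'.format(self.remote_host, self.agent_port, action),
        method='POST',
        body=json.dumps(kwargs),
    )
    return json.loads(response.body.decode('utf-8'))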
Example #8
    def start(self):
        """Start the process"""
        if not self.user.server.port:
            self.user.server.port = random_port()
            self.db.commit()
        job = yield self.submit_batch_script()

        # We are called with a timeout, and if the timeout expires this function will
        # be interrupted at the next yield, and self.stop() will be called. 
        # So this function should not return unless successful, and if unsuccessful
        # should either raise an Exception or loop forever.
        assert len(self.job_id) > 0
        while True:
            yield self.poll()
            if self.state_isrunning():
                break
            else:
                if self.state_ispending():
                    self.log.debug('Job ' + self.job_id + ' still pending')
                else:
                    self.log.warning('Job ' + self.job_id + ' neither pending nor running.\n' +
                                     self.job_status)
                assert self.state_ispending()
            yield gen.sleep(self.startup_poll_interval)

        self.user.server.ip = self.state_gethost()
        self.db.commit()
        self.log.info("Notebook server job {0} started at {1}:{2}".format(
                        self.job_id, self.user.server.ip, self.user.server.port)
            )
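The comment repeated in Examples #1 and #8 ("if the timeout expires this function will be interrupted at the next yield") describes coroutine cancellation. A self-contained illustration, independent of JupyterHub, of a timeout cancelling a polling coroutine at its next suspension point:

import asyncio

async def poll_forever(interval=0.1):
    # Stands in for the pending-job polling loop: it only finishes
    # by being cancelled from outside at one of these awaits.
    while True:
        await asyncio.sleep(interval)

async def main():
    try:
        await asyncio.wait_for(poll_forever(), timeout=0.5)
    except asyncio.TimeoutError:
        print("start() timed out; the Hub would now call spawner.stop()")

asyncio.run(main())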
Example #9
    def start(self):
        self.port = random_port()

        # only args, not the base command
        reply = yield self.do(action='spawn', args=self.get_args(), env=self.get_env())
        self.pid = reply['pid']
        print(self.ip)
        # 0.7 expects ip, port to be returned
        return (self.ip or '127.0.0.1', self.port)
Example #10
 def start(self):
     self.user.server.ip = self.ip
     self.user.server.port = random_port()
     self.db.commit()
     # only args, not the base command
     reply = yield self.do(action='spawn',
                           args=self.get_args(),
                           env=self.get_env())
     self.pid = reply['pid']
Example #11
    def start(self):
        self.port = random_port()

        # only args, not the base command
        reply = yield self.do(action='spawn', args=self.get_args(), env=self.get_env())
        self.pid = reply['pid']
        print(self.ip)
        # 0.7 expects ip, port to be returned
        return (self.ip or '127.0.0.1', self.port)
Example #12
    def start(self):
        """Start the process"""
        self.log.debug("Running start() method...")
        # first check if the user has a spawner running somewhere on the server
        jobid, port, state, reason = self.query_slurm_by_jobname(
            self.user.name, self.job_name)

        if state == "COMPLETING":
            self.log.debug(
                "job %s still completing. Resetting jobid and port to empty string so new job will start.",
                jobid)
            jobid = ""
            port = ""

        self.slurm_job_id = jobid
        self.user.server.port = port

        if "failed" in reason:  # e.g. "launch failed requeued held" means it'll never start. clear everything
            self.log.error(
                "'failed' was found in squeue 'reason' output for job %s. Running scancel..."
                % self.slurm_job_id)
            self._stop_slurm_job()
            self.clear_state()
            self.user.spawn_pending = False
            self.db.commit()
            raise SlurmException("Slurm failed to launch job")

        if jobid != "" and port != "":
            self.log.debug(
                "*** STARTED SERVER *** Server was found running with "
                "slurm jobid '%s' for user '%s' on port %s" %
                (jobid, self.user.name, port))
            node_ip, node_name = self.get_slurm_job_info(jobid)
            self.user.server.ip = node_ip
            return

        # if the above wasn't true, then it didn't find a state for the user
        self.user.server.port = random_port()

        cmd = []

        env = self.get_env()

        cmd.extend(self.cmd)
        cmd.extend(self.get_args())

        self.log.debug("Env: %s", str(self.get_env()))
        self.log.info("Spawning %s", ' '.join(cmd))
        for k in ["JPY_API_TOKEN"]:
            cmd.insert(0, 'export %s="%s";' % (k, env[k]))

        self.db.commit()  # added this to test if there is a change in the way jupyterhub is working

        yield self.run_jupyterhub_singleuser(' '.join(cmd),
                                             self.user.server.port,
                                             self.user.name)
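The query_slurm_by_jobname and get_slurm_job_info helpers used above are outside the excerpt. As a hypothetical sketch of the latter, following only the call site above (it must return node_ip, node_name for a job id), the node name can be read from squeue and resolved to an IP; the exact output handling is an assumption:

import socket
import subprocess

def get_slurm_job_info(jobid):
    # Hypothetical: look up the node a Slurm job runs on (%N) and resolve its IP.
    node_name = subprocess.check_output(
        ['squeue', '-h', '-j', str(jobid), '-o', '%N']
    ).decode('utf-8').strip()
    node_ip = socket.gethostbyname(node_name)
    return node_ip, node_name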
Example #13
 def start(self):
     self.user.server.ip = self.ip
     self.user.server.port = random_port()
     self.db.commit()
     # only args, not the base command
     reply = yield self.do(action='spawn', args=self.get_args(), env=self.env)
     connection = Connection("localhost")
     connection.use("tarpit_queue_for_server_{}".format(server_id))
     connection.put(json.dumps(dict(username=self.user.name)))
     self.pid = reply['pid']
Example #14
    def start(self):
        self.port = random_port()
        # pre-0.7 JupyterHub, store ip/port in user.server:
        self.user.server.ip = self.ip
        self.user.server.port = self.port
        self.db.commit()

        # only args, not the base command
        reply = yield self.do(action='spawn', args=self.get_args(), env=self.get_env())
        self.pid = reply['pid']
        # 0.7 expects ip, port to be returned
        return (self.ip, self.port)
Example #15
def main(argv=None):
    port = random_port()
    hub_auth = HubAuth()
    hub_auth.client_ca = os.environ.get('JUPYTERHUB_SSL_CLIENT_CA', '')
    hub_auth.certfile = os.environ.get('JUPYTERHUB_SSL_CERTFILE', '')
    hub_auth.keyfile = os.environ.get('JUPYTERHUB_SSL_KEYFILE', '')
    hub_auth._api_request(method='POST',
                          url=url_path_join(hub_auth.api_url, 'batchspawner'),
                          json={'port': port})

    cmd_path = which(sys.argv[1])
    sys.argv = sys.argv[1:] + ['--port={}'.format(port)]
    run_path(cmd_path, run_name="__main__")
Example #16
    def start(self):
        """Start the process"""
        self.log.debug("Running start() method...")
        # first check if the user has a spawner running somewhere on the server
        jobid, port, state, reason = self.query_slurm_by_jobname(self.user.name, self.job_name)

        if state == "COMPLETING":
            self.log.debug("job %s still completing. Resetting jobid and port to empty string so new job will start.")
            jobid = ""
            port = ""

        self.slurm_job_id = jobid
        self.user.server.port = port

        if "failed" in reason:  # e.g. "launch failed requeued held" means it'll never start. clear everything
            self.log.error(
                "'failed' was found in squeue 'reason' output for job %s. Running scancel..." % self.slurm_job_id
            )
            self._stop_slurm_job()
            self.clear_state()
            self.user.spawn_pending = False
            self.db.commit()
            raise SlurmException("Slurm failed to launch job")

        if jobid != "" and port != "":
            self.log.debug(
                "*** STARTED SERVER *** Server was found running with "
                "slurm jobid '%s' for user '%s' on port %s"
                % (jobid, self.user.name, port)
            )
            node_ip, node_name = self.get_slurm_job_info(jobid)
            self.user.server.ip = node_ip
            return

        # if the above wasn't true, then it didn't find a state for the user
        self.user.server.port = random_port()

        cmd = []
        env = self.env.copy()

        cmd.extend(self.cmd)
        cmd.extend(self.get_args())

        self.log.debug("Env: %s", str(env))
        self.log.info("Spawning %s", " ".join(cmd))
        for k in ["JPY_API_TOKEN"]:
            cmd.insert(0, 'export %s="%s";' % (k, env[k]))

        self.db.commit()  # added this to test if there is a change in the way jupyterhub is working

        yield self.run_jupyterhub_singleuser(" ".join(cmd), self.user.server.port, self.user.name)
Example #17
    def start(self):
        self.port = random_port()
        # pre-0.7 JupyterHub, store ip/port in user.server:
        self.user.server.ip = self.ip
        self.user.server.port = self.port
        self.db.commit()

        # only args, not the base command
        reply = yield self.do(action='spawn',
                              args=self.get_args(),
                              env=self.get_env())
        self.pid = reply['pid']
        # 0.7 expects ip, port to be returned
        return (self.ip, self.port)
Example #18
def main(argv=None):
    port = random_port()
    hub_auth = HubAuth()
    hub_auth.client_ca = os.environ.get("JUPYTERHUB_SSL_CLIENT_CA", "")
    hub_auth.certfile = os.environ.get("JUPYTERHUB_SSL_CERTFILE", "")
    hub_auth.keyfile = os.environ.get("JUPYTERHUB_SSL_KEYFILE", "")
    hub_auth._api_request(
        method="POST",
        url=url_path_join(hub_auth.api_url, "batchspawner"),
        json={"port": port},
    )

    cmd_path = which(sys.argv[1])
    sys.argv = sys.argv[1:] + ["--port={}".format(port)]
    run_path(cmd_path, run_name="__main__")
Example #19
    def start(self):
        self.port = random_port()
        result, error = yield self.run_mediator('spawn', user=self.user.name,
                                      args=self.get_args(), env=self.get_env())
        
        if error:
            self.log.info('Error in spawning jupyterhub-singleuser %s\n', error)
            if 'Permission denied' in error:
                raise web.HTTPError(511) 

        lines = result.splitlines()
        self.hostname = lines[0]
        self.pid = int(lines[1])
        self.log.info('hostname: %s  port: %s  pid: %d' %(self.hostname, self.port, self.pid))
        return (self.hostname, self.port)
Example #20
def setup_desktop():
    # make a secure temporary directory for sockets
    # This is only readable, writeable & searchable by our uid

    # XXX: the following need to be node-local
    sockets_dir = tempfile.mkdtemp()
    sockets_path = os.path.join(sockets_dir, 'vnc-socket')

    vnc_command = ' '.join((shlex.quote(p) for p in [
        os.path.join(VNC_PATH, 'bin/vncserver'),
        '-verbose',
        '-xstartup', os.path.join(HUB_PRIVATE, 'xstartup'),
        '-geometry', '1400x1050',
        '-SecurityTypes', 'None',
        '-rfbunixpath', sockets_path,
        '-fg',
        '-ZlibLevel', '4',
        '-depth', '16',
        '-UseIPv4=0',
        '-UseIPv6=0',
        '-auth', os.path.join(HOME,'.Xauthority'),
        '-nolisten', 'tcp',
        # XXX: quick hack to enable multi. users
        ':'+str(min([ii for ii in range(1,20) \
                     if not os.path.exists(f'/tmp/.X11-unix/X{ii}')],
                    default=1)),
    ]))
    port = random_port()
    return {
        'command': [
            'websockify', '-v', '--web',
            os.path.join(HERE, 'share/web/noVNC-1.1.0'), '--heartbeat', '30',
            f'{port}', '--unix-target', sockets_path, '--', '/bin/sh', '-c',
            f'cd {os.getcwd()} && {vnc_command}'
        ],
        'port':
        port,
        'timeout':
        30,
        'mappath': {
            '/': '/vnc.html'
        },
        'launcher_entry': {
            'enabled': True,
            'title': 'Desktop'
        }  #,
        #'new_browser_window': True
    }
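Example #20 builds the launcher dictionary format used by jupyter-server-proxy ('command', 'port', 'timeout', 'mappath', 'launcher_entry'). As a sketch of how such a provider is typically registered, the callable is exposed through the jupyter_serverproxy_servers entry point group; the package and module names below are placeholders:

# setup.py sketch: register setup_desktop() with jupyter-server-proxy.
from setuptools import setup

setup(
    name='jupyter-desktop-sketch',       # placeholder package name
    py_modules=['jupyter_desktop'],      # placeholder module containing setup_desktop()
    entry_points={
        'jupyter_serverproxy_servers': [
            'desktop = jupyter_desktop:setup_desktop',
        ],
    },
)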
Example #21
    async def start(self):
        """Start the single-user server."""
        self.port = random_port()
        cmd = []
        env = self.get_env()

        cmd.extend(self.cmd)
        cmd.extend(self.get_args())

        if self.shell_cmd:
            # using shell_cmd (e.g. bash -c),
            # add our cmd list as the last (single) argument:
            cmd = self.shell_cmd + [' '.join(pipes.quote(s) for s in cmd)]

        self.log.info("Spawning %s", ' '.join(pipes.quote(s) for s in cmd))

        popen_kwargs = dict(
            preexec_fn=self.make_preexec_fn(self.user.name),
            start_new_session=True,  # don't forward signals
        )
        popen_kwargs.update(self.popen_kwargs)
        # don't let user config override env
        popen_kwargs['env'] = env
        try:
            self.proc = Popen(cmd, **popen_kwargs)
        except PermissionError:
            # use which to get abspath
            script = shutil.which(cmd[0]) or cmd[0]
            self.log.error(
                "Permission denied trying to run %r. Does %s have access to this file?",
                script,
                self.user.name,
            )
            raise

        self.pid = self.proc.pid

        if self.__class__ is not LocalProcessSpawner:
            # subclasses may not pass through return value of super().start,
            # relying on deprecated 0.6 way of setting ip, port,
            # so keep a redundant copy here for now.
            # A deprecation warning will be shown if the subclass
            # does not return ip, port.
            if self.ip:
                self.server.ip = self.ip
            self.server.port = self.port
            self.db.commit()
        return (self.ip or '127.0.0.1', self.port)
Example #22
    def start(self):
        """Start the process"""
        if self.user and self.user.server and self.user.server.port:
            self.port = self.user.server.port
            self.db.commit()
        elif (jupyterhub.version_info < (0, 7)
              and not self.user.server.port) or (jupyterhub.version_info >=
                                                 (0, 7) and not self.port):
            self.port = random_port()
            self.db.commit()
        job = yield self.submit_batch_script()

        # We are called with a timeout, and if the timeout expires this function will
        # be interrupted at the next yield, and self.stop() will be called.
        # So this function should not return unless successful, and if unsuccessful
        # should either raise an Exception or loop forever.
        if len(self.job_id) == 0:
            raise RuntimeError(
                "Jupyter batch job submission failure (no jobid in output)")
        while True:
            yield self.poll()
            if self.state_isrunning():
                break
            else:
                if self.state_ispending():
                    self.log.debug('Job ' + self.job_id + ' still pending')
                else:
                    self.log.warning('Job ' + self.job_id +
                                     ' neither pending nor running.\n' +
                                     self.job_status)
                    raise RuntimeError(
                        'The Jupyter batch job has disappeared'
                        ' while pending in the queue or died immediately'
                        ' after starting.')
            yield gen.sleep(self.startup_poll_interval)

        self.current_ip = self.state_gethost()
        if jupyterhub.version_info < (0, 7):
            # store on user for pre-jupyterhub-0.7:
            self.user.server.port = self.port
            self.user.server.ip = self.current_ip
        self.db.commit()
        self.log.info("Notebook server job {0} started at {1}:{2}".format(
            self.job_id, self.current_ip, self.port))

        return self.current_ip, self.port
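Examples #1, #8 and #22 assume submit_batch_script() has already extracted job_id from the scheduler's reply. For the Slurm case, a hypothetical parsing helper (the function name is invented; "Submitted batch job <id>" is sbatch's normal success output):

import re

def parse_sbatch_jobid(submit_output):
    # sbatch prints "Submitted batch job <id>" on success; anything else means
    # the submission failed and start() should raise rather than return.
    match = re.search(r'Submitted batch job (\d+)', submit_output)
    if match is None:
        raise RuntimeError('no job id in sbatch output: %r' % submit_output)
    return match.group(1)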
Example #23
    def start(self):
        """Start the process"""
        self.user.server.port = random_port()
        cmd = []
        env = self.env.copy()

        cmd.extend(self.cmd)
        cmd.extend(self.get_args())

        self.log.debug("Env: %s", str(env))
        self.log.info("Spawning %s", ' '.join(cmd))
        for k in ["JPY_API_TOKEN"]:
            cmd.insert(0, 'export %s="%s";' % (k, env[k]))
        #self.pid, stdin, stdout, stderr = execute(self.channel, ' '.join(cmd))
        
        output = run_jupyterhub_singleuser(' '.join(cmd), self.user.name)
        output = output.decode() # convert bytes object to string
        self.log.debug("Stdout of trying to call run_jupyterhub_singleuser(): %s" % output)
        self.slurm_job_id = output.split(' ')[-1] # the job id should be the very last part of the string

        # make sure jobid is really a number
        try:
            int(self.slurm_job_id)
        except ValueError:
            self.log.info("sbatch returned this at the end of their string: %s" % self.slurm_job_id)

        #time.sleep(2)
        job_state = self._check_slurm_job_state()
        while(True):
            self.log.info("job_state is %s" % job_state)
            if 'RUNNING' in job_state:
                break
            elif 'PENDING' in job_state:
                job_state = self._check_slurm_job_state()
                time.sleep(5)
            else:
                self.log.info("Job %s failed to start!" % self.slurm_job_id)
                return 1 # is this right? Or should I not return, or return a different thing?
        
        notebook_ip = get_slurm_job_info(self.slurm_job_id)

        self.user.server.ip = notebook_ip 
        self.log.info("Notebook server ip is %s" % self.user.server.ip)
Example #24
    def start(self):
        """Start the single-user server."""
        self.port = random_port()
        cmd = [APP_PATH]
        env = self.get_env()
        env['SANDSTONE_SETTINGS'] = SANDSTONE_SETTINGS

        args = self.get_args()
        # print(args)
        cmd.extend(args)

        self.log.info("Spawning %s", ' '.join(pipes.quote(s) for s in cmd))
        try:
            self.proc = Popen(
                cmd,
                env=env,
                preexec_fn=self.make_preexec_fn(self.user.name),
                start_new_session=True,  # don't forward signals
            )
        except PermissionError:
            # use which to get abspath
            script = shutil.which(cmd[0]) or cmd[0]
            self.log.error(
                "Permission denied trying to run %r. Does %s have access to this file?",
                script,
                self.user.name,
            )
            raise

        self.pid = self.proc.pid

        if self.__class__ is not LocalProcessSpawner:
            # subclasses may not pass through return value of super().start,
            # relying on deprecated 0.6 way of setting ip, port,
            # so keep a redundant copy here for now.
            # A deprecation warning will be shown if the subclass
            # does not return ip, port.
            if self.ip:
                self.user.server.ip = self.ip
            self.user.server.port = self.port
        return (self.ip or '127.0.0.1', self.port)
Example #25
    def start(self):
        """Start the process. Overridden in order to capture output."""
        self.port = random_port()

        env = self.get_env()
        cmd = []
        cmd.extend(self.cmd)
        cmd.extend(self.get_args())

        self.log.info("Spawning %s", ' '.join(pipes.quote(s) for s in cmd))
        self.proc = Popen(
            cmd, env=env, preexec_fn=self.make_preexec_fn(self.user.name),
            start_new_session=True,  # don't forward signals
            stdout=PIPE, stderr=STDOUT,
        )
        self.pid = self.proc.pid
        self._read_proc_stderr_thread = thrd = threading.Thread(
            target=self._read_proc_stderr, name='_read_proc_stderr')
        thrd.daemon = True
        thrd.start()
        return (self.ip or '127.0.0.1', self.port)
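Example #25 redirects the child's combined output to a pipe and drains it on a daemon thread; the _read_proc_stderr target is not shown. A hypothetical sketch of such a reader (the name comes from the call site, the logging destination is an assumption); draining matters because an unread PIPE eventually fills up and blocks the child:

    def _read_proc_stderr(self):
        # Drain the combined stdout/stderr pipe line by line and forward it to
        # the spawner log so the child never blocks on a full pipe buffer.
        for line in iter(self.proc.stdout.readline, b''):
            self.log.info("singleuser: %s",
                          line.decode('utf-8', 'replace').rstrip())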
Example #26
    def start(self):
        """Start the process"""
        self.user.server.port = random_port()
        self.user.server.ip = self.server_url
        cmd = []
        env = self.env.copy()

        cmd.extend(self.cmd)
        cmd.extend(self.get_args())

        self.log.debug("Env: %s", str(env))
        self.channel = paramiko.SSHClient()
        self.channel.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.channel.connect(self.server_url, username=self.server_user, key_filename=self.user_keyfile)
        # self.proc = Popen(cmd, env=env, \
        #    preexec_fn=self.make_preexec_fn(self.user.name),
        #                 )
        self.log.info("Spawning %s", ' '.join(cmd))
        for item in env.items():
            cmd.insert(0, 'export %s="%s";' % item)
        self.pid, stdin, stdout, stderr = execute(self, self.channel, ' '.join(cmd), self.user.name)
        self.log.info("Process PID is %d" % self.pid)
Example #27
    def start(self):
        """Start the process"""
        if self.ip:
            self.user.server.ip = self.ip
        self.user.server.port = random_port()
        cmd = []
        env = self.env.copy()

        cmd.extend(self.cmd)
        cmd.extend(self.get_args())

        self.log.info("Spawning %s", ' '.join(pipes.quote(s) for s in cmd))
        kinit = [
            'kinit', '-t',
            '/etc/security/keytabs/%s.jupyter.keytab' % self.user.name,
            '%s@%s' % (self.user.name, REALM)
        ]
        Popen(kinit, preexec_fn=self.make_preexec_fn(self.user.name)).wait()
        self.proc = Popen(
            cmd,
            env=env,
            preexec_fn=self.make_preexec_fn(self.user.name),
        )
        self.pid = self.proc.pid
Example #28
    def start(self):
        self.port = random_port()
        self.log.debug('user:%s Using port %s to start spawning',
                       self.user.name, self.port)

        # if a previous attempt to start the service for this user was made and failed,
        # systemd keeps the service around in 'failed' state. This will prevent future
        # services with the same name from being started. While this behavior makes sense
        # (since if it fails & is deleted immediately, we will lose state info), in our
        # case it is ok to reset it and move on when trying to start again.
        try:
            if subprocess.check_output(self.systemctl_cmd +
                                       ['is-failed', self.unit_name]).decode(
                                           'utf-8').strip() == 'failed':
                subprocess.check_output(self.systemctl_cmd +
                                        ['reset-failed', self.unit_name])
                self.log.info('user:%s Unit %s in failed state, resetting',
                              self.user.name, self.unit_name)
        except subprocess.CalledProcessError as e:
            # This is returned when the unit is *not* in failed state. bah!
            pass
        env = self.get_env()

        cmd = self.systemd_run_cmd[:]

        cmd.extend(['--unit', self.unit_name])
        try:
            pwnam = pwd.getpwnam(self.user.name)
        except KeyError:
            self.log.exception('No user named %s found in the system' %
                               self.user.name)
            raise
        cmd.extend(['--uid', str(pwnam.pw_uid), '--gid', str(pwnam.pw_gid)])

        if self.isolate_tmp:
            cmd.extend(['--property=PrivateTmp=yes'])

        if self.isolate_devices:
            cmd.extend(['--property=PrivateDevices=yes'])

        if self.extra_paths:
            env['PATH'] = '{extrapath}:{curpath}'.format(
                curpath=env['PATH'],
                extrapath=':'.join(
                    [self._expand_user_vars(p) for p in self.extra_paths]))

        for key, value in env.items():
            cmd.append('--setenv={key}={value}'.format(key=key, value=value))

        cmd.append('--setenv=SHELL={shell}'.format(shell=self.default_shell))

        if self.mem_limit is not None:
            # FIXME: Detect & use proper properties for v1 vs v2 cgroups
            cmd.extend([
                '--property=MemoryAccounting=yes',
                '--property=MemoryLimit={mem}'.format(mem=self.mem_limit),
            ])

        if self.cpu_limit is not None:
            # FIXME: Detect & use proper properties for v1 vs v2 cgroups
            # FIXME: Make sure that the kernel supports CONFIG_CFS_BANDWIDTH
            #        otherwise this doesn't have any effect.
            cmd.extend([
                '--property=CPUAccounting=yes',
                '--property=CPUQuota={quota}%'.format(
                    quota=int(self.cpu_limit * 100))
            ])

        if self.disable_user_sudo:
            cmd.append('--property=NoNewPrivileges=yes')

        if self.readonly_paths is not None:
            cmd.extend([
                self._expand_user_vars(
                    '--property=ReadOnlyDirectories=-{path}'.format(path=path))
                for path in self.readonly_paths
            ])

        if self.readwrite_paths is not None:
            cmd.extend([
                self._expand_user_vars(
                    '--property=ReadWriteDirectories={path}'.format(path=path))
                for path in self.readwrite_paths
            ])

        # We unfortunately have to resort to doing cd with bash, since WorkingDirectory property
        # of systemd units can't be set for transient units via systemd-run until systemd v227.
        # Centos 7 has systemd 219, and will probably never upgrade - so we need to support them.
        bash_cmd = [
            '/bin/bash', '-c', "cd {wd} && exec {cmd} {args}".format(
                wd=shlex.quote(self._expand_user_vars(self.user_workingdir)),
                cmd=' '.join([
                    shlex.quote(self._expand_user_vars(c)) for c in self.cmd
                ]),
                args=' '.join([shlex.quote(a) for a in self.get_args()]))
        ]
        cmd.extend(bash_cmd)

        self.log.debug('user:%s Running systemd-run with: %s', self.user.name,
                       ' '.join(cmd))
        subprocess.check_output(cmd)

        for i in range(self.start_timeout):
            is_up = yield self.poll()
            if is_up is None:
                return (self.ip or '127.0.0.1', self.port)
            yield gen.sleep(1)

        return None
Example #29
    async def start(self):
        self.port = random_port()
        self.log.debug('user:%s Using port %s to start spawning user server',
                       self.user.name, self.port)

        # If there's a unit with this name running already. This means a bug in
        # JupyterHub, a remnant from a previous install or a failed service start
        # from earlier. Regardless, we kill it and start ours in its place.
        # FIXME: Carefully look at this when doing a security sweep.
        if await systemd.service_running(self.unit_name):
            self.log.info(
                'user:%s Unit %s already exists but not known to JupyterHub. Killing',
                self.user.name, self.unit_name)
            await systemd.stop_service(self.unit_name)
            if await systemd.service_running(self.unit_name):
                self.log.error(
                    'user:%s Could not stop already existing unit %s',
                    self.user.name, self.unit_name)
                raise Exception(
                    'Could not stop already existing unit {}'.format(
                        self.unit_name))

        env = self.get_env()

        properties = {}

        if self.dynamic_users:
            properties['DynamicUser'] = 'yes'
            properties['StateDirectory'] = self._expand_user_vars('{USERNAME}')

            # HOME is not set by default otherwise
            env['HOME'] = self._expand_user_vars('/var/lib/{USERNAME}')
            # Set working directory to $HOME too
            working_dir = env['HOME']
            # Set uid, gid = None so we don't set them
            uid = gid = None
        else:
            try:
                unix_username = self._expand_user_vars(self.username_template)
                pwnam = pwd.getpwnam(unix_username)
            except KeyError:
                self.log.exception(
                    'No user named {} found in the system'.format(
                        unix_username))
                raise
            uid = pwnam.pw_uid
            gid = pwnam.pw_gid
            if self.user_workingdir is None:
                working_dir = pwnam.pw_dir
            else:
                working_dir = self._expand_user_vars(self.user_workingdir)

        if self.isolate_tmp:
            properties['PrivateTmp'] = 'yes'

        if self.isolate_devices:
            properties['PrivateDevices'] = 'yes'

        if self.extra_paths:
            env['PATH'] = '{extrapath}:{curpath}'.format(
                curpath=env['PATH'],
                extrapath=':'.join(
                    [self._expand_user_vars(p) for p in self.extra_paths]))

        env['SHELL'] = self.default_shell

        if self.mem_limit is not None:
            # FIXME: Detect & use proper properties for v1 vs v2 cgroups
            properties['MemoryAccounting'] = 'yes'
            properties['MemoryLimit'] = self.mem_limit

        if self.cpu_limit is not None:
            # FIXME: Detect & use proper properties for v1 vs v2 cgroups
            # FIXME: Make sure that the kernel supports CONFIG_CFS_BANDWIDTH
            #        otherwise this doesn't have any effect.
            properties['CPUAccounting'] = 'yes'
            properties['CPUQuota'] = '{}%'.format(int(self.cpu_limit * 100))

        if self.disable_user_sudo:
            properties['NoNewPrivileges'] = 'yes'

        if self.readonly_paths is not None:
            properties['ReadOnlyDirectories'] = [
                self._expand_user_vars(path) for path in self.readonly_paths
            ]

        if self.readwrite_paths is not None:
            properties['ReadWriteDirectories'] = [
                self._expand_user_vars(path) for path in self.readwrite_paths
            ]

        properties.update(self.unit_extra_properties)

        await systemd.start_transient_service(
            self.unit_name,
            cmd=[self._expand_user_vars(c) for c in self.cmd],
            args=[self._expand_user_vars(a) for a in self.get_args()],
            working_dir=working_dir,
            environment_variables=env,
            properties=properties,
            uid=uid,
            gid=gid)

        for i in range(self.start_timeout):
            is_up = await self.poll()
            if is_up is None:
                return (self.ip or '127.0.0.1', self.port)
            await asyncio.sleep(1)

        return None
Example #30
 def _port(self):
     return random_port()
Example #31
 def start(self):
     """Start the single-user server."""
     self.port = random_port()
     env = self.get_env()
     api_url = self.hub_api
     arg = {
         "user": self.user.name,
         "port": self.port,
         "log": 'INFO',
         "base_url": 'user/{name}'.format(name=self.user.name),
         "hub_host": '',
         "hub_prefix": '/',
         "hub_api_url":
         api_url,  # http://127.0.0.1/hub/api or with specific Jupyterhub-server address
         "notebook_dir": '~/notebooks',
         "ip": '0.0.0.0'
     }
     self.ssh_client = paramiko.SSHClient()
     self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
     self.ssh_client.connect(self.server_url, username=self.user.name)
     # first we define the environment, similar to the LocalProcessSpawner. Then we execute the remote jupyterhub-singleuser-server command
     stdin, stdout, stderr = self.ssh_client.exec_command(
          'export PATH="{PATH}" ; export VIRTUAL_ENV="{VIRTUAL_ENV}" ; export USER="{USER}" ; export JUPYTERHUB_CLIENT_ID="{JUPYTERHUB_CLIENT_ID}" ; export JUPYTERHUB_API_TOKEN="{JUPYTERHUB_API_TOKEN}" ; export JUPYTERHUB_OAUTH_CALLBACK_URL="{JUPYTERHUB_OAUTH_CALLBACK_URL}" ; export SHELL="{SHELL}" ; export HOME="{HOME}" ; export JUPYTERHUB_HOST="{JUPYTERHUB_HOST}" ; export LANG="{LANG}" ; export JPY_API_TOKEN="{JPY_API_TOKEN}" ; mkdir -p {arg_notebook_dir} ; jupyterhub-singleuser --log={arg_log} --user={arg_user} --base-url={arg_base_url} --hub-host={arg_hub_host} --hub-prefix={arg_hub_prefix} --hub-api-url={arg_hub_api_url} --ip={arg_ip} --port={arg_port} --notebook-dir={arg_notebook_dir} &> jupyterhub_singleuser.log & pid=$! ; echo PID=$pid'
         .format(PATH=env['PATH'],
                 VIRTUAL_ENV=env['VIRTUAL_ENV'],
                 USER=env['USER'],
                 JUPYTERHUB_CLIENT_ID=env['JUPYTERHUB_CLIENT_ID'],
                 JUPYTERHUB_API_TOKEN=env['JUPYTERHUB_API_TOKEN'],
                 JUPYTERHUB_OAUTH_CALLBACK_URL=env[
                     'JUPYTERHUB_OAUTH_CALLBACK_URL'],
                 SHELL=env['SHELL'],
                 HOME=env['HOME'],
                 JUPYTERHUB_HOST=env['JUPYTERHUB_HOST'],
                 LANG=env['LANG'],
                 JPY_API_TOKEN=env['JPY_API_TOKEN'],
                 arg_log=arg['log'],
                 arg_user=arg['user'],
                 arg_base_url=arg['base_url'],
                 arg_hub_host=arg['hub_host'],
                 arg_hub_prefix=arg['hub_prefix'],
                 arg_hub_api_url=arg['hub_api_url'],
                 arg_ip=arg['ip'],
                 arg_port=arg['port'],
                 arg_notebook_dir=arg['notebook_dir']))
     self.pid = int(stdout.readline().replace("PID=", ""))
     call([
         "ssh", "-N", "-f",
         "%s@%s" % (self.user.name, self.server_url),
         "-L {port}:localhost:{port}".format(port=self.port)
     ])
     p = Popen([
         "pgrep", "-f", "L {port}:localhost:{port}".format(port=self.port)
     ],
               stdout=subprocess.PIPE,
               stderr=subprocess.PIPE)
     out, err = p.communicate()
     self.tunnelpid = int(out)
     if self.__class__ is not SSHRemoteSpawner:
         # subclasses may not pass through return value of super().start,
         # relying on deprecated 0.6 way of setting ip, port,
         # so keep a redundant copy here for now.
         # A deprecation warning will be shown if the subclass
         # does not return ip, port.
         if self.ip:
             self.user.server.ip = self.ip
         self.user.server.port = self.port
     return (self.ip or '127.0.0.1', self.port)
Example #32
    async def start(self):
        with TemporaryDirectory() as td:
            local_resource_path = td
            start_script = os.path.join(
                local_resource_path,
                self.start_notebook_cmd
            )

            user = pwd.getpwnam(self.user.name)
            uid = user.pw_uid
            gid = user.pw_gid
            self.port = random_port()
            host = pipes.quote(self.user_options['host'])
            self.ssh_target = self.ip_for_host(host)
            remote_env = await self.remote_env(host=self.ssh_target)
            opts = self.ssh_opts(
                persist=self.ssh_control_persist_time,
                known_hosts=self.known_hosts
            )

            self.cert_paths = self.stage_certs(
                self.cert_paths,
                local_resource_path
            )

            # Create the start script (part of resources)
            await self.create_start_script(start_script, remote_env=remote_env)

            # Set proper ownership to the user we'll run as
            for f in [local_resource_path] + \
                     [os.path.join(local_resource_path, f)
                      for f in os.listdir(local_resource_path)]:
                shutil.chown(f, user=uid, group=gid)

            # Create remote directory in user's home
            create_dir_proc = self.spawn_as_user(
                "ssh {opts} {host} mkdir -p {path}".format(
                    opts=opts,
                    host=self.ssh_target,
                    path=self.resource_path
                )
            )
            create_dir_proc.expect(pexpect.EOF)

            copy_files_proc = self.spawn_as_user(
                "scp {opts} {files} {host}:{target_dir}/".format(
                    opts=opts,
                    files=' '.join([os.path.join(local_resource_path, f)
                                    for f in os.listdir(local_resource_path)]),
                    cp_dir=local_resource_path,
                    host=self.ssh_target,
                    target_dir=self.resource_path
                )
            )
            i = copy_files_proc.expect([
                ".*No such file or directory",
                "ssh: Could not resolve hostname",
                pexpect.EOF,
            ])

            if i == 0:
                raise IOError("No such file or directory: {}".format(
                    local_resource_path))
            elif i == 1:
                raise HostNotFound(
                    "Could not resolve hostname {}".format(self.ssh_target)
                )
            elif i == 2:
                self.log.info("Copied resources for {user} to {host}".format(
                    user=self.user.name,
                    host=self.ssh_target
                ))

            # Start remote notebook
            start_notebook_child = self.spawn_as_user(
                "ssh {opts} -L {port}:{ip}:{port} {host} {cmd}".format(
                    ip="127.0.0.1",
                    port=self.port,
                    opts=opts,
                    host=self.ssh_target,
                    cmd=os.path.join(self.resource_path,
                                     self.start_notebook_cmd)
                ),
                timeout=None
            )

            self.proc = start_notebook_child.proc
            self.pid = self.proc.pid

            if self.ip:
                self.user.server.ip = self.ip
            self.user.server.port = self.port

            return (self.ip or '127.0.0.1', self.port)
Example #33
    async def start(self):
        """Start the single-user server."""
        self.port = random_port()
        cmd = []
        env = self.get_env()
        token = None

        cmd.extend(self.cmd)

        cmd.extend(self.get_args())

        if self.shell_cmd:
            # using shell_cmd (e.g. bash -c),
            # add our cmd list as the last (single) argument:
            cmd = self.shell_cmd + [' '.join(pipes.quote(s) for s in cmd)]

        self.log.info("Spawning %s", ' '.join(pipes.quote(s) for s in cmd))

        auth_state = await self.user.get_auth_state()
        if auth_state:
            token = pywintypes.HANDLE(auth_state['auth_token'])

        user_env = None
        cwd = None

        try:
            # Will load user variables, if the user profile is loaded
            user_env = win32profile.CreateEnvironmentBlock(token, False)
        except Exception as exc:
            self.log.warning("Failed to load user environment for %s: %s", self.user.name, exc)
        else:
            # Only load user environment if we hold a valid auth token
            if token:
                env.update(user_env)
            if 'APPDATA' not in user_env:
                # If 'APPDATA' does not exist, USERPROFILE points at the default
                # directory, which is not writable. This changes the path over to
                # the public documents folder, so at least it's a writable location.
                user_env['USERPROFILE'] = user_env['PUBLIC']

        # On Posix, the cwd is set to ~ before spawning the singleuser server (preexec_fn).
        # Windows Popen doesn't have preexec_fn support, so we need to set cwd directly.
        if self.notebook_dir:
            cwd = os.getcwd()
        elif env.get('APPDATA'):
            cwd = user_env['USERPROFILE']
        else:
            # Set CWD to a temp directory, since we failed to load the user profile
            cwd = mkdtemp()

        popen_kwargs = dict(
            token=token,
            cwd=cwd,
        )

        popen_kwargs.update(self.popen_kwargs)
        # don't let user config override env
        popen_kwargs['env'] = env
        try:
            self.proc = PopenAsUser(cmd, **popen_kwargs)
        except PermissionError:
            # use which to get abspath
            script = shutil.which(cmd[0]) or cmd[0]
            self.log.error("Permission denied trying to run %r. Does %s have access to this file?",
                           script, self.user.name,
                          )
            if token:
                token.Detach()
            raise

        self.pid = self.proc.pid
        if token:
            token.Detach()

        if self.__class__ is not LocalProcessSpawner:
            # subclasses may not pass through return value of super().start,
            # relying on deprecated 0.6 way of setting ip, port,
            # so keep a redundant copy here for now.
            # A deprecation warning will be shown if the subclass
            # does not return ip, port.
            if self.ip:
                self.server.ip = self.ip
            self.server.port = self.port
            self.db.commit()

        return (self.ip or '127.0.0.1', self.port)
Example #34
    async def start(self):
        """Start the single-user server."""
        import pwd
        user = pwd.getpwnam(self.user.name)
#        uid = user.pw_uid
#        gid = user.pw_gid
        hosthome = user.pw_dir
        conthome = self.conthome.replace("USERNAME", self.user.name)

        self.port = random_port()

        podman_base_cmd = [
                "podman", "run", "-d",
                 # https://www.redhat.com/sysadmin/rootless-podman
                #"--storage-opt", "ignore_chown_errors",
                # "--rm",
                # "-u", "{}:{}".format(uid, gid),
                # "-p", "{hostport}:{port}".format(
                #         hostport=self.port, port=self.standard_jupyter_port
                #         ),
                "--net", "host",
                "-v", "{}:{}".format(hosthome, conthome),
                ]
        if self.startatconthome:
            podman_base_cmd += ["-w", conthome]
        # append flags for the JUPYTER*** environment in the container
        jupyter_env = self.get_env()
        podman_base_cmd_jupyter_env = []
        for k, v in jupyter_env.items():
            podman_base_cmd_jupyter_env.append("--env")
            podman_base_cmd_jupyter_env.append("{k}={v}".format(k=k,v=v))
        podman_base_cmd += podman_base_cmd_jupyter_env

        start_cmd = self.start_cmd
        port_already_set = False
        if "PORT" in self.start_cmd:
            start_cmd = self.start_cmd.replace("PORT", str(self.port))
            port_already_set = True
        jupyter_base_cmd = [self.image, start_cmd]

        if not port_already_set:
            jupyter_base_cmd.append("--NotebookApp.port={}".format(self.port))

        podman_cmd = podman_base_cmd+self.podman_additional_cmds
        jupyter_cmd = jupyter_base_cmd+self.jupyter_additional_cmds

        cmd = shlex.split(" ".join(podman_cmd+jupyter_cmd))

        env = self.user_env({})

        self.log.info("Spawning via Podman command: %s", ' '.join(s for s in cmd))

        # test whether a preexec_fn was set externally or not
        if self.preexec_fn_set == 0:
            preexec_fn = self.make_preexec_fn(self.user.name)
        else:
            preexec_fn = self.preexec_fn
        popen_kwargs = dict(
            preexec_fn=preexec_fn,
            stdout=PIPE, stderr=PIPE,
            start_new_session=True,  # don't forward signals
        )
        popen_kwargs.update(self.popen_kwargs)
        # don't let user config override env
        popen_kwargs['env'] = env

        # https://stackoverflow.com/questions/2502833/store-output-of-subprocess-popen-call-in-a-string

        if self.pull_image_first:
            pull_cmd = ["podman", "pull", self.pull_image]
            pull_proc = Popen(pull_cmd, **popen_kwargs)
            output, err = pull_proc.communicate()
            if pull_proc.returncode == 0:
                pass
            else:
                self.log.error(
                    "PodmanSpawner.start pull error: {}".format(err)
                )
                raise RuntimeError(err)

        proc = Popen(cmd, **popen_kwargs)
        output, err = proc.communicate()
        if proc.returncode == 0:
            self.cid = output[:-2]
        else:
            self.log.error(
                    "PodmanSpawner.start error: {}".format(err)
                    )
            raise RuntimeError(err)
        return ('127.0.0.1', self.port)
Example #35
 def _port_default(self):
     return random_port()
Example #36
    def start(self):
        """Start the process"""
        self.pid = 0

        options = self.user_options
        self.log.debug(f"options: {options}")
        self.server_url = self.node_dict[options['host']]
        self.server_user = self.user.name
        self.log.debug(f"username: {self.server_user}")
        self.log.debug(f"home path: {os.path.expanduser('~')}")
        env = self.get_env()
        singularity_env = []

        for k, v in env.items():
            singularity_env.append(f"SINGULARITYENV_{k}={v}")
        dir_list = []
        if options['dirs'] != '':
            # bind dirs for user
            dir_list.extend([x.strip() for x in options['dirs'].split(';')])
        if self.default_bind_path != '':
            dir_list.extend(
                [x.strip() for x in self.default_bind_path.split(';')])
        if dir_list != []:
            singularity_env.append(f'SINGULARITY_BIND="{",".join(dir_list)}"')
        if jupyterhub.version_info < (0, 7):
            port = random_port()
        else:
            port = self.user.server.port
        gpu_args = ""
        if options['host'] in self.gpu_enabled_list:
            gpu_args = "--nv"
        cmd = [
            *singularity_env, self.singularity_exe_path, "exec", gpu_args,
            self.singularity_container_path
        ]

        cmd.extend(self.cmd)
        jupyterhub_singleuser_args = self.get_args()
        # modify --ip in args to let jupyterhub-singleuser bind public ip
        for arg in jupyterhub_singleuser_args:
            if arg.startswith('--ip='):
                pass
            else:
                cmd.append(arg)
        cmd.append(f"--ip={self.server_url}")
        cmd.extend(['&>', f'/tmp/jupytersingler_{self.server_user}.log'])

        self.log.debug("Env: %s", str(env))
        self.channel = SshConnection()
        self.channel.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        rsa_file = os.path.join(os.path.join(self.home_path, self.server_user),
                                '.ssh/id_rsa')
        self.log.debug(f"use the rsa file for {self.server_user}: {rsa_file}")
        k = paramiko.RSAKey.from_private_key_file(rsa_file)
        self.log.debug(f"home: {self.home_path}")
        self.log.debug(f"connecting ssh tunel server_url={self.server_url} "
                       f"username={self.server_user}")
        try:
            self.channel.connect(self.server_url,
                                 username=self.server_user,
                                 pkey=k)
        except Exception as e:
            self.log.debug(repr(e))
            sys.exit(1)
        self.log.info(f'cmd: {cmd}')
        self.log.info(f"Spawning {' '.join(cmd)}")
        # We use the jupyterhub-singleuser in singularity container, and
        # singularity launch the jupyterhub-singleuser, so the pid what we get
        # is singularity's, which means the ppid of jupyterhub-singleuser.
        # But it's ok to use this ppid to monitor and control the process
        # of jupyterhub-singleuser
        self.pid, stdin, stdout, stderr = execute(self.channel, ' '.join(cmd))
        self.log.info(f"Process PID is {self.pid}")

        if jupyterhub.version_info < (0, 7):
            # store on user for pre-jupyterhub-0.7:
            self.user.server.ip = self.server_url
            self.user.server.port = port
        # jupyterhub 0.7 prefers returning ip, port:
        return (self.server_url, port)
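Examples #26 and #36 both obtain the remote PID from an execute() helper over an established paramiko connection, which neither excerpt includes. A hypothetical sketch matching Example #36's call site, execute(channel, cmd), where the PID is recovered by backgrounding the command and echoing $!:

def execute(channel, cmd):
    # Hypothetical helper: run `cmd` in the background on the remote host via
    # an existing paramiko.SSHClient and return (pid, stdin, stdout, stderr).
    stdin, stdout, stderr = channel.exec_command('{} & echo $!'.format(cmd))
    pid = int(stdout.readline())
    return pid, stdin, stdout, stderr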