def pull(self, tag=None):
    """ pull Docker image from registry """
    if not self.ssh_tunnel_port:
        # no tunnel configured: pull straight from the configured registry
        self.pull_image(tag=tag, registry=self.registry)
        return
    # pick the tunnel endpoint from whichever registry is configured
    if self.registry:
        local_host = self.registry.host
        local_port = self.registry.port
    elif self.image.registry:
        local_host = self.image.registry.host
        local_port = self.image.registry.port
    else:
        raise ValueError(
            'Either local host or local port for SSH tunnel '
            'can not be obtained'
        )
    with contextlib.closing(open(os.devnull, 'w')) as sink:
        # forward sys.stdout to os.devnull to prevent
        # printing debug messages by fab.remote_tunnel
        with patch(sys, 'stdout', sink):
            with fab.remote_tunnel(
                remote_port=self.ssh_tunnel_port,
                local_port=local_port,
                local_host=local_host,
            ):
                # the remote host reaches the registry through the tunnel
                self.pull_image(
                    tag=tag,
                    registry='localhost:{0}'.format(self.ssh_tunnel_port),
                )
def notebook():
    """Launch a remote Jupyter PySpark notebook, tunnelled to local port 8888."""
    environment = {
        'PYTHONHASHSEED': '1',
        'PYSPARK_PYTHON': 'python3',
        'PYSPARK_DRIVER_PYTHON': 'jupyter',
        'PYSPARK_DRIVER_PYTHON_OPTS': "notebook --no-browser",
    }
    with remote_tunnel(8888):
        with shell_env(**environment):
            run('./spark/bin/pyspark --master local[4]')
def remote_tunnel(self):
    # Generator body (presumably wrapped as a context manager by the caller):
    # when ``self.ssh_tunnel_port`` is set, yield inside an SSH tunnel to the
    # local registry; otherwise yield with no tunnel at all.
    if self.ssh_tunnel_port:
        # Resolve the local endpoint from whichever registry is configured.
        if self.registry:
            local_port = self.registry.port
            local_host = self.registry.host
        elif self.image.registry:
            local_port = self.image.registry.port
            local_host = self.image.registry.host
        else:
            raise ValueError(
                'Either local host or local port for SSH tunnel '
                'can not be obtained')
        with contextlib.closing(open(os.devnull, 'w')) as output:
            with utils.patch(sys, 'stdout', output):
                # forward sys.stdout to os.devnull to prevent
                # printing debug messages by fab.remote_tunnel
                with fab.remote_tunnel(
                    # NOTE(review): the guard checks self.ssh_tunnel_port but
                    # the tunnel binds self.host_registry.port — confirm these
                    # are intentionally different values.
                    remote_port=self.host_registry.port,
                    local_port=local_port,
                    local_host=local_host,
                ):
                    yield
    else:
        yield
def pvcluster(remote_dir, paraview_cmd, paraview_args, paraview_port,
              paraview_remote_port, job_dict, shell_cmd):
    """Create and submit a pvserver batch job on the remote cluster."""
    with show('debug'), \
            remote_tunnel(int(paraview_remote_port),
                          local_port=int(paraview_port)):
        with shell_env(PARAVIEW_CMD=paraview_cmd,
                       PARAVIEW_ARGS=paraview_args):
            run('echo $PARAVIEW_HOME')
            run('echo $PARAVIEW_ARGS')
            run('mkdir -p ' + remote_dir)
            with cd(remote_dir):
                # vizstack deployments use a dedicated launcher script
                script = ('mycluster-viz-paraview.bsh'
                          if 'vizstack' in paraview_args
                          else 'mycluster-paraview.bsh')
                create_cmd = ''.join([
                    shell_cmd,
                    'mycluster --create pvserver.job --jobname=pvserver',
                    ' --jobqueue ', job_dict['job_queue'],
                    ' --ntasks ', job_dict['job_ntasks'],
                    ' --taskpernode ', job_dict['job_ntaskpernode'],
                    ' --script ', script,
                    ' --project ', job_dict['job_project'],
                ])
                run(create_cmd)
                run('chmod u+rx pvserver.job')
                run(shell_cmd + 'mycluster --immediate --submit pvserver.job')
def __call__(self):
    """Yield inside a Fabric remote tunnel built from this object's settings."""
    options = {
        'remote_port': self.remote_port,
        'local_port': self.port,
        'local_host': self.host,
    }
    # drop unset options so fabric falls back to its own defaults
    options = {key: value for key, value in options.items()
               if value is not None}
    # pylint: disable=not-context-manager
    with api.remote_tunnel(**options):
        yield
def pyspark():
    """Run an interactive PySpark (IPython) shell on the remote host."""
    # with remote_tunnel(MASTER_WEBUI_PORT):
    with remote_tunnel(4040):  # SparkContext web UI
        environment = {
            'PYTHONHASHSEED': '1',
            'PYSPARK_PYTHON': 'python3',
            'PYSPARK_DRIVER_PYTHON': 'ipython',
        }
        with shell_env(**environment):
            with cd('pfc'):
                run('~/spark/bin/pyspark --master local[4]')
def pvserver(remote_dir, paraview_cmd, paraview_port, paraview_remote_port):
    """Start pvserver on the remote host behind an SSH tunnel."""
    tunnel = remote_tunnel(int(paraview_remote_port),
                           local_port=int(paraview_port))
    with show('debug'), tunnel, cd(remote_dir):
        # with cd(remote_dir):
        if use_multiprocess:
            # run('sleep 2;'+paraview_cmd+'&>/dev/null',pty=False)
            run('sleep 2;' + paraview_cmd)  # , pty=False)
        else:
            # detach from the terminal so the fabric task can return
            run('sleep 2;' + paraview_cmd + '</dev/null &>/dev/null&',
                pty=False)
def pvserver(remote_dir, paraview_cmd, paraview_port, paraview_remote_port):
    """Launch a remote pvserver, tunnelling its port back to this machine."""
    remote_port = int(paraview_remote_port)
    local_port = int(paraview_port)
    with show('debug'), \
            remote_tunnel(remote_port, local_port=local_port), \
            cd(remote_dir):
        # with cd(remote_dir):
        if not use_multiprocess:
            # background the server, detached from the terminal
            run('sleep 2;' + paraview_cmd + '</dev/null &>/dev/null&',
                pty=False)
        else:
            # run('sleep 2;'+paraview_cmd+'&>/dev/null',pty=False)
            run('sleep 2;' + paraview_cmd)  # , pty=False)
def __call__(self):
    """Yield inside a Fabric remote tunnel built from this object's settings.

    Options whose value is ``None`` are omitted so fabric's own defaults
    apply.
    """
    kwargs = {
        'remote_port': self.remote_port,
        'local_port': self.port,
        'local_host': self.host,
    }
    # iterate items() directly — materializing it with list() (a py2 habit)
    # builds a throwaway list for no benefit
    # pylint: disable=not-context-manager
    with api.remote_tunnel(
            **{k: v for k, v in kwargs.items() if v is not None}):
        yield
def pull(self, tag=None):
    """ pull[:tag=None] - pull Docker image from registry """
    tunnel = fab.remote_tunnel(
        remote_port=self.registry.port,
        local_port=self.local_registry.port,
        local_host=self.local_registry.host,
    )
    with tunnel:
        # the remote daemon reaches the registry through the tunnel
        command = 'docker pull {image}'.format(
            image=self.image[self.registry:tag])
        fabricio.run(command)
def remote_tunnel(self):
    """Yield inside an SSH tunnel when ``self.ssh_tunnel`` is configured.

    Without a tunnel configuration the stack stays empty and the body
    simply yields.
    """
    with contextlib.ExitStack() as stack:
        if self.ssh_tunnel:
            sink = stack.enter_context(
                contextlib.closing(open(os.devnull, 'w')))  # noqa
            # forward sys.stdout to os.devnull to prevent
            # printing debug messages by fab.remote_tunnel
            stack.enter_context(utils.patch(sys, 'stdout', sink))
            stack.enter_context(fab.remote_tunnel(
                remote_bind_address=self.ssh_tunnel.bind_address,
                remote_port=self.ssh_tunnel.port,
                local_host=self.ssh_tunnel.host,
                local_port=self.ssh_tunnel.host_port,
            ))
        yield
def remote_tunnel(self):
    """Yield inside an SSH tunnel when ``self.ssh_tunnel`` is configured.

    Fix: the original opened ``os.devnull`` unconditionally but only closed
    it (via ``contextlib.closing`` on the stack) when ``self.ssh_tunnel``
    was truthy, leaking a file descriptor on every no-tunnel call. The file
    is now opened only when it will actually be managed by the stack.
    """
    if self.ssh_tunnel:
        devnull = open(os.devnull, 'w')
        stack = [
            contextlib.closing(devnull),
            # forward sys.stdout to os.devnull to prevent
            # printing debug messages by fab.remote_tunnel
            utils.patch(sys, 'stdout', devnull),
            fab.remote_tunnel(
                remote_bind_address=self.ssh_tunnel.bind_address,
                remote_port=self.ssh_tunnel.port,
                local_host=self.ssh_tunnel.host,
                local_port=self.ssh_tunnel.host_port,
            ),
        ]
    else:
        stack = []
    with nested(*stack):
        yield
def pull(self, tag=None):
    """ pull Docker image from registry """
    if not self.tunnel_required:
        # direct registry access: delegate straight to the base task
        _DockerTasks.pull(self, tag=tag)
        return
    with contextlib.closing(open(os.devnull, 'w')) as sink:
        # forward sys.stdout to os.devnull to prevent
        # printing debug messages by fab.remote_tunnel
        with utils.patch(sys, 'stdout', sink):
            with fab.remote_tunnel(
                remote_port=self.registry.port,
                local_port=self.local_registry.port,
                local_host=self.local_registry.host,
            ):
                _DockerTasks.pull(self, tag=tag)
def pvcluster(remote_dir, paraview_home, paraview_args, paraview_port,
              paraview_remote_port, job_dict):
    """Create and submit a pvserver job through mycluster on the remote host."""
    with show('debug'), remote_tunnel(int(paraview_remote_port),
                                      local_port=int(paraview_port)):
        with shell_env(PARAVIEW_HOME=paraview_home,
                       PARAVIEW_ARGS=paraview_args):
            run('echo $PARAVIEW_HOME')
            run('echo $PARAVIEW_ARGS')
            run('mkdir -p ' + remote_dir)
            with cd(remote_dir):
                # vizstack deployments use a dedicated launcher script
                script = ('mycluster-viz-paraview.bsh'
                          if 'vizstack' in paraview_args
                          else 'mycluster-paraview.bsh')
                run(' '.join([
                    'mycluster --create pvserver.job --jobname=pvserver',
                    '--jobqueue', job_dict['job_queue'],
                    '--ntasks', job_dict['job_ntasks'],
                    '--taskpernode', job_dict['job_ntaskpernode'],
                    '--script', script,
                    '--project', job_dict['job_project'],
                ]))
                run('chmod u+rx pvserver.job')
                run('mycluster --immediate --submit pvserver.job')
def port_test(rport, lport):
    """Check that an SSH tunnel from *rport* to *lport* can be established."""
    remote_port = int(rport)
    local_port = int(lport)
    # a trivial remote command; success means the tunnel came up
    with hide('everything'), remote_tunnel(remote_port,
                                           local_port=local_port):
        run('cd')