def main():
    """Copy /etc/hosts from a remote machine via SFTP and list the result."""
    try:
        # Your ssh identity on the remote machine; override user_id if your
        # remote username differs:
        # ctx.user_id = "your_ssh_username"
        ctx = rs.Context("ssh")

        session = rs.Session()
        session.add_context(ctx)

        # Open the root directory of the remote machine.
        remote_dir = rs.filesystem.Directory('sftp://localhost/',
                                             session=session)

        # Pull /etc/hosts over to /tmp/ on the local machine.
        remote_dir.copy('/etc/hosts', 'file://localhost/tmp/')

        # Show every 'h*' entry now present in the local /tmp/ directory.
        local_dir = rs.filesystem.Directory('file://localhost/tmp/')
        for entry in local_dir.list(pattern='h*'):
            print(entry)

        return 0

    except rs.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (ex.type, (str(ex))))
        # Trace back the exception. That can be helpful for debugging.
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        return -1
def main():
    """Repeatedly submit batches of sleep jobs to REMOTE_HOST, then cancel them.

    Fixes: Python-2 `print` statements and `except X, ex` syntax (invalid in
    Python 3); the inner job loop shadowed the outer counter `i`.
    Returns 0 on success, -1 on any saga exception.
    """
    try:
        for i in range(0, 1000):
            print("**************************** Job: %d *****************************" % i)

            ctx = saga.Context("ssh")
            ctx.user_id = USER_ID

            session = saga.Session()
            session.add_context(ctx)

            # Create a job service object that represents the remote cluster.
            js = saga.job.Service("%s" % REMOTE_HOST, session=session)

            # Describe the job we want to run.  A complete set of job
            # description attributes can be found in the API documentation.
            jd = saga.job.Description()
            jd.executable      = '/bin/sleep'
            jd.queue           = 'normal'
            jd.project         = 'TG-MCB090174'
            jd.wall_time_limit = '10'
            jd.total_cpu_count = 1
            jd.arguments       = ['10']
            jd.output          = "/tmp/saga_job.%s.stdout" % USER_ID
            jd.error           = "/tmp/saga_job.%s.stderr" % USER_ID

            # Create and start a batch of jobs from the description.  Each
            # job starts in state 'New'.  The loop variable is 'n' -- the
            # original shadowed the outer counter 'i' here.
            jobs = []
            for n in range(0, 20):
                j = js.create_job(jd)
                j.run()
                jobs.append(j)
                print("Job %3d : %s [%s]" % (n, j.id, j.state))

            # Cancel the whole batch again.
            for j in jobs:
                j.cancel()
                print("Job : %s [%s]" % (j.id, j.state))

        return 0

    except saga.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (ex.type, (str(ex))))
        # Trace back the exception. That can be helpful for debugging.
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        return -1
def main():
    """List a Globus Online directory, tagging each entry's type and size."""
    try:
        ctx = rs.Context("x509")
        ctx.user_proxy = '/Users/mark/proj/myproxy/xsede.x509'

        session = rs.Session()
        session.add_context(ctx)

        # Open a directory on a remote GO endpoint.
        remote_dir = rs.filesystem.Directory('go://xsede#stampede/~/tmp/go/')

        # Print one line per entry: 'd'irectory, 'l'ink, or '-' regular file.
        for entry in remote_dir.list():
            if remote_dir.is_dir(entry):
                print("d %12s %s" % (remote_dir.get_size(entry), entry))
            elif remote_dir.is_link(entry):
                print("l %12s %s" % (remote_dir.get_size(entry), entry))
            elif remote_dir.is_file(entry):
                print("- %12s %s" % (remote_dir.get_size(entry), entry))
            else:
                print('Other taste ....: %s' % entry)

        return 0

    except rs.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (ex.type, (str(ex))))
        # Trace back the exception. That can be helpful for debugging.
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        return -1
def process_dependencies(dependencies, machine_parameters):
    """Copy each dependency group's files to its target machine via SFTP.

    For every group `x` in `dependencies`, the first entry only triggers the
    lookup of the target service host from `machine_parameters[i][1]` (with
    the URL scheme stripped); the remaining entries are copied from the
    current working directory to `machine_parameters[i][2]` on that host.

    Fixes: the Python-2 `print "Successful copy"` statement (a syntax error
    under Python 3, which the rest of this function targets); manual index
    counter replaced with enumerate; flag integer replaced with a boolean.
    """
    for i, x in enumerate(dependencies):
        ctx = rs.Context("ssh")
        ctx.user_id = "vshah505"

        session = rs.Session()
        session.add_context(ctx)

        first = True
        for files in x:
            if first:
                # First entry selects the target service; strip the scheme
                # (everything up to and including '//').
                service = machine_parameters[i][1]
                service = service[(service.find('/') + 2):]
                first = False
            else:
                f = rs.filesystem.File(
                    "file://localhost" + os.getcwd() + "/" + files,
                    session=session)
                f.copy("sftp://" + service + machine_parameters[i][2] + files)
                print("Successful copy")
def main():
    """List /tmp on a remote host via SFTP, marking directories vs files.

    Fixes: Python-2 `print` statements and `except X, ex` syntax, which are
    invalid under Python 3 (the other examples in this file use Python-3
    print functions).  Returns 0 on success, -1 on any saga exception.
    """
    try:
        # Your ssh identity on the remote machine.
        ctx = rs.Context("ssh")
        # ctx.user_id = getpass.getuser()   # Change if necessary

        session = rs.Session()
        session.add_context(ctx)

        # open home directory on a remote machine
        remote_dir = rs.filesystem.Directory(
            'sftp://stampede.tacc.xsede.org/tmp/', session=session)

        for entry in remote_dir.list():
            if remote_dir.is_dir(entry):
                print("d %12s %s" % (remote_dir.get_size(entry), entry))
            else:
                print("- %12s %s" % (remote_dir.get_size(entry), entry))

        return 0

    except rs.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (ex.type, (str(ex))))
        # Trace back the exception. That can be helpful for debugging.
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        return -1
def main():
    """Run an echo job through the ssh shell adaptor and report its state."""
    try:
        # Your ssh identity on the remote machine.  Change if necessary:
        # ctx.user_id  = "oweidner"
        # ctx.user_key = "/Users/oweidner/.ssh/sagaproj_rsa"
        ctx = rs.Context("ssh")

        session = rs.Session()
        session.add_context(ctx)

        # The 'ssh://' scheme routes through the shell adaptor, which can
        # run jobs on 'localhost' or any host you have ssh access to
        # ('fork://' would run them locally without ssh).
        js = rs.job.Service("ssh://localhost", session=session)

        # Describe the job: echo an environment variable into output files.
        # A complete set of description attributes is in the API docs.
        jd = rs.job.Description()
        jd.environment = {'MYOUTPUT': '"Hello from SAGA"'}
        jd.executable  = '/bin/echo'
        jd.arguments   = ['$MYOUTPUT']
        jd.output      = "mysagajob.stdout"
        jd.error       = "mysagajob.stderr"

        # A freshly created job starts in state 'New'.
        myjob = js.create_job(jd)

        print("Job ID    : %s" % (myjob.id))
        print("Job State : %s" % (myjob.state))

        print("\n...starting job...\n")
        myjob.run()

        print("Job ID    : %s" % (myjob.id))
        print("Job State : %s" % (myjob.state))

        print("\n...waiting for job...\n")
        # wait for the job to either finish or fail
        myjob.wait()

        print("Job State : %s" % (myjob.state))
        print("Exitcode  : %s" % (myjob.exit_code))

        return 0

    except rs.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (ex.type, (str(ex))))
        # Trace back the exception. That can be helpful for debugging.
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        return -1
def _get_session():
    """Return the module-level saga session, creating it on first use.

    The session is built without default contexts and carries a single ssh
    context for USER.
    """
    global session

    if session is not None:
        return session

    ctx = saga.Context("ssh")
    ctx.user_id = USER

    session = saga.Session(default=False)
    session.add_context(ctx)
    return session
def context(self):
    """Lazily build and cache the saga 'UserPass' context for SSH access."""
    if self._context is not None:
        return self._context

    ctx = saga.Context('UserPass')
    ctx.user_id     = self.user_id
    ctx.user_pass   = self.password
    ctx.remote_port = self.port
    ctx.life_time   = 0

    self._context = ctx
    return self._context
def __init__(self, **kwargs):
    """Collect connection settings and build an ssh saga session."""
    self.REMOTE_HOST = kwargs.get('REMOTE_HOST', None)
    self.ADDRESS     = kwargs.get('ADDRESS', None)      # address of your server
    self.USER        = kwargs.get('USER', 'vinay')      # username
    # That's amazing I got the same combination on my luggage!
    self.PASSWORD    = kwargs.get('PASSWORD', '')
    self.WORKING_DIR = kwargs.get('WORKING_DIR', None)  # your working directory
    self.mysql_url   = kwargs.get('mysql_url', None)    # hive database string

    # ssh context for the remote session; password only set when given
    self.ctx = saga.Context("ssh")
    self.ctx.user_id = self.USER
    if self.PASSWORD:
        self.ctx.user_pass = self.PASSWORD

    self.session = saga.Session()
    self.session.add_context(self.ctx)
def session(cfg):
    """Build a saga session from a configuration mapping.

    Reads 'context_type' (falling back to the historical misspelling
    'context_tye' for old config files) and, when set, attaches a context
    populated from the 'context_user_*' config values.

    Fixes: the Context attributes are named `user_proxy` / `user_id` /
    `user_pass` (as used by the TestConfig class elsewhere in this file);
    the original set non-existent `context_user_*` attributes.
    """
    s = rs.Session()

    t = cfg.get('context_type', cfg.get('context_tye'))
    if t:
        c = rs.Context(t)
        c.user_proxy = cfg.get('context_user_proxy')
        c.user_id    = cfg.get('context_user_id')
        c.user_pass  = cfg.get('context_user_pass')
        s.add_context(c)

    return s
def start_pilot(cr=None):
    """
    In order to start a pilot on the newly created CR, we need to define
    a resource description for that CR. To do so, we programatically create
    a clone of the local.localhost description, and replace the job submission
    URL with an ssh:// URL pointing to the CR.

    Fixes: `pd.exit_on_error = True,` -- the trailing comma assigned the
    tuple `(True,)` instead of the boolean True.
    """
    if not cr:
        # stand-in CR with only the attribute this function reads
        class _CR(object):
            def __init__(self):
                self.access = 'ssh://remote.host.net:1234/'
        cr = _CR()

    # get the local resource config
    session = rp.Session()
    cfg = session.get_resource_config('local.localhost')

    # create a new config based on the local one, and add it back
    new_cfg = rp.ResourceConfig('ec2.vm', cfg)
    new_cfg.schemas = ['ssh']
    new_cfg['ssh']['job_manager_endpoint'] = cr.access
    new_cfg['ssh']['filesystem_endpoint']  = cr.access

    # the new config needs to make sure we can bootstrap on the VM
    new_cfg['pre_bootstrap_0'] = [
        'sudo apt-get update',
        'sudo apt-get install -y python-virtualenv python-dev dnsutils bc'
    ]
    session.add_resource_config(new_cfg)

    # use the *same* ssh key for ssh access to the VM
    ssh_ctx = rs.Context('SSH')
    ssh_ctx.user_id  = 'admin'
    ssh_ctx.user_key = os.environ['EC2_KEYPAIR']
    session.contexts.append(ssh_ctx)

    # submit a pilot to it.
    pd = rp.ComputePilotDescription()
    pd.resource      = 'ec2.vm'
    pd.runtime       = 10
    pd.cores         = 1
    pd.exit_on_error = True   # was `True,` -- a one-element tuple

    pmgr = rp.PilotManager(session=session)
    return pmgr.submit_pilots(pd)
def main():
    """Run /bin/true on a remote host using an explicit ssh context.

    Fixes: the success path fell off the end returning None while the error
    path returned -1; it now returns 0 like the sibling examples.
    """
    try:
        c = rs.Context('ssh')
        c.user_id   = 'tg12736'
        c.user_cert = '/home/user/ssh/id_rsa_xsede'  # private key derived from cert

        s = rs.Session(default=False)  # create session with no contexts
        s.add_context(c)

        js = rs.job.Service('ssh://login1.stampede.tacc.utexas.edu', session=s)
        js.run_job("/bin/true")

        return 0

    except rs.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (ex.type, (str(ex))))
        # Trace back the exception. That can be helpful for debugging.
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        return -1
def main():
    """Transfer a file between two Globus Online endpoints and verify it."""
    try:
        ctx = rs.Context("x509")
        ctx.user_proxy = '/Users/mark/proj/myproxy/xsede.x509'

        session = rs.Session()
        session.add_context(ctx)

        source      = "go://marksant#netbook/Users/mark/tmp/go/"
        destination = "go://oasis-dm.sdsc.xsede.org/~/tmp/"
        filename    = "my_file"

        # Open the source endpoint directory and copy the file across.
        source_dir = rs.filesystem.Directory(source)
        source_dir.copy(filename, destination)

        # List destination entries that share the file's first letter.
        dest_dir = rs.filesystem.Directory(destination)
        for entry in dest_dir.list(pattern='%s*' % filename[0]):
            print(entry)

        # The transferred entry must be a regular file; report its size.
        dest_file = rs.filesystem.File(os.path.join(destination, filename))
        assert dest_file.is_file() == True
        assert dest_file.is_link() == False
        assert dest_file.is_dir() == False
        print('Size: %d' % dest_file.get_size())

        return 0

    except rs.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (ex.type, (str(ex))))
        # Trace back the exception. That can be helpful for debugging.
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        return -1
def __init__(self, **args):
    """Set up connection parameters, an ssh saga session, and a hive DB session."""
    self.REMOTE_HOST = args.get('remote_host', "noah-login-01")
    self.ADDRESS     = args.get('address', '10.7.95.60')  # address of your server
    self.USER        = args.get('user', 'vinay')          # username
    # That's amazing I got the same combination on my luggage!
    self.PASSWORD    = args.get('pass', '')
    self.WORKING_DIR = args.get('pwd', '/homes/ensprod/vinay_test/covid')  # working directory
    self.mysql_url   = args.get('mysql_url', None)        # hive database string

    # ssh context + session for remote access
    self.ctx = saga.Context("ssh")
    self.ctx.user_id = self.USER
    self.session = saga.Session()
    self.session.add_context(self.ctx)

    # hive instance bound to the mysql engine
    self.hive_session = sessionmaker()
    self.engine = create_engine(self.mysql_url, pool_recycle=3600, echo=False)
    self.hive_session.configure(bind=self.engine)
def __init__(self, cfg_file):
    """Load the test configuration and prepare a session for the tests."""
    # Only the 'radical.saga.tests' category of the config file is used.
    rut.TestConfig.__init__(self, cfg_file, 'radical.saga.tests')

    # Fresh session for the tests -- deliberately without the default
    # contexts.
    self.session = rs.Session(default=False)

    # If the test config names a context type, build that context from the
    # configured credentials and attach it.
    if self.context_type:
        c = rs.Context(self.context_type)
        c.user_id    = self.context_user_id
        c.user_pass  = self.context_user_pass
        c.user_cert  = self.context_user_cert
        c.user_proxy = self.context_user_proxy
        self.session.add_context(c)
def main():
    """Run an echo job on a LoadLeveler cluster reached via ssh.

    Fixes: Python-2 `print` statements and `except X, ex` syntax, which are
    invalid under Python 3.  Returns 0 on success, -1 on any saga exception.
    """
    try:
        # Your ssh identity on the remote machine
        ctx = rs.Context("ssh")
        ctx.user_id = "your_username"

        session = rs.Session()
        session.add_context(ctx)

        # The 'loadl' scheme triggers the LoadLeveler adaptor, '+ssh'
        # enables remote access via SSH, and the 'cluster' URL query selects
        # the LoadLeveler cluster name ("llq -X cluster").
        js = rs.job.Service("loadl+ssh://%s?cluster=%s" % \
                            (CLUSTER_HOST, CLUSTER_NAME), session=session)

        # Describe the job we want to run.  A complete set of job
        # description attributes can be found in the API documentation.
        jd = rs.job.Description()
        jd.environment = {'MYOUTPUT': '"Hello LoadLevler Adaptor from SAGA"'}
        jd.executable  = '/bin/echo'
        jd.arguments   = ['$MYOUTPUT']
        jd.output      = "/tmp/mysagajob.stdout"
        jd.error       = "/tmp/mysagajob.stderr"

        # Create a new job from the job description. The initial state of
        # the job is 'New'.
        myjob = js.create_job(jd)

        print("Job ID : %s" % (myjob.id))
        print("Job State : %s" % (myjob.state))

        print("\n...starting job...\n")
        myjob.run()

        print("Job ID : %s" % (myjob.id))
        print("Job State : %s" % (myjob.state))

        print("\n...waiting for job...\n")
        # wait for the job to either finish or fail
        myjob.wait()

        print("Job ID : %s" % (myjob.id))
        print("Job State : %s" % (myjob.state))
        print("Exitcode : %s" % (myjob.exit_code))

        js.close()
        return 0

    except rs.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occurred: (%s) %s " % (ex.type, (str(ex))))
        # Get the whole traceback in case of an exception -
        # this can be helpful for debugging the problem
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        return -1
REMOTE_FILE_ENDPOINT = "sftp://" + REMOTE_HOST + "/" + REMOTE_DIR # the dimension (in pixel) of the whole fractal imgx = 2048 imgy = 2048 # the number of tiles in X and Y direction tilesx = 2 tilesy = 2 #----------------------------------------------------------------------------- # if __name__ == "__main__": try: # Your ssh identity on the remote machine ctx = rs.Context("ssh") #ctx.user_id = "" session = rs.Session() session.add_context(ctx) # list that holds the jobs jobs = [] # create a working directory in /scratch dirname = '%s/mbrot/' % (REMOTE_FILE_ENDPOINT) workdir = rs.filesystem.Directory(dirname, rs.filesystem.CREATE, session=session) # copy the executable and warpper script to the remote host
# if not 'EC2_URL' in os.environ: usage("no %s in environment" % 'EC2_URL') if not 'EC2_ACCESS_KEY' in os.environ: usage("no %s in environment" % 'EC2_ACCESS_KEY') if not 'EC2_SECRET_KEY' in os.environ: usage("no %s in environment" % 'EC2_SECRET_KEY') if not 'EC2_KEYPAIR_ID' in os.environ: usage("no %s in environment" % 'EC2_KEYPAIR_ID') if not 'EC2_KEYPAIR' in os.environ: usage("no %s in environment" % 'EC2_KEYPAIR') server = rs.Url(os.environ['EC2_URL']) # in order to connect to EC2, we need an EC2 ID and KEY c1 = rs.Context('ec2') c1.user_id = os.environ['EC2_ACCESS_KEY'] c1.user_key = os.environ['EC2_SECRET_KEY'] c1.server = server # in order to access a created VM, we additionally need to point to the ssh # key which is used for EC2 VM contextualization, i.e. as EC2 'keypair'. # If the keypair is not yet registered on EC2, it will be registered by SAGA # -- but then a user_key *must* be specified (only the public key is ever # transfererd to EC2). c2 = rs.Context('ec2_keypair') c2.token = os.environ['EC2_KEYPAIR_ID'] c2.user_cert = os.environ['EC2_KEYPAIR'] c2.user_id = 'ubuntu' # the user id on the target VM c2.server = server
def main():
    """Run a remote echo job over ssh and stage its stdout back locally."""
    try:
        # Your ssh identity on the remote machine.
        ctx = rs.Context("ssh")
        ctx.user_id = "oweidner"

        session = rs.Session()
        # NOTE(review): 'ctx' is created but never added, so the session
        # keeps only its default contexts -- confirm this is intended.
        # session.add_context(ctx)

        # 'ssh://<host>' routes the job through the shell adaptor, which
        # also handles 'fork://' for purely local execution.
        js = rs.job.Service("ssh://%s" % REMOTE_HOST, session=session)

        # Describe the echo job and its per-user output files.  A complete
        # set of description attributes is in the API documentation.
        jd = rs.job.Description()
        jd.environment = {'MYOUTPUT': '"Hello from SAGA"'}
        jd.executable  = '/bin/echo'
        jd.arguments   = ['$MYOUTPUT']
        jd.output      = "/tmp/mysagajob-%s.stdout" % getpass.getuser()
        jd.error       = "/tmp/mysagajob-%s.stderr" % getpass.getuser()

        # A freshly created job starts in state 'New'.
        myjob = js.create_job(jd)

        print("Job ID    : %s" % (myjob.id))
        print("Job State : %s" % (myjob.state))

        print("\n...starting job...\n")
        myjob.run()

        print("Job ID    : %s" % (myjob.id))
        print("Job State : %s" % (myjob.state))

        print("\n...waiting for job...\n")
        myjob.wait()

        print("Job State : %s" % (myjob.state))
        print("Exitcode  : %s" % (myjob.exit_code))

        # Stage the job's stdout back to the current working directory.
        outfilesource = 'sftp://%s/tmp/mysagajob-%s.stdout' % (
            REMOTE_HOST, getpass.getuser())
        outfiletarget = "file://%s/" % os.getcwd()
        out = rs.filesystem.File(outfilesource, session=session)
        out.copy(outfiletarget)

        print("Staged out %s to %s (size: %s bytes)" %
              (outfilesource, outfiletarget, out.get_size()))

        return 0

    except rs.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (ex.type, (str(ex))))
        # Trace back the exception. That can be helpful for debugging.
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        return -1
def main():
    """Run an echo job on a LoadLeveler cluster via ssh, then stage out stdout.

    Fixes: Python-2 `print` statements and `except X, ex` syntax (invalid in
    Python 3); a duplicated copy-pasted wait/print block that waited for the
    already-finished job a second time.
    """
    try:
        # Your ssh identity on the remote machine
        ctx = saga.Context("ssh")
        ctx.user_id = "your_username"

        session = saga.Session()
        session.add_context(ctx)

        # The 'loadl' scheme triggers the LoadLeveler adaptor, '+ssh'
        # enables remote access via SSH, and the 'cluster' URL query selects
        # the LoadLeveler cluster name ("llq -X cluster").
        js = saga.job.Service(
            "loadl+ssh://%s?cluster=your_cluster_name" % REMOTE_HOST,
            session=session)

        # Describe the job we want to run.  A complete set of job
        # description attributes can be found in the API documentation.
        jd = saga.job.Description()
        jd.environment = {'MYOUTPUT': '"Hello LoadLevler Adaptor from SAGA"'}
        jd.executable  = '/bin/echo'
        jd.arguments   = ['$MYOUTPUT']
        jd.output      = "/tmp/mysagajob.stdout"
        jd.error       = "/tmp/mysagajob.stderr"

        # Create a new job from the job description. The initial state of
        # the job is 'New'.
        myjob = js.create_job(jd)

        print("Job ID : %s" % (myjob.id))
        print("Job State : %s" % (myjob.state))

        print("\n...starting job...\n")
        myjob.run()

        print("Job ID : %s" % (myjob.id))
        print("Job State : %s" % (myjob.state))

        print("\n...waiting for job...\n")
        # wait for the job to either finish or fail (the original waited --
        # and printed -- twice; once is enough)
        myjob.wait()

        print("Job State : %s" % (myjob.state))
        print("Exitcode : %s" % (myjob.exit_code))

        # Stage the remote stdout back to the local /tmp directory.
        outfilesource = 'sftp://%s/tmp/mysagajob.stdout' % REMOTE_HOST
        outfiletarget = 'file://localhost/tmp/'
        out = saga.filesystem.File(outfilesource, session=session)
        out.copy(outfiletarget)
        print("Staged out %s to %s (size: %s bytes)\n" % (
            outfilesource, outfiletarget, out.get_size()))

        return 0

    except saga.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (ex.type, (str(ex))))
        # Trace back the exception. That can be helpful for debugging.
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        return -1
def main():
    """Submit a job to a local PBS Pro cluster and monitor it via a callback.

    Fixes: Python-2 `print` statements and `except X, ex` syntax (invalid in
    Python 3); the executable '/bin/data' does not exist and clearly meant
    '/bin/date'.  Returns 0 on success, -1 on any saga exception.
    """
    try:
        # Your ssh identity on the remote machine; override if needed:
        # ctx.user_id = "your_ssh_username"
        ctx = rs.Context("ssh")

        session = rs.Session()
        session.add_context(ctx)

        # The 'pbspro' scheme triggers the PBS adaptor; appending '+ssh'
        # would enable remote access via SSH.
        js = rs.job.Service("pbspro://localhost/", session=session)

        # Describe the job we want to run.  A complete set of job
        # description attributes can be found in the API documentation.
        jd = rs.job.Description()
        jd.wall_time_limit = 1          # minutes
        jd.executable      = '/bin/date'  # was '/bin/data' (nonexistent)
        # jd.total_cpu_count = 12       # for lonestar: multiple of 12
        # jd.spmd_variation  = '12way'  # translates to the qsub -pe flag
        # jd.queue           = "batch"
        jd.project         = "e291"
        jd.output          = "examplejob.out"
        jd.error           = "examplejob.err"

        # Create a new job from the job description. The initial state of
        # the job is 'New'.
        job = js.create_job(jd)

        # Register our callback. We want it to 'fire' on job state change
        job.add_callback(rs.STATE, job_state_change_cb)

        print("Job ID : %s" % (job.id))
        print("Job State : %s" % (job.state))

        print("\n...starting job...\n")
        job.run()
        print("Job ID : %s" % (job.id))

        # List all jobs that are known by the adaptor.
        # This should show our job as well.
        print("\nListing active jobs: ")
        for jid in js.list():
            print(" * %s" % jid)

        # wait for our job to complete
        print("\n...waiting for job...\n")
        job.wait()

        print("Job State : %s" % (job.state))
        print("Exitcode : %s" % (job.exit_code))
        print("Exec. hosts : %s" % (job.execution_hosts))
        print("Create time : %s" % (job.created))
        print("Start time : %s" % (job.started))
        print("End time : %s" % (job.finished))

        js.close()
        return 0

    except rs.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occured: (%s) %s " % (ex.type, (str(ex))))
        # Get the whole traceback in case of an exception -
        # this can be helpful for debugging the problem
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        return -1
def start_cr():
    """
    We use SAGA to start a VM (called Compute Resource (cr) in this context).

    If a resource ID is given in sys.argv[1], reconnect to that VM instead
    of starting a new one.  Returns the (active) compute resource handle;
    re-raises any exception after printing it.
    """
    # In order to connect to EC2, we need an EC2 ID and KEY. We read those
    # from the environment.
    ec2_ctx = rs.Context('EC2')
    ec2_ctx.user_id = os.environ['EC2_ACCESS_KEY']
    ec2_ctx.user_key = os.environ['EC2_SECRET_KEY']

    # The SSH keypair we want to use the access the EC2 VM. If the keypair is
    # not yet registered on EC2 saga will register it automatically.  This
    # context specifies the key for VM startup, ie. the VM will be configured
    # to accept this key
    ec2keypair_ctx = rs.Context('EC2_KEYPAIR')
    ec2keypair_ctx.token = os.environ['EC2_KEYPAIR_ID']
    ec2keypair_ctx.user_key = os.environ['EC2_KEYPAIR']
    ec2keypair_ctx.user_id = 'admin'  # the user id on the target VM

    # We specify the *same* ssh key for ssh access to the VM.  That now should
    # work if the VM go configured correctly per the 'EC2_KEYPAIR' context
    # above.
    ssh_ctx = rs.Context('SSH')
    ssh_ctx.user_id = 'admin'
    ssh_ctx.user_key = os.environ['EC2_KEYPAIR']

    session = rs.Session(False)  # FALSE: don't use other (default) contexts
    session.contexts.append(ec2_ctx)
    session.contexts.append(ec2keypair_ctx)
    session.contexts.append(ssh_ctx)

    cr = None   # compute resource handle
    rid = None  # compute resource ID

    try:
        # ------------------------------------------------------------------ #
        #
        # reconnect to VM (ID given in ARGV[1])
        #
        if len(sys.argv) > 1:
            rid = sys.argv[1]

            # reconnect to the given resource
            print('reconnecting to %s' % rid)
            cr = rs.resource.Compute(id=rid, session=session)
            print('reconnected to %s' % rid)
            print(" state : %s (%s)" % (cr.state, cr.state_detail))

        # ------------------------------------------------------------------ #
        #
        # start a new VM
        #
        else:
            # start a VM if needed
            # in our session, connect to the EC2 resource manager
            rm = rs.resource.Manager("ec2://aws.amazon.com/", session=session)

            # Create a resource description with an image and an OS template,.
            # We pick a small VM and a plain Ubuntu image...
            cd = rs.resource.ComputeDescription()
            cd.image = 'ami-e6eeaa8e'       # plain debain wheezy
            cd.template = 'Small Instance'

            # Create a VM instance from that description.
            cr = rm.acquire(cd)
            print("\nWaiting for VM to become active...")

        # ------------------------------------------------------------------ #
        #
        # use the VM
        #
        # NOTE(review): nesting reconstructed from a collapsed source line --
        # this section appears to apply to both branches above; confirm.
        #
        # Wait for the VM to 'boot up', i.e., become 'ACTIVE'
        cr.wait(rs.resource.ACTIVE)

        # Query some information about the newly created VM
        print("Created VM: %s" % cr.id)
        print(" state : %s (%s)" % (cr.state, cr.state_detail))
        print(" access : %s" % cr.access)

        # give the VM some time to start up comlpetely, otherwise the
        # subsequent job submission might end up failing...
        time.sleep(60)

        return cr

    except Exception as e:
        # Catch all other exceptions
        print("An Exception occured: %s " % e)
        raise
def main():
    """Exercise Globus Online transfers between a local and a remote endpoint.

    Creates a temp directory, copies files to a remote GO endpoint under
    several scenarios (renamed copy, same-name copy, nested directory,
    non-existent source), asserting the expected outcome of each.

    Fixes: before the non-existent-file copy test, `caught` was still True
    from an earlier check, so its assert passed even if no exception was
    raised; it is now reset to False first.  The large commented-out
    experiment block at the end was removed.
    """
    tmp_dir = None
    try:
        tmp_dir = tempfile.mkdtemp(prefix='saga-test-',
                                   suffix='-%s' % TEST_NAME,
                                   dir=os.path.expanduser('~/tmp'))
        print('tmpdir: %s' % tmp_dir)

        ctx = saga.Context("x509")
        ctx.user_proxy = '/Users/mark/proj/myproxy/xsede.x509'

        session = saga.Session()
        session.add_context(ctx)

        # Local (SOURCE) and remote (TARGET) GO URLs for the temp tree.
        source_url = saga.Url()
        source_url.schema = 'go'
        source_url.host = SOURCE
        source_url.path = tmp_dir

        target_url = saga.Url()
        target_url.schema = 'go'
        target_url.host = TARGET
        target_url.path = os.path.join('~/saga-tests/',
                                       os.path.basename(tmp_dir))

        print("Point to local Directory through GO ...")
        d = saga.filesystem.Directory(source_url)
        print("And check ...")
        assert d.is_dir() == True
        assert d.is_file() == False
        assert d.is_link() == False
        d.close()

        print("Point to remote Directory through GO ...")
        d = saga.filesystem.Directory(target_url,
                                      flags=saga.filesystem.CREATE_PARENTS)
        print("And check ...")
        assert d.is_dir() == True
        assert d.is_file() == False
        assert d.is_link() == False
        d.close()

        print("Point to local file through GO, before creation ...")
        caught = False
        try:
            saga.filesystem.File(os.path.join(str(source_url), FILE_A_level_0))
        except saga.DoesNotExist:
            caught = True
        assert caught == True

        print("Create actual file ...")
        touch(tmp_dir, FILE_A_level_0)
        print("Try again ...")
        f = saga.filesystem.File(os.path.join(str(source_url), FILE_A_level_0))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print("Copy local file to remote, using different filename ...")
        d = saga.filesystem.Directory(target_url,
                                      flags=saga.filesystem.CREATE_PARENTS)
        d.copy(os.path.join(str(source_url), FILE_A_level_0),
               FILE_A_level_0 + COPIED_SUFFIX)
        d.close()
        f = saga.filesystem.File(
            os.path.join(str(target_url), FILE_A_level_0 + COPIED_SUFFIX))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print("Copy local file to remote, keeping filename in tact ...")
        d = saga.filesystem.Directory(target_url,
                                      flags=saga.filesystem.CREATE_PARENTS)
        d.copy(os.path.join(str(source_url), FILE_A_level_0), FILE_A_level_0)
        d.close()
        f = saga.filesystem.File(os.path.join(str(target_url), FILE_A_level_0))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print('Create file in level 1 ...')
        tree = LEVEL_1
        os.mkdir(os.path.join(tmp_dir, tree))
        touch(os.path.join(tmp_dir, tree), FILE_A_level_1)

        print("Test local file ...")
        f = saga.filesystem.File(
            os.path.join(str(source_url), tree, FILE_A_level_1))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print("Copy local file to remote, keeping filename in tact ...")
        d = saga.filesystem.Directory(os.path.join(str(target_url), tree),
                                      flags=saga.filesystem.CREATE_PARENTS)
        d.copy(os.path.join(str(source_url), tree, FILE_A_level_1),
               FILE_A_level_1)
        d.close()

        print("Test file after transfer ...")
        f = saga.filesystem.File(
            os.path.join(str(target_url), tree, FILE_A_level_1))
        assert f.is_file() == True
        assert f.is_dir() == False
        assert f.is_link() == False
        f.close()

        print(
            "Copy non-existent local file to remote, keeping filename in tact ..."
        )
        d = saga.filesystem.Directory(str(target_url),
                                      flags=saga.filesystem.CREATE_PARENTS)
        # BUG FIX: reset 'caught' -- it was still True from the earlier
        # check, which made the assert below pass vacuously.
        caught = False
        try:
            d.copy(os.path.join(str(source_url), NON_EXISTING_FILE),
                   NON_EXISTING_FILE)
        except saga.DoesNotExist:
            caught = True
        assert caught == True

        print("Test file after (non-)transfer ...")
        caught = False
        try:
            saga.filesystem.File(
                os.path.join(str(target_url), NON_EXISTING_FILE))
        except saga.DoesNotExist:
            caught = True
        assert caught == True

        print("Before return 0")
        return 0

    except saga.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occurred: (%s) %s " % (ex.type, (str(ex))))
        # Trace back the exception. That can be helpful for debugging.
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        print("before return -1")
        return -1

    finally:
        print("and finally ...")
        if CLEANUP and tmp_dir:
            shutil.rmtree(tmp_dir)
def main():
    """Submit a small '/bin/touch' job to the Lonestar SGE cluster via ssh.

    Returns:
        0 on success, -1 if a SAGA exception was raised.
    """
    try:
        # Your ssh identity on the remote machine.
        ctx = rs.Context("ssh")

        # Change e.g., if you have a different username on the remote machine
        # ctx.user_id = "your_ssh_username"

        session = rs.Session()
        session.add_context(ctx)

        # Create a job service object that represents a remote SGE cluster.
        # The keyword 'sge' in the url scheme triggers the SGE adaptors
        # and '+ssh' enables SGE remote access via SSH.
        js = rs.job.Service("sge+ssh://login1.ls4.tacc.utexas.edu",
                            session=session)

        # Next, we describe the job we want to run. A complete set of job
        # description attributes can be found in the API documentation.
        jd = rs.job.Description()
        jd.environment     = {'FILENAME': 'testfile'}
        jd.wall_time_limit = 1              # minutes
        jd.executable      = '/bin/touch'
        jd.arguments       = ['$FILENAME']
        jd.total_cpu_count = 12             # for lonestar this has to be a multiple of 12
        jd.spmd_variation  = '12way'        # translates to the qsub -pe flag
        # jd.total_physical_memory = 1024   # Memory requirements in Megabyte
        jd.queue             = "development"
        jd.project           = "TG-SEE100004"
        jd.working_directory = "$SCRATCH/A/B/C"
        jd.output            = "examplejob.out"
        jd.error             = "examplejob.err"

        # Create a new job from the job description. The initial state of
        # the job is 'New'.
        touchjob = js.create_job(jd)

        # Check our job's id and state
        print("Job ID : %s" % (touchjob.id))
        print("Job State : %s" % (touchjob.state))

        # Now we can start our job.
        print("\n...starting job...\n")
        touchjob.run()

        print("Job ID : %s" % (touchjob.id))
        print("Job State : %s" % (touchjob.state))

        # List all jobs that are known by the adaptor.
        # This should show our job as well.
        print("\nListing active jobs: ")
        for job in js.list():
            print(" * %s" % job)

        # wait for our job to complete
        print("\n...waiting for job...\n")
        touchjob.wait()

        print("Job State : %s" % (touchjob.state))
        print("Exitcode : %s" % (touchjob.exit_code))
        print("Exec. hosts : %s" % (touchjob.execution_hosts))
        print("Create time : %s" % (touchjob.created))
        print("Start time : %s" % (touchjob.started))
        print("End time : %s" % (touchjob.finished))

        js.close()
        return 0

    except rs.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occurred: (%s) %s " % (ex.type, (str(ex))))
        # Get the whole traceback in case of an exception -
        # this can be helpful for debugging the problem
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        return -1
#!/usr/bin/env python
"""Demonstrate two equivalent ways of subclassing rs.Context."""

import radical.saga as rs


class MyContextA(rs.Context):
    """Context subclass that chains to the base class via super()."""

    def __init__(self, ctype):
        self._apitype = 'rs.Context'
        super(MyContextA, self).__init__(ctype)


class MyContextB(rs.Context):
    """Context subclass that calls the base initializer explicitly."""

    def __init__(self, ctype):
        self._apitype = 'rs.Context'
        rs.Context.__init__(self, ctype)


# Instantiate the plain context and both subclasses; all three should
# render the same way.
cs = rs.Context('ssh')
print("saga: %s" % cs)

ca = MyContextA('ssh')
print("mc a: %s" % ca)

cb = MyContextB('ssh')
print("mc b: %s" % cb)
def main():
    """Start (or reconnect to) an EC2 VM and run a test job on it.

    If a resource ID is given as the first command line argument, reconnect
    to that VM; otherwise acquire a new VM on EC2, wait for it to become
    active, and submit a short '/bin/sleep' job to it.

    Expects EC2 credentials in the environment: EC2_ACCESS_KEY,
    EC2_SECRET_KEY, EC2_KEYPAIR_ID and EC2_KEYPAIR.

    Raises:
        rs.SagaException: re-raised after being reported.
    """
    # In order to connect to EC2, we need an EC2 ID and KEY. We read those
    # from the environment.
    ec2_ctx = rs.Context('EC2')
    ec2_ctx.user_id  = os.environ['EC2_ACCESS_KEY']
    ec2_ctx.user_key = os.environ['EC2_SECRET_KEY']

    # The SSH keypair we want to use to access the EC2 VM. If the keypair is
    # not yet registered on EC2 saga will register it automatically. This
    # context specifies the key for VM startup, ie. the VM will be configured
    # to accept this key.
    ec2keypair_ctx = rs.Context('EC2_KEYPAIR')
    ec2keypair_ctx.token    = os.environ['EC2_KEYPAIR_ID']
    ec2keypair_ctx.user_key = os.environ['EC2_KEYPAIR']
    ec2keypair_ctx.user_id  = 'root'  # the user id on the target VM

    # We specify the *same* ssh key for ssh access to the VM. That now should
    # work if the VM got configured correctly per the 'EC2_KEYPAIR' context
    # above.
    ssh_ctx = rs.Context('SSH')
    ssh_ctx.user_id  = 'root'
    ssh_ctx.user_key = os.environ['EC2_KEYPAIR']

    session = rs.Session(False)  # FALSE: don't use other (default) contexts
    session.contexts.append(ec2_ctx)
    session.contexts.append(ec2keypair_ctx)
    session.contexts.append(ssh_ctx)

    cr  = None  # compute resource handle
    rid = None  # compute resource ID

    try:
        # ----------------------------------------------------------------------
        #
        # reconnect to VM (ID given in ARGV[1])
        #
        if len(sys.argv) > 1:
            rid = sys.argv[1]

            # reconnect to the given resource
            print('reconnecting to %s' % rid)
            cr = rs.resource.Compute(id=rid, session=session)
            print('reconnected to %s' % rid)
            print(" state : %s (%s)" % (cr.state, cr.state_detail))

        # ----------------------------------------------------------------------
        #
        # start a new VM
        #
        else:
            # start a VM if needed
            # in our session, connect to the EC2 resource manager
            rm = rs.resource.Manager("ec2://aws.amazon.com/", session=session)

            # Create a resource description with an image and an OS template.
            # We pick a small VM and a plain Ubuntu image...
            cd = rs.resource.ComputeDescription()
            cd.image    = 'ami-0256b16b'    # plain ubuntu
            cd.template = 'Small Instance'

            # Create a VM instance from that description.
            cr  = rm.acquire(cd)
            rid = cr.id

            print("\nWaiting for VM to become active...")

        # ----------------------------------------------------------------------
        #
        # use the VM
        #
        # Wait for the VM to 'boot up', i.e., become 'ACTIVE'
        cr.wait(rs.resource.ACTIVE)

        # Query some information about the newly created VM
        print("Created VM: %s" % cr.id)
        print(" state : %s (%s)" % (cr.state, cr.state_detail))
        print(" access : %s" % cr.access)

        # give the VM some time to start up completely, otherwise the
        # subsequent job submission might end up failing...
        time.sleep(60)

        # create a job service which uses the VM's access URL (cr.access)
        js = rs.job.Service(cr.access, session=session)

        jd = rs.job.Description()
        jd.executable = '/bin/sleep'
        jd.arguments  = ['30']

        job = js.create_job(jd)
        job.run()

        print("\nRunning Job: %s" % job.id)
        print(" state : %s" % job.state)

        job.wait()

        print(" state : %s" % job.state)

    except rs.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occurred: (%s) %s " % (ex.type, (str(ex))))
        raise
def main():
    """Submit a small '/bin/touch' job to a local LSF cluster.

    Registers job_state_change_cb (defined elsewhere in this file) as a
    state-change callback on the job.

    Returns:
        0 on success, -1 if a SAGA exception was raised.
    """
    try:
        # Your ssh identity on the remote machine.
        ctx = rs.Context("ssh")

        # Change e.g., if you have a different username on the remote machine
        # ctx.user_id = "your_ssh_username"

        session = rs.Session()
        session.add_context(ctx)

        # Create a job service object that represents a local LSF cluster.
        # The keyword 'lsf' in the url scheme triggers the LSF adaptors.
        js = rs.job.Service("lsf://localhost", session=session)

        # Next, we describe the job we want to run. A complete set of job
        # description attributes can be found in the API documentation.
        jd = rs.job.Description()
        jd.environment     = {'FILENAME': 'testfile'}
        jd.wall_time_limit = 1  # minutes
        jd.executable      = '/bin/touch'
        jd.arguments       = ['$FILENAME']
        # NOTE(review): the original assigned total_cpu_count twice (42,
        # then 4); the first assignment was dead code and was dropped.
        jd.total_cpu_count   = 4
        jd.queue             = "batch"
        jd.project           = "GEO111"
        jd.working_directory = "$HOME/A/B/C"
        jd.output            = "examplejob.out"
        jd.error             = "examplejob.err"

        # Create a new job from the job description. The initial state of
        # the job is 'New'.
        touchjob = js.create_job(jd)

        # Register our callback. We want it to 'fire' on job state change
        touchjob.add_callback(rs.STATE, job_state_change_cb)

        # Check our job's id and state
        print("Job ID : %s" % (touchjob.id))
        print("Job State : %s" % (touchjob.state))

        # Now we can start our job.
        print("\n...starting job...\n")
        touchjob.run()

        print("Job ID : %s" % (touchjob.id))

        # List all jobs that are known by the adaptor.
        # This should show our job as well.
        # print("\nListing active jobs: ")
        # for job in js.list():
        #     print(" * %s" % job)

        # wait for our job to complete
        print("\n...waiting for job...\n")
        touchjob.wait()

        print("Job State : %s" % (touchjob.state))
        print("Exitcode : %s" % (touchjob.exit_code))
        print("Exec. hosts : %s" % (touchjob.execution_hosts))
        print("Create time : %s" % (touchjob.created))
        print("Start time : %s" % (touchjob.started))
        print("End time : %s" % (touchjob.finished))

        js.close()
        return 0

    except rs.SagaException as ex:
        # Catch all saga exceptions
        print("An exception occurred: (%s) %s " % (ex.type, (str(ex))))
        # Get the whole traceback in case of an exception -
        # this can be helpful for debugging the problem
        print(" \n*** Backtrace:\n %s" % ex.traceback)
        return -1
import radical.saga as saga

# ssh identity to use on the remote machine
ctx = saga.Context('ssh')
ctx.user_id = 'dinesh'

# session carrying that credential
session = saga.Session()
session.add_context(ctx)

# job service for Yellowstone's LSF scheduler, reached over ssh
js = saga.job.Service("lsf+ssh://yellowstone.ucar.edu", session=session)