def _do_submit(self, context):
    """Submit the prepared job and, in blocking mode, poll it to completion.

    A proxy is delegated first unless an access token is in use. The new
    job id is logged either as raw JSON or in human-readable form, and is
    returned to the caller.
    """
    opts = self.options
    # Proxy delegation only applies to certificate-based authentication.
    if not opts.access_token:
        Delegator(context).delegate(
            timedelta(minutes=opts.proxy_lifetime),
            delegate_when_lifetime_lt=timedelta(
                minutes=opts.delegate_when_lifetime_lt)
        )
    job_id = Submitter(context).submit(
        transfers=self.transfers,
        params=self.params
    )
    if opts.json:
        self.logger.info(json.dumps(job_id))
    else:
        self.logger.info("Job successfully submitted.")
        self.logger.info("Job id: %s" % job_id)
    if job_id and opts.blocking:
        non_terminal = (
            'SUBMITTED', 'READY', 'STAGING', 'ACTIVE', 'ARCHIVING',
            'QOS_TRANSITION', 'QOS_REQUEST_SUBMITTED'
        )
        inquirer = Inquirer(context)
        job = inquirer.get_job_status(job_id)
        while job['job_state'] in non_terminal:
            self.logger.info("Job in state %s" % job['job_state'])
            time.sleep(opts.poll_interval)
            job = inquirer.get_job_status(job_id)
        self.logger.info("Job finished with state %s" % job['job_state'])
        if job['reason']:
            self.logger.info("Reason: %s" % job['reason'])
    return job_id
def _do_dry_run(self, context):
    """Print the submission body that would be sent, without submitting."""
    submission = Submitter(context).build_submission(
        transfers=self.transfers,
        params=self.params
    )
    print(submission)
    return None
def _do_submit(self, context):
    """Delegate a proxy, submit the deletion job and optionally poll it.

    Logs the new job id (raw JSON or human-readable) and, when blocking,
    polls until the job leaves any non-terminal state. Returns the job id.
    """
    opts = self.options
    Delegator(context).delegate(timedelta(minutes=opts.proxy_lifetime))
    job_id = Submitter(context).submit(
        delete=self._build_delete(),
        spacetoken=opts.spacetoken,
        job_metadata=_metadata(opts.job_metadata),
        retry=opts.retry,
        credential=opts.cloud_cred
    )
    if opts.json:
        self.logger.info(json.dumps(job_id))
    else:
        self.logger.info("Job successfully submitted.")
        self.logger.info("Job id: %s" % job_id)
    if job_id and opts.blocking:
        non_terminal = ('SUBMITTED', 'READY', 'STAGING', 'ACTIVE', 'DELETE')
        inquirer = Inquirer(context)
        job = inquirer.get_job_status(job_id)
        while job['job_state'] in non_terminal:
            self.logger.info("Job in state %s" % job['job_state'])
            time.sleep(opts.poll_interval)
            job = inquirer.get_job_status(job_id)
        self.logger.info("Job finished with state %s" % job['job_state'])
        if job['reason']:
            self.logger.info("Reason: %s" % job['reason'])
    return job_id
def __call__(self):
    """Cancel the configured job and log the state it was left in."""
    context = Context(self.options.endpoint)
    cancelled = Submitter(context).cancel(self.jobId)
    self.logger.info(cancelled['job_state'])
def submit(context, job, delegation_lifetime=timedelta(hours=7),
           force_delegation=False,
           delegate_when_lifetime_lt=timedelta(hours=2)):
    """
    Submits a job

    Args:
        context: fts3.rest.client.context.Context instance
        job: Dictionary representing the job
        delegation_lifetime: Delegation lifetime
        force_delegation: Force delegation even if there is a valid proxy
        delegate_when_lifetime_lt: If the remaining lifetime on the delegated
            proxy is less than this interval, do a new delegation

    Returns:
        The job id
    """
    # Make sure a usable delegated proxy exists before submitting.
    delegate(context, delegation_lifetime, force_delegation,
             delegate_when_lifetime_lt)
    params = job.get('params', {})
    return Submitter(context).submit(
        transfers=job.get('files', None),
        delete=job.get('delete', None),
        staging=job.get('staging', None),
        **params
    )
def _do_dry_run(self, context):
    """Print the deletion submission that would be sent, without submitting."""
    opts = self.options
    submission = Submitter(context).build_submission(
        delete=self._build_delete(),
        spacetoken=opts.spacetoken,
        job_metadata=_metadata(opts.job_metadata),
        retry=opts.retry,
        credential=opts.cloud_cred
    )
    print(submission)
    return None
def cancel_all(context, vo_name=None):
    """
    Cancel all jobs within a given VO or the whole FTS3 instance (needs
    enough privileges)

    Args:
        context: fts3.rest.client.context.Context instance
        vo_name: The VO name, or None to cancel all jobs

    Returns:
        None
    """
    submitter = Submitter(context)
    result = submitter.cancel_all(vo_name)
    return result
def cancel(context, job_id, file_ids=None):
    """
    Cancels a job

    Args:
        context: fts3.rest.client.context.Context instance
        job_id: The job to cancel
        file_ids: Optional collection of file ids; when given, only those
            files of the job are cancelled

    Returns:
        The terminal state in which the job has been left.
        Note that it may not be CANCELED if the job finished already!
    """
    return Submitter(context).cancel(job_id, file_ids)
def _doSubmit(self):
    """Delegate a proxy, submit the transfer and optionally poll it.

    Logs the new job id and, in blocking mode, polls until the job leaves
    any non-terminal state. Returns the job id.
    """
    opts = self.options
    # Legacy flag: only ever forces verification on, never off.
    verify_checksum = True if opts.compare_checksum else None
    Delegator(self.context).delegate(
        timedelta(minutes=opts.proxy_lifetime))
    jobId = Submitter(self.context).submit(
        self.source, self.destination,
        checksum=self.checksum,
        bring_online=opts.bring_online,
        verify_checksum=verify_checksum,
        spacetoken=opts.destination_token,
        source_spacetoken=opts.source_token,
        fail_nearline=opts.fail_nearline,
        file_metadata=opts.file_metadata,
        filesize=opts.file_size,
        gridftp=opts.gridftp_params,
        job_metadata=opts.job_metadata,
        overwrite=opts.overwrite,
        copy_pin_lifetime=opts.pin_lifetime,
        reuse=opts.reuse
    )
    if opts.json:
        self.logger.info(jobId)
    else:
        self.logger.info("Job successfully submitted.")
        self.logger.info("Job id: %s" % jobId)
    if jobId and opts.blocking:
        inquirer = Inquirer(self.context)
        while True:
            # Sleep first: the job was only just submitted.
            time.sleep(opts.poll_interval)
            job = inquirer.getJobStatus(jobId)
            if job['job_state'] not in ('SUBMITTED', 'READY', 'STAGING',
                                        'ACTIVE'):
                break
            self.logger.info("Job in state %s" % job['job_state'])
        self.logger.info("Job finished with state %s" % job['job_state'])
        if job['reason']:
            self.logger.info("Reason: %s" % job['reason'])
    return jobId
def run(self):
    """Cancel the job (or selected files of it) given on the command line.

    The single positional argument is either "<job_id>" or
    "<job_id>:<file_id>[,<file_id>...]".
    """
    argument = self.args[0]
    if ':' in argument:
        job_id, raw_ids = argument.split(':')
        file_ids = raw_ids.split(',')
    else:
        job_id, file_ids = argument, None
    context = self._create_context()
    result = Submitter(context).cancel(job_id, file_ids)
    if file_ids:
        # Per-file cancellation may return a single state or a list of them.
        if isinstance(result, basestring):
            self.logger.info(result)
        else:
            self.logger.info('\n'.join(result))
    else:
        self.logger.info(result['job_state'])
def _do_dry_run(self, context):
    """Print the submission body that would be sent, without submitting.

    Resolves the effective checksum verification mode first: the legacy
    --compare-checksum flag has priority over --checksum-mode.
    """
    # Backwards compatibility: compare_checksum parameter
    if self.options.compare_checksum:
        checksum_mode = 'both'
    else:
        if self.checksum:
            checksum_mode = 'target'
        else:
            # Fix: the original assigned to 'checksum' here (a dead store on
            # the wrong variable); the intended target is checksum_mode. The
            # final value is unchanged because the block below always
            # re-assigns it when compare_checksum is false.
            checksum_mode = 'none'
    # Compare checksum has major priority than checksum_mode
    if not self.options.compare_checksum:
        if len(self.options.checksum_mode) > 0:
            checksum_mode = self.options.checksum_mode
        else:
            checksum_mode = 'none'
    if not self.checksum:
        self.checksum = DEFAULT_CHECKSUM
    submitter = Submitter(context)
    print(submitter.build_submission(
        self._build_transfers(),
        checksum=self.checksum,
        bring_online=self.options.bring_online,
        timeout=self.options.timeout,
        verify_checksum=checksum_mode,
        spacetoken=self.options.destination_token,
        source_spacetoken=self.options.source_token,
        fail_nearline=self.options.fail_nearline,
        file_metadata=_metadata(self.options.file_metadata),
        filesize=self.options.file_size,
        gridftp=self.options.gridftp_params,
        job_metadata=_metadata(self.options.job_metadata),
        overwrite=self.options.overwrite,
        copy_pin_lifetime=self.options.pin_lifetime,
        reuse=self.options.reuse,
        retry=self.options.retry,
        multihop=self.options.multihop,
        credential=self.options.cloud_cred,
        nostreams=self.options.nostreams,
        ipv4=self.options.ipv4,
        ipv6=self.options.ipv6))
    return None
def _doDryRun(self):
    """Print the submission that would be sent, without submitting."""
    opts = self.options
    # Legacy flag: only ever forces verification on, never off.
    verify_checksum = True if opts.compare_checksum else None
    submission = Submitter(self.context).buildSubmission(
        self.source, self.destination,
        checksum=self.checksum,
        bring_online=opts.bring_online,
        verify_checksum=verify_checksum,
        spacetoken=opts.destination_token,
        source_spacetoken=opts.source_token,
        fail_nearline=opts.fail_nearline,
        file_metadata=opts.file_metadata,
        filesize=opts.file_size,
        gridftp=opts.gridftp_params,
        job_metadata=opts.job_metadata,
        overwrite=opts.overwrite,
        copy_pin_lifetime=opts.pin_lifetime,
        reuse=opts.reuse
    )
    print(submission)
    return None
def _do_submit(self, context):
    """Delegate a proxy, submit the transfer job and optionally poll it.

    Resolves the effective checksum verification mode (the legacy
    --compare-checksum flag has priority over --checksum-mode), delegates
    a proxy, submits, logs the job id and, in blocking mode, polls until
    the job leaves any non-terminal state. Returns the job id.
    """
    # Backwards compatibility: compare_checksum parameter
    if self.options.compare_checksum:
        checksum_mode = 'both'
    else:
        if self.checksum:
            checksum_mode = 'target'
        else:
            # Fix: the original assigned to 'checksum' here (a dead store on
            # the wrong variable); the intended target is checksum_mode. The
            # final value is unchanged because the block below always
            # re-assigns it when compare_checksum is false.
            checksum_mode = 'none'
    # Compare checksum has major priority than checksum_mode
    if not self.options.compare_checksum:
        if len(self.options.checksum_mode) > 0:
            checksum_mode = self.options.checksum_mode
        else:
            checksum_mode = 'none'
    if not self.checksum:
        self.checksum = DEFAULT_CHECKSUM
    delegator = Delegator(context)
    delegator.delegate(
        timedelta(minutes=self.options.proxy_lifetime),
        delegate_when_lifetime_lt=timedelta(
            minutes=self.options.delegate_when_lifetime_lt))
    submitter = Submitter(context)
    # NOTE(review): only the first character of checksum_mode is sent here,
    # while the dry-run path sends the full string — confirm which form the
    # server expects before unifying; behavior kept as-is.
    job_id = submitter.submit(
        self._build_transfers(),
        checksum=self.checksum,
        bring_online=self.options.bring_online,
        timeout=self.options.timeout,
        verify_checksum=checksum_mode[0],
        spacetoken=self.options.destination_token,
        source_spacetoken=self.options.source_token,
        fail_nearline=self.options.fail_nearline,
        file_metadata=_metadata(self.options.file_metadata),
        filesize=self.options.file_size,
        gridftp=self.options.gridftp_params,
        job_metadata=_metadata(self.options.job_metadata),
        overwrite=self.options.overwrite,
        copy_pin_lifetime=self.options.pin_lifetime,
        reuse=self.options.reuse,
        retry=self.options.retry,
        multihop=self.options.multihop,
        credential=self.options.cloud_cred,
        nostreams=self.options.nostreams,
        ipv4=self.options.ipv4,
        ipv6=self.options.ipv6)
    if self.options.json:
        self.logger.info(json.dumps(job_id))
    else:
        self.logger.info("Job successfully submitted.")
        self.logger.info("Job id: %s" % job_id)
    if job_id and self.options.blocking:
        inquirer = Inquirer(context)
        job = inquirer.get_job_status(job_id)
        while job['job_state'] in ['SUBMITTED', 'READY', 'STAGING', 'ACTIVE']:
            self.logger.info("Job in state %s" % job['job_state'])
            time.sleep(self.options.poll_interval)
            job = inquirer.get_job_status(job_id)
        self.logger.info("Job finished with state %s" % job['job_state'])
        if job['reason']:
            self.logger.info("Reason: %s" % job['reason'])
    return job_id