def _cancel_jobs(dn):
    """
    Cancel all active jobs that belong to the given user DN.

    Every active file of each affected job is set to CANCELED with the
    reason 'User banned', and the owning job is canceled with the same
    timestamp.

    Args:
        dn: The user DN whose jobs must be canceled.

    Returns:
        The list of affected job ids.

    Raises:
        Re-raises any database error after rolling back the session.
    """
    jobs = Session.query(Job.job_id)\
        .filter(Job.job_state.in_(JobActiveStates))\
        .filter(Job.user_dn == dn)
    # Materialize as a list (a lazy map() iterator would be exhausted by the
    # loop below and an empty iterator returned to the caller on Python 3)
    job_ids = [row[0] for row in jobs]
    try:
        # Single timestamp so files and jobs share the same termination time
        now = datetime.utcnow()
        for job_id in job_ids:
            Session.query(File).filter(File.job_id == job_id)\
                .filter(File.file_state.in_(FileActiveStates))\
                .update({
                    'file_state': 'CANCELED',
                    'reason': 'User banned',
                    'job_finished': now,
                    'finish_time': now
                }, synchronize_session=False)
            Session.query(Job).filter(Job.job_id == job_id)\
                .update({
                    'job_state': 'CANCELED',
                    'reason': 'User banned',
                    'job_finished': now,
                    'finish_time': now
                }, synchronize_session=False)
        Session.commit()
        Session.expire_all()
        return job_ids
    except Exception:
        Session.rollback()
        raise
def _cancel_jobs(dn):
    """
    Cancel all active, unfinished jobs that belong to the given user DN.

    Active files are set to CANCELED with their `finish_time`; the owning
    job is canceled with `job_finished` set (matching the daemon's
    expectations for transfer cancelation).

    Args:
        dn: The user DN whose jobs must be canceled.

    Returns:
        The list of affected job ids.

    Raises:
        Re-raises any database error after rolling back the session.
    """
    jobs = Session.query(Job.job_id).filter(
        Job.job_state.in_(JobActiveStates),
        Job.user_dn == dn,
        Job.job_finished == None
    )
    # Materialize as a list (a lazy map() iterator would be exhausted by the
    # loop below and an empty iterator returned to the caller on Python 3)
    job_ids = [row[0] for row in jobs]
    try:
        # Single timestamp so files and jobs share the same termination time
        now = datetime.utcnow()
        for job_id in job_ids:
            Session.query(File).filter(File.job_id == job_id)\
                .filter(File.file_state.in_(FileActiveStates))\
                .update({
                    'file_state': 'CANCELED',
                    'reason': 'User banned',
                    'finish_time': now
                }, synchronize_session=False)
            Session.query(Job).filter(Job.job_id == job_id)\
                .update({
                    'job_state': 'CANCELED',
                    'reason': 'User banned',
                    'job_finished': now
                }, synchronize_session=False)
        Session.commit()
        Session.expire_all()
        return job_ids
    except Exception:
        Session.rollback()
        raise
def cancel_all_by_vo(self, vo_name):
    """
    Cancel all active files, data-management operations and jobs that belong
    to the given VO. Requires root privileges.

    Args:
        vo_name: The VO whose active work must be canceled.

    Returns:
        A dict with the number of affected files, data-management entries
        and jobs.

    Raises:
        HTTPForbidden: if the requesting user is not root.
        Re-raises any database error after rolling back the session.
    """
    user = request.environ['fts3.User.Credentials']
    now = datetime.utcnow()

    if not user.is_root:
        raise HTTPForbidden(
            'User does not have root privileges'
        )

    try:
        # FTS3 daemon expects finish_time to be NULL in order to trigger the signal
        # to fts_url_copy
        file_count = Session.query(File).filter(File.vo_name == vo_name)\
            .filter(File.file_state.in_(FileActiveStates))\
            .update({
                'file_state': 'CANCELED',
                'reason': 'Job canceled by the user',
                'dest_surl_uuid': None,
                'finish_time': None
            }, synchronize_session=False)
        # However, for data management operations there is nothing to signal, so
        # set job_finished
        dm_count = Session.query(DataManagement).filter(DataManagement.vo_name == vo_name)\
            .filter(DataManagement.file_state.in_(DataManagementActiveStates))\
            .update({
                'file_state': 'CANCELED',
                'reason': 'Job canceled by the user',
                'job_finished': now,
                'finish_time': now
            }, synchronize_session=False)
        job_count = Session.query(Job).filter(Job.vo_name == vo_name)\
            .filter(Job.job_state.in_(JobActiveStates))\
            .update({
                'job_state': 'CANCELED',
                'reason': 'Job canceled by the user',
                'job_finished': now
            }, synchronize_session=False)
        Session.commit()
        Session.expire_all()
        log.info("Active jobs for VO %s canceled" % vo_name)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        # not swallowed; consistent with the other cancelation helpers
        Session.rollback()
        raise
    return {
        "affected_files": file_count,
        "affected_dm": dm_count,
        "affected_jobs": job_count
    }
def _set_to_wait(storage, vo_name):
    """
    Put on hold the transfers that have the given storage either as source
    or destination, and belong to the given VO.

    Args:
        storage: The storage element to match.
        vo_name: The VO whose transfers are affected.

    Returns:
        The set of affected job ids.

    Raises:
        Re-raises any database error after rolling back the session.
    """
    try:
        # Regular submissions and staging submissions are held separately
        held = _set_to_wait_helper(storage, vo_name, 'SUBMITTED', 'ON_HOLD')
        staging_held = _set_to_wait_helper(
            storage, vo_name, 'STAGING', 'ON_HOLD_STAGING')
        held.update(staging_held)
        Session.commit()
        Session.expire_all()
    except Exception:
        Session.rollback()
        raise
    return held
def _set_to_wait(storage=None, vo_name=None, timeout=0):
    """
    Update the transfers that have the given storage either as source or
    destination (optionally restricted to a VO), marking them with a wait
    timestamp and timeout.

    Args:
        storage: The storage element to match (source or destination).
        vo_name: Optional VO name to further restrict the affected files.
        timeout: The wait timeout value written to the affected files.

    Returns:
        The list of affected job ids.

    Raises:
        Re-raises any database error after rolling back the session.
    """
    query = Session.query(distinct(File.job_id))\
        .filter((File.source_se == storage) | (File.dest_se == storage))\
        .filter(File.file_state.in_(FileActiveStates))
    if vo_name:
        query = query.filter(File.vo_name == vo_name)
    # Materialize as a list (a lazy map() iterator would be exhausted by the
    # loop below and an empty iterator returned to the caller on Python 3)
    job_ids = [row[0] for row in query.all()]
    try:
        # Hoisted out of the loop: a single timestamp gives every affected
        # file the same wait_timestamp, as the other helpers in this file do
        now = datetime.utcnow()
        for job_id in job_ids:
            Session.query(File).filter(File.job_id == job_id)\
                .filter(File.file_state.in_(FileActiveStates))\
                .update({'wait_timestamp': now, 'wait_timeout': timeout},
                        synchronize_session=False)
        Session.commit()
        Session.expire_all()
        return job_ids
    except Exception:
        Session.rollback()
        raise
# set job_finished Session.query(DataManagement).filter(DataManagement.job_id == job.job_id)\ .filter(DataManagement.file_state.in_(DataManagementActiveStates))\ .update({ 'file_state': 'CANCELED', 'reason': 'Job canceled by the user', 'job_finished': now, 'finish_time': now }, synchronize_session=False) job = Session.merge(job) log.info("Job %s canceled" % job.job_id) setattr(job, 'http_status', "200 Ok") setattr(job, 'http_message', None) responses.append(job) Session.expunge(job) Session.commit() Session.expire_all() except: Session.rollback() raise return _multistatus(responses, start_response, expecting_multistatus=len(requested_job_ids) > 1) @doc.response(207, 'For multiple job requests if there has been any error') @doc.response(403, 'The user doesn\'t have enough privileges') @doc.response(404, 'The job doesn\'t exist') @jsonify def modify(self, job_id_list, start_response): """ Modify a job, or set of jobs
def _cancel_transfers(storage=None, vo_name=None):
    """
    Cancel the transfers that have the given storage either as source or
    destination, and belong to the given VO ('*' or None meaning all VOs).

    For each canceled file, an unused alternative replica (file_state
    NOT_USED with the same job_id/file_index) is re-enabled if present.
    Jobs whose files have all reached a terminal state are then marked
    CANCELED.

    Args:
        storage: The storage element being banned.
        vo_name: Optional VO restriction; '*' matches every VO.

    Returns:
        The set of affected job ids.

    Raises:
        Re-raises any database error after rolling back the session.
    """
    affected_job_ids = set()
    # Fetch job_id and file_index together with file_id up front; the
    # original issued a second per-row lookup query inside the loop (N+1)
    files = Session.query(File.file_id, File.job_id, File.file_index).filter(
        and_((File.source_se == storage) | (File.dest_se == storage),
             File.file_state.in_(FileActiveStates + ['NOT_USED'])))
    if vo_name and vo_name != '*':
        files = files.filter(File.vo_name == vo_name)

    now = datetime.utcnow()
    # MySQL supports LIMIT on UPDATE; loop-invariant, so computed once here
    if Session.bind.dialect.name == 'mysql':
        limit = " LIMIT 1"
    else:
        limit = ''

    try:
        for file_id, job_id, file_index in files:
            affected_job_ids.add(job_id)
            # Cancel the affected file
            Session.query(File).filter(File.file_id == file_id)\
                .update({
                    'file_state': 'CANCELED',
                    'reason': 'Storage banned',
                    'finish_time': now,
                    'dest_surl_uuid': None
                }, synchronize_session=False)
            # If there are alternatives, enable them
            Session.execute(
                "UPDATE t_file SET"
                " file_state = 'SUBMITTED' "
                "WHERE"
                " job_id = :job_id AND file_index = :file_index AND file_state = 'NOT_USED' " + limit,
                dict(job_id=job_id, file_index=file_index))
        Session.commit()
        Session.expire_all()
    except Exception:
        Session.rollback()
        raise

    # Set each job terminal state if needed
    try:
        for job_id in affected_job_ids:
            n_files = Session.query(func.count(distinct(File.file_id)))\
                .filter(File.job_id == job_id).scalar()
            n_canceled = Session.query(func.count(distinct(File.file_id)))\
                .filter(File.job_id == job_id)\
                .filter(File.file_state == 'CANCELED').scalar()
            n_finished = Session.query(func.count(distinct(File.file_id)))\
                .filter(File.job_id == job_id)\
                .filter(File.file_state == 'FINISHED').scalar()
            n_failed = Session.query(func.count(distinct(File.file_id)))\
                .filter(File.job_id == job_id)\
                .filter(File.file_state == 'FAILED').scalar()
            n_terminal = n_canceled + n_finished + n_failed
            # Job finished: every file reached a terminal state, so close
            # the job (marked CANCELED regardless of the mix of terminal
            # states, preserving the original behavior)
            if n_terminal == n_files:
                reason = None
                Session.query(Job).filter(Job.job_id == job_id).update({
                    'job_state': 'CANCELED',
                    'job_finished': now,
                    'reason': reason
                })
        Session.commit()
    except Exception:
        Session.rollback()
        raise

    return affected_job_ids