def test_copy_progress_handler():
    c = client.CopyProcess()
    c.add_job(source=bigfile, target=bigcopy, force=True)
    c.prepare()
    h = TestProgressHandler()
    c.run(handler=h)
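# TestProgressHandler is not defined in this snippet; a minimal sketch,
# assuming it subclasses client.utils.CopyProgressHandler and implements
# the callback interface shown in the full example further below:
from XRootD import client

class TestProgressHandler(client.utils.CopyProgressHandler):
    """Illustrative progress handler; callback names follow
    client.utils.CopyProgressHandler."""
    def begin(self, jobId, total, source, target):
        print('job %d/%d: %s -> %s' % (jobId, total, source, target))

    def end(self, jobId, result):
        print('job %d finished: %s' % (jobId, result))

    def update(self, jobId, processed, total):
        print('job %d: %d/%d bytes' % (jobId, processed, total))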
def run(self):
    """ Run method """
    self.retries += 1
    proc = client.CopyProcess()

    for job in self.lst_jobs:
        # If the file is 0-size then we do a normal copy, otherwise we
        # enforce a TPC transfer
        tpc_flag = "none"

        if int(job[2]) != 0:
            tpc_flag = "only"

        # TODO: use the parallel mode starting with XRootD 4.1
        proc.add_job(job[0].encode("utf-8"), job[1].encode("utf-8"),
                     force=True, thirdparty=tpc_flag, tpctimeout=3600)

    self.xrd_status = proc.prepare()

    if self.xrd_status.ok:
        self.xrd_status, __ = proc.run()
def upload_archive(self):
    """ Upload archive file to EOS directory. Note that we save it with the
    name .archive.purge since this is the only possible operation when we do
    such a reconstruct.
    """
    cp = client.CopyProcess()
    dst = ''.join([str(self.dst_url),
                   ".archive.purge.done?eos.ruid=0&eos.rgid=0"])
    cp.add_job(self.farchive.name, dst, force=True)
    status = cp.prepare()

    if not status.ok:
        msg = "Failed while preparing to upload archive file to EOS"
        raise EosAccessException(msg)

    status, __ = cp.run()

    if not status.ok:
        msg = "Failed while copying the archive file to EOS"
        raise EosAccessException(msg)
    else:
        # Delete local archive file
        try:
            os.remove(self.farchive.name)
        except OSError as __:
            pass
def copy(self, source, destination):
    # Prepare the source path for XRootD
    if not self._parse_url(source):
        source = abspath(source)
    else:
        domain, dirname, filename = self._parse_url(source)
        source = f"{domain}/{dirname}/{filename}"

    # Prepare the destination path for XRootD
    assert os.path.basename(source) == os.path.basename(destination)
    if self._parse_url(destination):
        domain, dirname, filename = self._parse_url(destination)
        destination = f"{domain}/{dirname}/{filename}"
        self.makedirs(domain, dirname)
    else:
        destination = abspath(destination)
        if not os.path.isdir(os.path.dirname(destination)):
            os.makedirs(os.path.dirname(destination))

    # Perform the copy operation
    process = client.CopyProcess()
    process.add_job(source, destination, force=True)
    process.prepare()
    status, returns = process.run()
    if not status.ok or not returns[0]["status"].ok:
        raise XRootDFileException(
            "Error copying from " + source + " to " + destination,
            repr(status),
            repr(returns),
        )
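# _parse_url is not shown in this snippet; a minimal sketch of a compatible
# helper, assuming it returns a (domain, dirname, filename) tuple for
# root:// URLs and None for plain local paths (the name and regex below are
# illustrative, not the original implementation):
import re

def _parse_url(self, path):
    # Split "root://host//some/dir/file" into its three components
    match = re.match(r"^(root://[^/]+)/(.*)/([^/]+)$", path)
    if match is None:
        return None
    return match.group(1), match.group(2), match.group(3)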
def run(self):
    """ Run method """
    self.retries += 1
    proc = client.CopyProcess()

    for job in self.lst_jobs:
        # TODO: use the parallel mode starting with XRootD 4.1
        proc.add_job(job[0].encode("utf-8"), job[1].encode("utf-8"),
                     force=True, thirdparty="only")

    self.xrd_status = proc.prepare()

    if self.xrd_status.ok:
        self.xrd_status, __ = proc.run()
def file_downloader(self):
    """Download a single file with XRootD."""
    try:
        display_message(
            msg_type="note",
            msg="File: ./{}/{}".format(self.path, self.file_name),
        )
        process = xrootdclient.CopyProcess()
        process.add_job(SERVER_ROOT_URI + self.file_src,
                        os.getcwd() + os.sep + self.file_dest)
        process.prepare()
        process.run()
    except Exception:
        display_message(msg_type="error",
                        msg="Download error occurred. Please try again.")
def test_copy_bigfile():
    f = client.File()
    s, r = f.open(bigfile)
    assert s.ok
    size1 = f.stat(force=True)[1].size
    f.close()

    c = client.CopyProcess()
    c.add_job(source=bigfile, target=bigcopy, force=True)
    s = c.prepare()
    assert s.ok
    s, __ = c.run()
    assert s.ok

    f = client.File()
    s, r = f.open(bigcopy, OpenFlags.READ)
    size2 = f.stat()[1].size
    assert size1 == size2
    f.close()
def test_copy_smallfile():
    f = client.File()
    s, r = f.open(smallfile, OpenFlags.DELETE)
    assert s.ok
    f.write(smallbuffer)
    size1 = f.stat(force=True)[1].size
    f.close()

    c = client.CopyProcess()
    c.add_job(source=smallfile, target=smallcopy, force=True)
    s = c.prepare()
    assert s.ok
    s, __ = c.run()
    assert s.ok

    f = client.File()
    s, r = f.open(smallcopy, OpenFlags.READ)
    size2 = f.stat()[1].size
    assert size1 == size2
    f.close()
def copy(self):
    print('Copying', len(self._jobs), 'files')
    replicas = sorted(self._jobs, key=lambda replica: replica.lfn)
    replicas_remaining = replicas[:]
    ret_val = {}

    # Retry the batch up to three times, dropping jobs as they succeed
    for i in range(3):
        if not replicas_remaining:
            break
        process = client.CopyProcess()
        for replica in replicas_remaining:
            process.add_job(str(replica.pfn), str(self._jobs[replica]))
        status = process.prepare()
        assert status.ok, status
        status, results = process.run(CopyProgressHandler(len(self._jobs)))

        # Iterate in reverse so deletions don't shift pending indices
        for idx, (replica, result) in list(enumerate(zip(replicas_remaining, results)))[::-1]:
            if result['status'].ok:
                ret_val[replica] = result['status'].ok
                del replicas_remaining[idx]
            elif (result['status'].errno == 3006 and
                  'File exists' in result['status'].message):
                print('Encountered error when copying "' +
                      self._jobs[replica] + '", deleting for retry...')
                self._jobs[replica].rm()
            else:
                print('Unhandled error from XRootD', result)

    if replicas_remaining:
        raise IOError('Failed to copy some replicas ' +
                      repr(replicas_remaining) + ' ' + repr(status))
    return ret_val
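# The CopyProgressHandler used above is project-specific (it is constructed
# with the total number of jobs, unlike the base class); a minimal compatible
# sketch, assuming it only reports completed jobs through the standard
# callback interface:
class CopyProgressHandler(client.utils.CopyProgressHandler):
    def __init__(self, num_jobs):
        self.num_jobs = num_jobs
        self.done = 0

    def end(self, jobId, result):
        # Count each finished job and report overall progress
        self.done += 1
        print('Job %d finished (%d/%d)' % (jobId, self.done, self.num_jobs))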
class MyCopyProgressHandler(client.utils.CopyProgressHandler):
    def begin(self, jobId, total, source, target):
        print('id: %d, total: %d' % (jobId, total))
        print('source: %s' % source)
        print('target: %s' % target)

    def end(self, jobId, result):
        print('end status:', jobId, result)

    def update(self, jobId, processed, total):
        print('jobId: %d, processed: %d, total: %d' % (jobId, processed, total))

    def should_cancel(self, jobId):
        return False

process = client.CopyProcess()
# From local to local
process.add_job('/tmp/spam', '/tmp/spam1')
# From local to remote
process.add_job('/tmp/spam', 'root://localhost//tmp/spam2')
# From remote to local
process.add_job('root://localhost//tmp/spam', '/tmp/spam3')
# From remote to remote
process.add_job('root://localhost//tmp/spam', 'root://localhost//tmp/spam4')

handler = MyCopyProgressHandler()
process.prepare()
process.run(handler)
def test_copy_noprep():
    c = client.CopyProcess()
    c.add_job(source=bigfile, target=bigcopy, force=True)
    s, __ = c.run()
    assert s.ok
def test_copy_nojobs():
    c = client.CopyProcess()
    s = c.prepare()
    assert s.ok
    s, __ = c.run()
    assert s.ok
def flush_files(self, wait_all):
    """ Flush all pending transfers from the list of jobs.

    Args:
        wait_all (bool): If true wait and collect the status from all
            executing threads.

    Returns:
        True if files flushed successfully, otherwise false.
    """
    status = True

    # Wait until a thread from the pool gets freed if we reached the maximum
    # allowed number of running threads
    while len(self.threads) >= self.config.MAX_THREADS:
        for indx, thread in enumerate(self.threads):
            thread.join(self.config.JOIN_TIMEOUT)

            # If thread finished get the status and mark it for removal
            if not thread.isAlive():
                status = status and thread.xrd_status.ok
                log_level = logging.INFO if thread.xrd_status.ok else logging.ERROR
                self.logger.log(log_level,
                                "Thread={0} status={1} msg={2}".format(
                                    thread.ident, thread.xrd_status.ok,
                                    thread.xrd_status.message.decode("utf-8")))
                del self.threads[indx]
                break

    # If we still have jobs and previous archive jobs were successful or this
    # is a backup operation (best-effort even if we have failed transfers)
    if (self.list_jobs and
            ((self.oper != self.config.BACKUP_OP and status) or
             (self.oper == self.config.BACKUP_OP))):
        proc = client.CopyProcess()

        for job in self.list_jobs:
            # TODO: use the parallel mode starting with XRootD 4.1
            proc.add_job(job[0].encode("utf-8"), job[1].encode("utf-8"),
                         force=self.do_retry, thirdparty=True)

        del self.list_jobs[:]
        thread = ThreadJob(proc)
        thread.start()
        self.threads.append(thread)

    # If a previous archive job failed or we need to wait for all jobs to
    # finish then join the threads and collect their status
    if (self.oper != self.config.BACKUP_OP and not status) or wait_all:
        for thread in self.threads:
            thread.join()
            status = status and thread.xrd_status.ok
            log_level = logging.INFO if thread.xrd_status.ok else logging.ERROR
            self.logger.log(log_level,
                            "Thread={0} status={1} msg={2}".format(
                                thread.ident, thread.xrd_status.ok,
                                thread.xrd_status.message.decode("utf-8")))

        del self.threads[:]

    return status
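# ThreadJob is not shown in this snippet; a minimal sketch, assuming it wraps
# a populated CopyProcess in a thread and records the final status, mirroring
# the run() methods of the transfer threads shown earlier:
import threading

class ThreadJob(threading.Thread):
    """Illustrative wrapper that runs a CopyProcess and keeps its status."""
    def __init__(self, proc):
        threading.Thread.__init__(self)
        self.proc = proc
        self.xrd_status = None

    def run(self):
        # Prepare the jobs, then execute them and keep the final status
        self.xrd_status = self.proc.prepare()
        if self.xrd_status.ok:
            self.xrd_status, __ = self.proc.run()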