def process(self, file):
    """Delete one remote file via ``uberftp "rm ..."``.

    ``file.id`` is the full gridftp URI of the file to remove.  On
    success the file is recorded in the result set and True is
    returned; on failure the command output is logged and False is
    returned.
    """
    # urisplit -> (scheme, host, path, ...); host drives the uberftp
    # connection, path is the remote file to remove.
    parts = urisplit(file.id)
    delete_cmd = 'uberftp %s "rm %s"' % (parts[1], parts[2])

    status, out, _ = self.cacheObj.__cmd_retry_loop__(
        shell, delete_cmd, self.cacheObj.max_try)

    # Guard clause: report failure early instead of an if/else ladder.
    if status != 0:
        self.cacheObj.logger.error(out)
        return False

    self.__appendResult__(file.id, file)
    return True
def process(self, file):
    """Download one remote file into ``dest_dir`` via globus-url-copy.

    ``file.id`` is the source gridftp URI; the destination is a
    ``file:`` URI built from the enclosing scope's ``dest_dir`` and the
    source file's basename.  Returns True on success, False (after
    logging the command output) on failure.
    """
    source = file.id
    # Keep only the filename component of the remote path.
    basename = os.path.basename(urisplit(source)[2])
    target = "file:%s/%s" % (dest_dir, basename)

    copy_cmd = "globus-url-copy %s %s" % (source, target)
    status, out, _ = self.cacheObj.__cmd_retry_loop__(
        shell, copy_cmd, self.cacheObj.max_try)

    if status != 0:
        self.cacheObj.logger.error(out)
        return False

    self.__appendResult__(file.id, file)
    return True
def process(self, file):
    """Remove the remote file named by ``file.id`` with uberftp.

    Returns True and appends the file to the results on success;
    logs the failing command's output and returns False otherwise.
    """
    uri = file.id
    _, host, remote_path = urisplit(uri)[0], urisplit(uri)[1], urisplit(uri)[2]

    rm_cmd = 'uberftp %s "rm %s"' % (host, remote_path)
    exit_code, cmd_output, _ = self.cacheObj.__cmd_retry_loop__(
        shell, rm_cmd, self.cacheObj.max_try)

    ok = (exit_code == 0)
    if ok:
        self.__appendResult__(file.id, file)
    else:
        self.cacheObj.logger.error(cmd_output)
    return ok
def process(self, file):
    """Fetch one remote file into the local ``dest_dir``.

    Builds a ``file:`` destination URI from ``dest_dir`` (captured from
    the enclosing scope) plus the basename of the source path, then
    runs globus-url-copy with the cache object's retry loop.
    """
    src = file.id
    remote_path = urisplit(src)[2]
    dst = 'file:%s/%s' % (dest_dir, os.path.basename(remote_path))

    transfer = 'globus-url-copy %s %s' % (src, dst)
    exit_code, cmd_output, _ = self.cacheObj.__cmd_retry_loop__(
        shell, transfer, self.cacheObj.max_try)

    ok = (exit_code == 0)
    if ok:
        self.__appendResult__(file.id, file)
    else:
        self.cacheObj.logger.error(cmd_output)
    return ok
def impl_upload(self, files=None, opts=""):
    """
    Uploads multiple files to a remote gridftp server.

    A unique remote subdirectory under ``self.baseURI`` is created (or
    reused) first; each file is then uploaded in parallel worker
    threads via uberftp, with the stream count scaled to the file size.

    @param files: list of local file URIs to upload (default: empty list)
    @param opts: extra options string (currently unused by this method)
    @return: the uploaded-file index objects from the MTRunner results,
             or an empty list if the remote directory is not usable
    """
    # NOTE: default changed from the mutable ``files=[]`` to None to
    # avoid the shared-mutable-default pitfall; behavior for callers
    # is unchanged.
    if files is None:
        files = []

    shell = getShell()

    # making the directory on remote storage at destURI
    dirname = self.__get_unique_fname__()

    # creating subdirectory
    dir_ok = False

    destURI = "%s/%s" % (self.baseURI, dirname)

    uri_info = urisplit(destURI)

    cmd = 'uberftp %s "cd %s"' % (uri_info[1], uri_info[2])

    rc, output, m = self.__cmd_retry_loop__(shell, cmd, 1)

    if rc != 0:
        for l in output.split("\n"):
            # BUGFIX: the stripped line was previously discarded
            # (``l.strip()`` with no assignment), so a "550 ..." line
            # with leading whitespace would never match ``^550``.
            l = l.strip()

            if re.match(r"^550.*", l):
                # the directory is not found (error code 550), try to create
                # the lowest level one
                cmd = 'uberftp %s "mkdir %s"' % (uri_info[1], uri_info[2])

                rc, output, m = self.__cmd_retry_loop__(shell, cmd, 1)

                if rc != 0:
                    self.logger.error(output)
                else:
                    dir_ok = True

                break
    else:
        self.logger.debug("parent directory already available: %s" % destURI)
        dir_ok = True

    if not dir_ok:
        self.logger.error("parent directory not available: %s" % destURI)
        return []

    # the algorithm of uploading one file
    class MyAlgorithm(Algorithm):

        def __init__(self, cacheObj):
            Algorithm.__init__(self)
            self.cacheObj = cacheObj

        def process(self, file):
            # decide number of parallel streams: roughly one per 10 GiB,
            # clamped to the range [1, 8]
            fsize = os.path.getsize(urlparse(file)[2])
            fname = os.path.basename(urlparse(file)[2])
            fpath = os.path.abspath(urlparse(file)[2])

            md5sum = get_md5sum(fpath, ignoreGzipTimestamp=True)

            nbstream = int((fsize * 1.0) / (10.0 * 1024 * 1024 * 1024))

            if nbstream < 1:
                nbstream = 1  # min stream
            if nbstream > 8:
                nbstream = 8  # max stream

            myDestURI = "%s/%s" % (destURI, fname)

            # uploading the file
            cmd = "uberftp"
            if nbstream > 1:
                cmd += " -c %d" % nbstream
            cmd += " file:%s %s" % (fpath, myDestURI)

            rc, output, m = self.cacheObj.__cmd_retry_loop__(
                shell, cmd, self.cacheObj.max_try)

            if rc != 0:
                self.cacheObj.logger.error(output)
                return False
            else:
                fidx = GridftpFileIndex()
                fidx.id = myDestURI
                fidx.name = fname
                fidx.md5sum = md5sum
                fidx.attributes["fpath"] = fpath

                self.__appendResult__(file, fidx)
                return True

    myAlg = MyAlgorithm(cacheObj=self)
    myData = Data(collection=files)

    runner = MTRunner(name="sandboxcache_gridftp", algorithm=myAlg, data=myData)
    runner.start()
    runner.join(-1)

    return runner.getResults().values()
def impl_upload(self, files=None, opts=''):
    """
    Uploads multiple files to a remote gridftp server.

    A unique remote subdirectory under ``self.baseURI`` is created (or
    reused) first; each file is then uploaded in parallel worker
    threads via uberftp, with the stream count scaled to the file size.

    @param files: list of local file URIs to upload (default: empty list)
    @param opts: extra options string (currently unused by this method)
    @return: the uploaded-file index objects from the MTRunner results,
             or an empty list if the remote directory is not usable
    """
    # NOTE: default changed from the mutable ``files=[]`` to None to
    # avoid the shared-mutable-default pitfall; behavior for callers
    # is unchanged.
    if files is None:
        files = []

    shell = getShell(self.middleware)

    # making the directory on remove storage at destURI
    dirname = self.__get_unique_fname__()

    # creating subdirectory
    dir_ok = False

    destURI = '%s/%s' % (self.baseURI, dirname)

    uri_info = urisplit(destURI)

    cmd = 'uberftp %s "cd %s"' % (uri_info[1], uri_info[2])

    rc, output, m = self.__cmd_retry_loop__(shell, cmd, 1)

    if rc != 0:
        for l in output.split('\n'):
            # BUGFIX: the stripped line was previously discarded
            # (``l.strip()`` with no assignment), so a "550 ..." line
            # with leading whitespace would never match ``^550``.
            l = l.strip()

            if re.match(r'^550.*', l):
                # the directory is not found (error code 550), try to create
                # the lowest level one
                cmd = 'uberftp %s "mkdir %s"' % (uri_info[1], uri_info[2])

                rc, output, m = self.__cmd_retry_loop__(shell, cmd, 1)

                if rc != 0:
                    self.logger.error(output)
                else:
                    dir_ok = True

                break
    else:
        self.logger.debug('parent directory already available: %s' % destURI)
        dir_ok = True

    if not dir_ok:
        self.logger.error('parent directory not available: %s' % destURI)
        return []

    # the algorithm of uploading one file
    class MyAlgorithm(Algorithm):

        def __init__(self, cacheObj):
            Algorithm.__init__(self)
            self.cacheObj = cacheObj

        def process(self, file):
            # decide number of parallel streams: roughly one per 10 GiB,
            # clamped to the range [1, 8]
            fsize = os.path.getsize(urlparse(file)[2])
            fname = os.path.basename(urlparse(file)[2])
            fpath = os.path.abspath(urlparse(file)[2])

            md5sum = get_md5sum(fpath, ignoreGzipTimestamp=True)

            nbstream = int((fsize * 1.0) / (10.0 * 1024 * 1024 * 1024))

            if nbstream < 1:
                nbstream = 1  # min stream
            if nbstream > 8:
                nbstream = 8  # max stream

            myDestURI = '%s/%s' % (destURI, fname)

            # uploading the file
            cmd = 'uberftp'
            if nbstream > 1:
                cmd += ' -c %d' % nbstream
            cmd += ' file:%s %s' % (fpath, myDestURI)

            rc, output, m = self.cacheObj.__cmd_retry_loop__(
                shell, cmd, self.cacheObj.max_try)

            if rc != 0:
                self.cacheObj.logger.error(output)
                return False
            else:
                fidx = GridftpFileIndex()
                fidx.id = myDestURI
                fidx.name = fname
                fidx.md5sum = md5sum
                fidx.attributes['fpath'] = fpath

                self.__appendResult__(file, fidx)
                return True

    myAlg = MyAlgorithm(cacheObj=self)
    myData = Data(collection=files)

    runner = MTRunner(name='sandboxcache_gridftp', algorithm=myAlg, data=myData)
    runner.start()
    runner.join(-1)

    return runner.getResults().values()