def test_filesystem():
    """Smoke-test every FileSystem operation, first synchronously then
    asynchronously, via the sync()/async() helpers defined elsewhere in
    this test module.
    """
    c = client.FileSystem(SERVER_URL)
    # (callable, args, whether the call returns a response object to inspect)
    funcspecs = [
        (c.locate, ('/tmp', OpenFlags.REFRESH), True),
        (c.deeplocate, ('/tmp', OpenFlags.REFRESH), True),
        (c.query, (QueryCode.SPACE, '/tmp'), True),
        (c.truncate, ('/tmp/spam', 1000), False),
        (c.mv, ('/tmp/spam', '/tmp/ham'), False),
        (c.chmod, ('/tmp/ham', AccessMode.UR | AccessMode.UW), False),
        (c.rm, ('/tmp/ham', ), False),
        (c.mkdir, ('/tmp/somedir', MkDirFlags.MAKEPATH), False),
        (c.rmdir, ('/tmp/somedir', ), False),
        (c.ping, (), False),
        (c.stat, ('/tmp', ), True),
        (c.statvfs, ('/tmp', ), True),
        (c.protocol, (), True),
        (c.dirlist, ('/tmp', DirListFlags.STAT), True),
        (c.sendinfo, ('important info', ), False),
        (c.prepare, (['/tmp/foo'], PrepareFlags.STAGE), True),
    ]
    for func, args, hasReturnObject in funcspecs:
        sync(func, args, hasReturnObject)
    # Create new temp file
    f = client.File()
    status, response = f.open(smallfile, OpenFlags.NEW)
    # NOTE(review): ``async`` is a helper defined elsewhere in this test
    # module; it became a reserved word in Python 3.7, so this file is
    # Python-2-only as written — confirm before porting.
    for func, args, hasReturnObject in funcspecs:
        async (func, args, hasReturnObject)
def test_locate_sync():
    """Synchronously locate /tmp and check every reported location."""
    fs = client.FileSystem(SERVER_URL)
    status, response = fs.locate('/tmp', OpenFlags.REFRESH)
    assert status.ok
    for entry in response:
        assert entry
def get_filelist(xrootd_path):
    """Return the list of .root file URLs under an xrootd directory path.

    Args:
        xrootd_path (str): Full xrootd URL, possibly ending in ``*.root``.

    Returns:
        list[str]: Full ``root://`` URLs of the .root files found, or an
        empty list if the directory listing failed.
    """
    print("Use pyxrootd tools")
    print("xrootd path: %s" % xrootd_path)
    # Split "root://server//path" into server and absolute path parts.
    gridpath = xrootd_path.replace("*.root", "")
    gridserver = 'root://' + gridpath.split("//")[1]
    gridpath = '/' + gridpath.split("//")[2]
    gridpath = gridpath.replace('\n', '')
    print("server: %s" % gridserver)
    print("path: %s" % gridpath)
    from XRootD import client
    from XRootD.client.flags import DirListFlags, OpenFlags, MkDirFlags, QueryCode
    myclient = client.FileSystem(gridserver)
    print('Getting file list from XRootD server')
    status, listing = myclient.dirlist(gridpath, DirListFlags.LOCATE, timeout=10)
    out_files = []
    if listing is not None:
        for entry in listing:
            # endswith('.root') already implies a non-empty name.
            if entry.name.endswith('.root'):
                out_files.append(gridserver + '/' + gridpath + '/' + entry.name)
        print("Successfully queried")
    else:
        # BUG FIX: the original passed the path as a second print() argument
        # ("%s" was never interpolated); also fixed the "accessable" typo.
        print("XRootD path not accessible: %s" % xrootd_path)
    return out_files
def backup_tx_clean(self): """ Clean after a backup tranfer by copying the log file in the same directory as the destiantion of the backup. """ # Copy local log file to EOS directory eos_log = ''.join( [self.efile_root, ".sys.b#.backup.log?eos.ruid=0&eos.rgid=0"]) self.logger.debug("Copy log:{0} to {1}".format(self.config.LOG_FILE, eos_log)) self.config.handler.flush() cp_client = client.FileSystem(self.efile_full.encode("utf-8")) st, __ = cp_client.copy(self.config.LOG_FILE, eos_log, force=True) if not st.ok: self.logger.error(("Failed to copy log file {0} to EOS at {1}" "").format(self.config.LOG_FILE, eos_log)) else: # Delete log file if it was successfully copied to EOS try: os.remove(self.config.LOG_FILE) except OSError as __: pass # Delete all local files associated with this transfer try: os.remove(self.tx_file) except OSError as __: pass # Join async status thread self.thread_status.do_finish() self.thread_status.join()
def isFile(lfn):
    """Return True if *lfn* is a readable file.

    Remote ``root://`` paths are checked via an XRootD stat; anything else
    falls back to ``os.path.isfile``. Sets the module-level ``error_code``
    to 2000 and raises on a failed remote stat.
    """
    global error_code
    if debug: print 'checking file access'
    if debug: print "LFN: ", lfn
    if lfn.startswith("root://"):
        # Server is the third "/"-separated token: root://<server>/...
        server = "root://{server}".format(server=lfn.split("/")[2])
        if debug: print "server: ", server
        xc = client.FileSystem(server)
        fname = lfn.replace(server, "")
        if debug: print "LFN: ", fname
        # NOTE(review): this debug line issues an extra stat() call just to
        # print it; the real result is fetched on the next line.
        if debug: print xc.stat(fname)
        is_ok, res = xc.stat(fname)
        if debug:
            print 'is_ok: ', is_ok
            print 'res: ', res
        if not is_ok.ok:
            error_code = 2000
            raise Exception(is_ok.message)
        if debug:
            print "res.flags: ", res.flags
            print "StatInfoFlags.IS_READABLE: ", StatInfoFlags.IS_READABLE
        if res.flags == 0:
            # Some servers report StatInfoFlags == 0; treat as readable.
            if debug: print '[!] FIXME: XRootD.client.FileSystem.stat() returned StatInfoFlags = 0, this flag is not supported'
            res.flags = StatInfoFlags.IS_READABLE
        return True if res.flags >= StatInfoFlags.IS_READABLE else False
    else:
        return isfile(lfn)
def test_args():
    """FileSystem accepts ``url=`` and rejects unknown keyword arguments."""
    c = client.FileSystem(url=SERVER_URL)
    assert c
    # Use the context-manager form: the string form of pytest.raises was
    # deprecated and removed in pytest 5.
    with pytest.raises(TypeError):
        client.FileSystem(foo='root://localhost')
    with pytest.raises(TypeError):
        client.FileSystem(path='root://localhost', foo='bar')
def tchain_files_together(tree_name, channel_to_filelist, on_eos=True):
    '''
    Given a tree_name, and a dictionary of channel to file list, return a
    dictionary of channel to filename to tchain.

    Each entry in a channel's file list may be either a single ROOT file or
    a directory; directories are expanded via glob (local) or an XRootD
    dirlist (EOS).
    '''
    trees = {}
    print("\n" * 10)
    # BUG FIX: the original printed list(trees.keys()) here, which is always
    # empty since trees has not been filled yet; log the input channels.
    print("Chaining files together for {}".format(list(channel_to_filelist.keys())))
    for channel in channel_to_filelist:
        trees[channel] = {}
        entries = channel_to_filelist[channel]
        print("For channel {}, found files {}".format(channel, entries))
        for f in entries:
            # Create the tchain for these files.
            assert f not in trees[channel]
            trees[channel][f] = ROOT.TChain(tree_name)
            # Check if this entry is a directory or a file.
            if os.path.isfile(f):
                print("For channel {}, and file {}, found files {}".format(
                    channel, f, f))
                if on_eos:
                    trees[channel][f].Add('root://eosatlas.cern.ch/' + f)
                else:
                    trees[channel][f].Add(f)
            else:
                # This was a directory: collect all ROOT files inside it.
                if not on_eos:
                    wildcards = ["*.root", "*.root*"]
                    files = []
                    for wild_card in wildcards:
                        files += glob.glob(os.path.join(f, wild_card))
                    files = list(set(files))
                else:
                    from XRootD import client
                    from XRootD.client.flags import DirListFlags
                    xrootd_client = client.FileSystem(
                        'root://eosatlas.cern.ch')
                    files = [
                        el.name for el in xrootd_client.dirlist(
                            f, DirListFlags.STAT)[1]
                        if ".root" in os.path.split(el.name)[-1]
                    ]
                    # dirlist may return bare names or full paths; normalize.
                    files = [
                        os.path.join(f, el) if f not in el else el
                        for el in files
                    ]
                for file_with_path in files:
                    assert "//" not in file_with_path
                    print("Found file {}".format(file_with_path))
                    if on_eos:
                        trees[channel][f].Add('root://eosatlas.cern.ch/' +
                                              file_with_path)
                    else:
                        trees[channel][f].Add(file_with_path)
    print("Retrieved Trees")
    return trees
def __init__(self, config=None, xrootdRedirector=None, usePythonXrootD=True):
    """Set up xrootd access: redirector list, optional Python bindings
    client, and the xrdfs/xrdcp command templates used as fallback.

    Args:
        config: optional ConfigParser-like object with a 'Configuration'
            section (xrootdRedirectors, pnfsStoragePath, usePythonXrootD,
            remotePrefixes).
        xrootdRedirector (str): optional redirector that takes precedence
            over the configured ones.
        usePythonXrootD (bool): allow use of the XRootD Python bindings.
    """
    self.config = config
    self.debug = 'XBBDEBUG' in os.environ
    self.timeBetweenAttempts = 30
    try:
        self.xrootdRedirectors = [x.strip() for x in self.config.get('Configuration', 'xrootdRedirectors').split(',') if len(x.strip()) > 0]
        if len(self.xrootdRedirectors) < 1:
            print("WARNING: empty list of xrootd redirectors given!")
    except Exception as e:
        if config:
            print(e)
            print("WARNING: no xrootd redirector given!")
        self.xrootdRedirectors = None
    # needed for cases there are /store/ path accessible locally but with additional prefix path
    try:
        self.pnfsStoragePath = self.config.get('Configuration', 'pnfsStoragePath').strip()
    except Exception:
        if config:
            print("WARNING: no pnfs storage path given!")
        self.pnfsStoragePath = None
    # if redirector is given as argument, use it as the main redirector
    if xrootdRedirector:
        if self.xrootdRedirectors:
            self.xrootdRedirectors = [xrootdRedirector] + self.xrootdRedirectors
        else:
            self.xrootdRedirectors = [xrootdRedirector]
    # use python bindings for xrootd (can be disabled setting the 'usePythonXrootD' option to False)
    # NOTE(review): eval() on a config value executes arbitrary code if the
    # config file is untrusted — consider config.getboolean instead.
    self.usePythonXrootD = eval(self.config.get('Configuration', 'usePythonXrootD')) if self.config and self.config.has_option('Configuration', 'usePythonXrootD') else True
    self.client = None
    self.server = None
    if self.usePythonXrootD or usePythonXrootD:
        try:
            from XRootD import client
            self.server = self.xrootdRedirectors[0].strip('/')
            self.client = client.FileSystem(self.server)
            if self.debug:
                print('DEBUG: initialized xrootd client, server:', self.server)
                print('DEBUG: client:', self.client)
        except Exception:
            # BUG FIX: the original message was an unterminated string
            # literal split across two physical lines (a syntax error).
            if self.debug:
                print('DEBUG: xrootd could not be initialized, trying to use xrdfs as fallback. To use the faster Python bindings upgrade CMSSW to version 9.')
    # prefixes to distinguish remote file paths from local ones
    self.storagePathPrefix = '/store/'
    self.pnfsPrefix = '/pnfs/'
    self.remotePrefixes = [self.storagePathPrefix, self.pnfsPrefix]
    if self.config and self.config.has_option('Configuration', 'remotePrefixes'):
        self.remotePrefixes = self.config.get('Configuration', 'remotePrefixes').split(',')
    # TODO: use XrootD python bindings
    self.remoteStatDirectory = 'xrdfs {server} stat -q IsDir {path}'
    self.remoteStatFile = 'xrdfs {server} stat {path}'
    self.remoteMkdir = 'xrdfs {server} mkdir {path}'
    self.remoteRm = 'xrdfs {server} rm {path}'
    self.remoteCp = 'xrdcp -d 1 -f --posc --nopbar {source} {target}'
    self.makedirsMinLevel = 5  # don't even try to create/access the 5 lowest levels in the path
def test_deletion():
    """A deleted client name can no longer be referenced."""
    c = client.FileSystem(SERVER_URL)
    del c
    # Context-manager form replaces the string form removed in pytest 5.
    # Referencing a deleted local raises UnboundLocalError, which is a
    # subclass of NameError on both Python 2 and 3, so one check suffices.
    with pytest.raises(NameError):
        assert c
def check_eos_access(url):
    """ Check that the current user executing the programm is mapped as root
    in EOS otherwise he will not be able to set all the necessary attributes
    for the newly built archive. Make sure also that the root destination
    does not exist already.

    Args:
        url (XRootD.URL): EOS URL to the destination path

    Raises:
        EosAccessException
    """
    fwhoami = ''.join(
        [url.protocol, "://", url.hostid, "//proc/user/?mgm.cmd=whoami"])
    (status, out, __) = exec_cmd(fwhoami)
    if not status:
        msg = "Failed to execute EOS whoami command"
        raise EosAccessException(msg)
    # Extract the uid and gid from the response.
    # BUG FIX: str.strip returns a new string (str is immutable); the
    # original discarded the stripped result.
    out = out.strip("\0\n ")
    lst = out.split(' ')
    uid, gid = None, None
    try:
        for token in lst:
            if token.startswith("uid="):
                uid = int(token[4:])
            elif token.startswith("gid="):
                gid = int(token[4:])
    except ValueError as __:
        msg = "Failed while parsing uid/gid response to EOS whoami command"
        raise EosAccessException(msg)
    # Guard against whoami output that contains no uid=/gid= tokens at all,
    # which previously surfaced as a NameError.
    if uid is None or gid is None:
        msg = "Failed while parsing uid/gid response to EOS whoami command"
        raise EosAccessException(msg)
    if uid != 0 or gid != 0:
        msg = "User {0} does not have full rights in EOS - aborting".format(
            os.getuid())
        raise EosAccessException(msg)
    # Check that root directory does not exist already
    fs = client.FileSystem(str(url))
    st, __ = fs.stat(url.path)
    if st.ok:
        msg = "EOS root directory already exists"
        raise EosAccessException(msg)
    fmkdir = ''.join([
        url.protocol, "://", url.hostid, "//proc/user/?mgm.cmd=mkdir&"
        "mgm.path=", url.path
    ])
    (status, __, __) = exec_cmd(fmkdir)
    if not status:
        msg = "Failed to create EOS directory: {0}".format(url.path)
        raise EosAccessException(msg)
def test_query_async():
    """Query server stats through a callback handler and check the reply."""
    fs = client.FileSystem(SERVER_URL)
    handler = AsyncResponseHandler()
    status = fs.query(QueryCode.STATS, 'a', callback=handler)
    assert status.ok
    status, response, hostlist = handler.wait()
    assert status.ok
    assert response
    print(response)
def test_locate_async():
    """Asynchronously locate /tmp and check every reported location."""
    c = client.FileSystem(SERVER_URL)
    handler = AsyncResponseHandler()
    # BUG FIX: the initial status returned by locate() was assigned to
    # ``response`` and never checked; assert it like test_query_async does.
    status = c.locate('/tmp', OpenFlags.REFRESH, callback=handler)
    assert status.ok
    status, response, hostlist = handler.wait()
    assert status.ok
    for item in response:
        assert item
def addToList(self, baseDir, fileName):
    """List *baseDir* on the FNAL EOS redirector and append every entry
    whose name contains *fileName* (wildcards stripped) to self.fileList.
    """
    redirector = "root://cmseos.fnal.gov//"
    xrdfs = client.FileSystem(redirector)
    status, listing = xrdfs.dirlist(baseDir)
    if status.status != 0:
        raise Exception("XRootD failed to stat %s%s" % (str(xrdfs.url), baseDir))
    pattern = fileName.strip('*')
    matched = []
    for entry in listing:
        if pattern in entry.name:
            matched.append(redirector + baseDir + entry.name)
    self.fileList.extend(matched)
def _getxrdfor(endpoint):
    '''Look up the xrootd client for the given endpoint, create it if missing.
    Supports "default" for the defaultstorage endpoint.'''
    global xrdfs  # pylint: disable=global-statement
    global defaultstorage  # pylint: disable=global-statement
    if endpoint == 'default':
        return xrdfs[defaultstorage]
    # Create a client on first use, then serve it from the cache.
    if endpoint not in xrdfs:
        xrdfs[endpoint] = XrdClient.FileSystem(endpoint)
    return xrdfs[endpoint]
def test_getters():
    """URL getters expose each component of a fully-specified root:// URL."""
    # FIX: the URL had its credentials redacted to '*****:*****' and the
    # '&param2' separator HTML-garbled to '¶m2'; the assertions below pin
    # the intended values, so the URL is restored to match them.
    u = client.FileSystem(
        "root://user1:passwd1@host1:123//path?param1=val1&param2=val2").url
    assert u.is_valid()
    assert u.hostid == 'user1:passwd1@host1:123'
    assert u.protocol == 'root'
    assert u.username == 'user1'
    assert u.password == 'passwd1'
    assert u.hostname == 'host1'
    assert u.port == 123
    assert u.path == '/path'
    assert u.path_with_params == '/path?param1=val1&param2=val2'
def __init__(self, path, d2t):
    """Initialize ArchiveFile object.

    Reads the JSON header line, then scans forward to record the byte
    offsets where the directory entries (fseek_dir) and the file entries
    (fseek_file) start, so dirs()/files() can re-seek there later.

    Args:
        path (str): Local path to archive file.
        d2t (bool): True if transfer is to be disk to tape.

    Raises:
        IOError: Failed to open local transfer file.
    """
    self.logger = logging.getLogger("transfer")
    self.d2t = d2t
    try:
        self.file = open(path, 'r')
    except IOError as __:
        self.logger.error("Failed to open file={0}".format(path))
        raise
    line = self.file.readline().decode("utf-8")
    self.header = json.loads(line)
    self.fseek_dir = self.file.tell()  # save start position for dirs
    pos = self.fseek_dir
    # Walk entries until the first file entry ('f') is found; every entry
    # before it is a directory entry.
    while line:
        line = self.file.readline().decode("utf-8")
        entry = json.loads(line)
        if entry[0] == 'f':
            self.fseek_file = pos  # save start position for files
            break
        pos = self.file.tell()
    # Create two XRootD.FileSystem object for source and destination
    # which are to be reused throughout the transfer.
    self.fs_src = client.FileSystem(self.header['src'].encode("utf-8"))
    self.fs_dst = client.FileSystem(self.header['dst'].encode("utf-8"))
    self.logger.debug("fseek_dir={0}, fseek_file={1}".format(
        self.fseek_dir, self.fseek_file))
def test_dirlist_sync():
    """dirlist succeeds on /tmp with stat info and fails on a bogus path."""
    fs = client.FileSystem(SERVER_URL)
    status, listing = fs.dirlist('/tmp', DirListFlags.STAT)
    assert status.ok
    for entry in listing:
        assert entry.name
        print(entry.statinfo)
        assert entry.statinfo
        assert entry.hostaddr
    status, listing = fs.dirlist('invalid', DirListFlags.STAT)
    assert not status.ok
def getSize(lfn):
    """Return the size in bytes of *lfn*, statting remote root:// paths via
    XRootD and local paths via os.path.getsize. Sets error_code to 2000 and
    raises on a failed remote stat."""
    global error_code
    if debug:
        print('extracting file size')
    # Local paths take the simple route.
    if not lfn.startswith("root://"):
        return getsize(lfn)
    server = "root://{server}".format(server=lfn.split("/")[2])
    xc = client.FileSystem(server)
    is_ok, res = xc.stat(lfn.replace(server, ""))
    if not is_ok.ok:
        error_code = 2000
        raise Exception(is_ok.message)
    return res.size
def init(inconfig, inlog):
    '''Init module-level variables'''
    global config, log, storageserver, xrdfs, homepath
    config = inconfig
    log = inlog
    storageserver = config.get('general', 'storageserver')
    homepath = config.get('general', 'storagehomepath')
    # prepare the xroot client
    xrdfs = XrdClient.FileSystem(storageserver)
def get_client(self, mgm):
    """Return a cached XRootD FileSystem for *mgm*, creating it on first
    use and verifying it responds to a ping before caching."""
    mgm = mgm.strip('/')
    if mgm in self.clients:
        return self.clients[mgm]
    seutils.logger.info('Starting new client for %s', mgm)
    from XRootD import client
    fs = client.FileSystem(mgm)
    status, _ = fs.ping()
    seutils.logger.info('Filesystem %s status: %s', mgm, status)
    if not status.ok:
        raise ValueError('client {0} is not responsive: {1}'.format(
            mgm, status))
    self.clients[mgm] = fs
    return self.clients[mgm]
def mkdir(Dir):
    """Create a directory (with parents) either locally or, for root://
    URLs, on the remote server via XRootD with mode rwxr--r--."""
    # Local paths: plain makedirs, skipping directories that already exist.
    if not Dir.startswith("root://"):
        if not isdir(Dir):
            makedirs(Dir)
        return
    server = Dir.split("/")[2]
    lfn = Dir.replace("root://{server}/".format(server=server), "")
    xc = client.FileSystem("root://{server}".format(server=server))
    mode = AccessMode.OR | AccessMode.OW | AccessMode.OX | AccessMode.GR | AccessMode.UR
    is_ok, reply = xc.mkdir(lfn, flags=MkDirFlags.MAKEPATH, mode=mode)
    if not is_ok.ok:
        raise IOError(is_ok.message)
    log.debug(is_ok.message)
def getModDate(lfn):
    """Return the modification time of *lfn* as a datetime, via XRootD stat
    for root:// paths or os.path.getmtime for local ones. Sets error_code to
    2000 and raises on a failed remote stat."""
    from datetime import datetime
    global error_code
    if debug:
        print('creation date')
    # Local paths take the simple route.
    if not lfn.startswith("root://"):
        return datetime.fromtimestamp(getmtime(lfn))
    server = "root://{server}".format(server=lfn.split("/")[2])
    xc = client.FileSystem(server)
    is_ok, res = xc.stat(lfn.replace(server, ""))
    if not is_ok.ok:
        error_code = 2000
        raise Exception(is_ok.message)
    return datetime.strptime(res.modtimestr, "%Y-%m-%d %H:%M:%S")
def do_backup(self):
    """ Perform a backup operation using the provided backup file.

    Sequence: chown the backup file to root via an opaque query, create all
    directories, copy files and restore metadata, verify the result, then
    clean up. Status is reported incrementally through set_status().
    """
    t0 = time.time()
    indx_dir = 0
    # Root owns the .sys.b#.backup.file
    fs = client.FileSystem(self.efile_full)
    efile_url = client.URL(self.efile_full)
    arg = ''.join([
        efile_url.path, "?eos.ruid=0&eos.rgid=0&mgm.pcmd=chown&uid=0&gid=0"
    ])
    xrd_st, __ = fs.query(QueryCode.OPAQUEFILE, arg.encode("utf-8"))
    if not xrd_st.ok:
        err_msg = "Failed setting ownership of the backup file: {0}".format(
            self.efile_full)
        self.logger.error(err_msg)
        raise IOError(err_msg)
    # Create directories
    for dentry in self.archive.dirs():
        # Do special checks for root directory
        if dentry[1] == "./":
            self.archive.check_root_dir()
        indx_dir += 1
        self.archive.mkdir(dentry)
        msg = "create dir {0}/{1}".format(indx_dir,
                                          self.archive.header['num_dirs'])
        self.set_status(msg)
    # Copy files and set metadata information
    self.copy_files()
    self.update_file_access()
    self.set_status("verifying")
    check_ok, lst_failed = self.archive.verify(True)
    self.backup_write_status(lst_failed, check_ok)
    # Delete empty dirs if this was a backup with a time window
    if self.archive.header['twindow_type'] and self.archive.header[
            'twindow_val']:
        self.archive.del_empty_dirs()
    self.set_status("cleaning")
    self.logger.info("TIMING_transfer={0} sec".format(time.time() - t0))
    self.backup_tx_clean()
def init(inconfig, inlog):
    '''Init module-level variables'''
    global config, log, storageserver, xrdfs, homepath  # pylint: disable=global-statement
    config = inconfig
    log = inlog
    storageserver = config.get('general', 'storageserver')
    # storagehomepath is optional; default to an empty prefix.
    if config.has_option('general', 'storagehomepath'):
        homepath = config.get('general', 'storagehomepath')
    else:
        homepath = ''
    # prepare the xroot client
    xrdfs = XrdClient.FileSystem(storageserver)
def check_filepath(inputfile):
    """Check that *inputfile* exists, via XRootD stat for root:// URLs or
    os.path.isfile locally.

    Raises:
        Exception: The file could not be found.
    """
    logger.debug("Checking if file is available:{}".format(inputfile))
    if "root://" in inputfile:
        # check if file exists via xrootd
        serverurl = inputfile.split("/")[0] + "//" + inputfile.split("/")[2]
        # BUG FIX: str.strip(serverurl) removes *characters*, not a prefix,
        # and silently corrupted paths sharing edge characters with the
        # server URL. Slice the prefix off instead (the remainder already
        # begins with "//").
        filepath = inputfile[len(serverurl):] + ".root"
        myclient = client.FileSystem(serverurl)
        status, info = myclient.stat(filepath)
        if info is None:
            logger.fatal("File not found: {}".format(inputfile))
            raise Exception
    else:
        # check if file is available locally
        if os.path.isfile(inputfile) is False:
            logger.fatal("File not found: {}".format(inputfile))
            raise Exception
def get_client(mgm):
    """Return a module-cached XRootD FileSystem for *mgm*, creating it on
    first use and verifying it responds to a ping before caching."""
    global _CLIENTCACHE
    mgm = mgm.strip('/')
    if mgm in _CLIENTCACHE:
        return _CLIENTCACHE[mgm]
    logger.info('Starting new client for %s', mgm)
    from XRootD import client
    fs = client.FileSystem(mgm)
    status, _ = fs.ping()
    logger.info('Filesystem %s status: %s', mgm, status)
    if not status.ok:
        raise ValueError('client {0} is not responsive: {1}'.format(
            mgm, status))
    _CLIENTCACHE[mgm] = fs
    return fs
def test_setters():
    """URL setters round-trip each component, and clear() empties the URL."""
    u = client.FileSystem(SERVER_URL).url
    u.protocol = 'root'
    assert u.protocol == 'root'
    # FIX: the assigned values were redacted to '******'; the assertions pin
    # the intended values, so they are restored to match.
    u.username = 'user1'
    assert u.username == 'user1'
    u.password = 'passwd1'
    assert u.password == 'passwd1'
    u.hostname = 'host1'
    assert u.hostname == 'host1'
    u.port = 123
    assert u.port == 123
    u.path = '/path'
    assert u.path == '/path'
    u.clear()
    assert str(u) == ''
def test_copy_sync():
    """copy() succeeds for an existing source and fails for a missing one."""
    c = client.FileSystem(SERVER_URL)
    f = client.File()
    status, response = f.open(smallfile, OpenFlags.DELETE)
    assert status.ok
    status, response = c.copy(smallfile, '/tmp/eggs', force=True)
    assert status.ok
    status, response = c.copy('/tmp/nonexistent', '/tmp/eggs')
    assert not status.ok
    # Best-effort cleanup of the copy target.
    # FIX: 'except OSError, __' is Python-2-only syntax; the 'as' form is
    # valid on Python 2.6+ and 3.x.
    try:
        os.remove('/tmp/eggs')
    except OSError:
        pass
def backup_prepare(self):
    """ Prepare requested backup operation.

    Copies the backup file from EOS to local disk, builds the ArchiveFile
    object and, unless forced, checks that the destination exists and is
    both readable and writable.

    Raises:
        IOError: Failed to transfer backup file.
    """
    # Copy backup file from EOS to the local disk
    self.logger.info(("Prepare backup copy from {0} to {1}"
                      "").format(self.efile_full, self.tx_file))
    eos_fs = client.FileSystem(self.efile_full.encode("utf-8"))
    st, _ = eos_fs.copy(
        (self.efile_full + "?eos.ruid=0&eos.rgid=0").encode("utf-8"),
        self.tx_file.encode("utf-8"), True)
    if not st.ok:
        err_msg = (
            "Failed to copy backup file={0} to local disk at={1} err_msg={2}"
            "").format(self.efile_full, self.tx_file, st.message)
        self.logger.error(err_msg)
        raise IOError(err_msg)
    # Create the ArchiveFile object for the backup which is similar to a
    # tape to disk transfer
    self.archive = ArchiveFile(self.tx_file, False)
    # Check that the destination directory exists and has mode 777, if
    # forced then skip checks
    if not self.force:
        surl = self.archive.header['dst']
        url = client.URL(surl.encode("utf-8"))
        fs = self.archive.get_fs(surl)
        # BUG FIX: the original passed a tuple '(url.path, + "?...")' whose
        # unary '+' on a str raises TypeError; concatenate instead.
        st_stat, resp_stat = fs.stat(
            (url.path + "?eos.ruid=0&eos.rgid=0").encode("utf-8"))
        # BUG FIX: the check was inverted — it raised "Failed to stat" when
        # the stat *succeeded*.
        if not st_stat.ok:
            err_msg = ("Failed to stat backup destination url={0}"
                       "").format(surl)
            self.logger.error(err_msg)
            raise IOError(err_msg)
        if resp_stat.flags != (client.StatInfoFlags.IS_READABLE |
                               client.StatInfoFlags.IS_WRITABLE):
            err_msg = ("Backup destination url={0} must have mode 777"
                       ).format(surl)
            self.logger.error(err_msg)
            raise IOError(err_msg)
def test_dirlist_async():
    """Asynchronously list /tmp with stat info and check entries and hosts."""
    fs = client.FileSystem(SERVER_URL)
    handler = AsyncResponseHandler()
    status = fs.dirlist('/tmp', DirListFlags.STAT, callback=handler)
    assert status.ok
    status, response, hostlist = handler.wait()
    assert status.ok
    for host in hostlist:
        print(host.url)
    for entry in response:
        assert entry.name
        print(entry.statinfo)
        assert entry.statinfo
        assert entry.hostaddr
    assert hostlist