def get_all_replicas(files): #--------------------------------------------------------------------------- # Build the list of GUID for all regular files #--------------------------------------------------------------------------- guids = [ myfile['guid'] \ for myfile in files \ if not (myfile['mode'] & 060000) ] # Exclude folders and symbolic links #--------------------------------------------------------------------------- # Get all replicas at once and build dictionaries per GUID #--------------------------------------------------------------------------- guidSizes = {} guidReplicas = {} (res, rep_entries) = lfc.lfc_getreplicas(guids, '') if res != 0: print 'lfc_getreplicas : Error ' + str(res) + "\n" else: for entry in rep_entries: if entry.errcode == 0: if entry.guid in guidReplicas: guidReplicas[entry.guid].append(entry.sfn) else: guidSizes[entry.guid] = entry.filesize guidReplicas[entry.guid] = [entry.sfn] #--------------------------------------------------------------------------- # Loop on the list of files to print their Filemode, Name and Replicas #--------------------------------------------------------------------------- for myfile in files: print myfile['name'] guid = myfile['guid'] if guid in guidReplicas: for replica in guidReplicas[guid]: if replica != '': print ' ==>', replica
def getreplicas_bulk(guids, allLFCs): """ allLFCs=[] for cl in TiersOfATLAS.ToACache.dbcloud: id=TiersOfATLAS.ToACache.dbcloud[cl] l = TiersOfATLAS.getLocalCatalog(id) if l: allLFCs += [l] # shuffle these random.shuffle(allLFCs) # Add the old central LFC last - only use if no-other replica found centralLFC = TiersOfATLAS.getLocalCatalog('OLDLCG') allLFCs += [ centralLFC ] #print allLFCs """ guidSizes = {} guidReplicas = {} guidmd5sum = {} for lfcstring in allLFCs: if setLfcHost(lfcstring) == 'OK': print lfcstring sys.stdout.flush() signal.signal(signal.SIGQUIT, handler) signal.alarm(30) try: (res, rep_entries) = lfc.lfc_getreplicas(guids, '') except AttributeError: print 'ERROR LCG UI does not support LFC bulk reading - please upgrade !' sys.exit(EC_Configuration) signal.alarm(0) if res != 0: print 'lfc_getreplicas : Error ' + str(res) + "\n" else: done = False for rep in rep_entries: if rep.errcode == 0 and rep.sfn != '': if rep.sfn.startswith("srm"): # Add 8443 port surl = rep.sfn.split("//") surl[1] = rep.sfn.split("//")[1].split( "/")[0] + ":8443/" + '/'.join( rep.sfn.split("//")[1].split("/")[1:]) surl_8443 = '//'.join(surl) # Use original sfn surl_8443 = rep.sfn else: surl = rep.sfn.split("//") surl[0] = 'gsiftp:' surl[1] = rep.sfn.split("//")[1].split( "/")[0] + ":2811/" + '/'.join( rep.sfn.split("//")[1].split("/")[1:]) surl_8443 = '//'.join(surl) surl_8443 = re.sub(':*\d*/srm/managerv2\?SFN=', '', surl_8443) if rep.guid in guidReplicas.keys(): guidReplicas[rep.guid].append(surl_8443) else: guidSizes[rep.guid] = rep.filesize #guidReplicas[rep.guid] = [rep.sfn] guidReplicas[rep.guid] = [surl_8443] guidmd5sum[rep.guid] = rep.csumvalue return 'OK', guidReplicas, guidSizes, guidmd5sum
def get_data(self, gpfn, lfn, path, fsize=0, fchecksum=0, guid=0, **pdict):
    """Copy an input file from the SE to a local directory.

    Failure paths return an (error code, pilotErrorDiag) tuple; a root file
    selected for direct access returns ERR_DIRECTIOFILE without a transfer.
    """
    error = PilotErrors()
    pilotErrorDiag = ""

    # Get input parameters from pdict
    token = pdict.get('token', None)
    jobId = pdict.get('jobId', '')
    workDir = pdict.get('workDir', '')
    proxycheck = pdict.get('proxycheck', False)

    # try to get the direct reading control variable (False for direct reading mode; file should not be copied)
    useCT = pdict.get('usect', True)
    prodDBlockToken = pdict.get('access', '')

    # get the DQ2 tracing report
    report = self.getStubTracingReport(pdict['report'], 'lcg', lfn, guid)

    # get a proper envsetup
    envsetup = self.getEnvsetup(get=True)

    # abort early if the site setup command is broken
    ec, pilotErrorDiag = verifySetupCommand(error, envsetup)
    if ec != 0:
        self.__sendReport('RFCP_FAIL', report)
        return ec, pilotErrorDiag

    if proxycheck:
        # do we have a valid proxy?
        s, pilotErrorDiag = self.verifyProxy(envsetup=envsetup)
        if s != 0:
            self.__sendReport('PROXYFAIL', report)
            return s, pilotErrorDiag
    else:
        tolog("Proxy verification turned off")

    getfile = gpfn

    # default destination is the current working directory
    if path == '':
        path = './'
    fullname = os.path.join(path, lfn)

    # should the root file be copied or read directly by athena?
    directIn, useFileStager = self.getTransferModes()
    if directIn:
        if useCT:
            # the copy tool takes precedence over direct access
            directIn = False
            tolog("Direct access mode is switched off (file will be transferred with the copy tool)")
            updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", type="input")
        else:
            # determine if the file is a root file according to its name
            rootFile = self.isRootFileName(lfn)

            if prodDBlockToken == 'local' or not rootFile:
                directIn = False
                tolog("Direct access mode has been switched off for this file (will be transferred with the copy tool)")
                updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", type="input")
            elif rootFile:
                tolog("Found root file according to file name: %s (will not be transferred in direct reading mode)" % (lfn))
                report['relativeStart'] = None
                report['transferStart'] = None
                self.__sendReport('FOUND_ROOT', report)
                if useFileStager:
                    updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="file_stager", type="input")
                else:
                    updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="remote_io", type="input")
                # direct reading: no copy is performed for this file
                return error.ERR_DIRECTIOFILE, pilotErrorDiag
    else:
        tolog("Normal file transfer")

    # get remote filesize and checksum
    if fsize == 0 or fchecksum == 0:
        try:
            import lfc
        except Exception, e:
            pilotErrorDiag = "get_data() could not import lfc module: %s" % str(e)
            tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
            self.__sendReport('LFC_IMPORT', report)
            return error.ERR_GETLFCIMPORT, pilotErrorDiag

        # ask the LFC catalog for this file's replicas (single-GUID bulk call)
        os.environ['LFC_HOST'] = readpar('lfchost')
        try:
            ret, res = lfc.lfc_getreplicas([str(guid)],"")
        except Exception, e:
            pilotErrorDiag = "Failed to get LFC replicas: %s" % str(e)
            tolog("!!WARNING!!2990!! Exception caught: %s" % (pilotErrorDiag))
            tolog("Mover get_data finished (failed)")
            self.__sendReport('NO_LFC_REPS', report)
            return error.ERR_FAILEDLFCGETREPS, pilotErrorDiag
def get_data(self, gpfn, lfn, path, fsize=0, fchecksum=0, guid=0, **pdict):
    """Copy an input file from the SE to a local directory.

    Failure paths return an (error code, pilotErrorDiag) tuple; a root file
    selected for direct access returns ERR_DIRECTIOFILE without a transfer.
    """
    error = PilotErrors()
    pilotErrorDiag = ""

    # Get input parameters from pdict
    token = pdict.get('token', None)
    jobId = pdict.get('jobId', '')
    workDir = pdict.get('workDir', '')
    experiment = pdict.get('experiment', '')
    proxycheck = pdict.get('proxycheck', False)

    # try to get the direct reading control variable (False for direct reading mode; file should not be copied)
    useCT = pdict.get('usect', True)
    prodDBlockToken = pdict.get('access', '')

    # get the DQ2 tracing report
    report = self.getStubTracingReport(pdict['report'], 'lcg', lfn, guid)

    # get a proper envsetup
    envsetup = self.getEnvsetup(get=True)

    # abort early if the site setup command is broken
    ec, pilotErrorDiag = verifySetupCommand(error, envsetup)
    if ec != 0:
        self.prepareReport('RFCP_FAIL', report)
        return ec, pilotErrorDiag

    # get the experiment object
    thisExperiment = getExperiment(experiment)

    if proxycheck:
        # do we have a valid proxy?  (delegated to the experiment object here,
        # unlike the sibling variant that calls self.verifyProxy)
        s, pilotErrorDiag = thisExperiment.verifyProxy(envsetup=envsetup)
        if s != 0:
            self.prepareReport('PROXYFAIL', report)
            return s, pilotErrorDiag
    else:
        tolog("Proxy verification turned off")

    getfile = gpfn

    # default destination is the current working directory
    if path == '':
        path = './'
    fullname = os.path.join(path, lfn)

    # should the root file be copied or read directly by athena?
    directIn, useFileStager = self.getTransferModes()
    if directIn:
        if useCT:
            # the copy tool takes precedence over direct access
            directIn = False
            tolog("Direct access mode is switched off (file will be transferred with the copy tool)")
            updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", type="input")
        else:
            # determine if the file is a root file according to its name
            rootFile = self.isRootFileName(lfn)

            if prodDBlockToken == 'local' or not rootFile:
                directIn = False
                tolog("Direct access mode has been switched off for this file (will be transferred with the copy tool)")
                updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", type="input")
            elif rootFile:
                tolog("Found root file according to file name: %s (will not be transferred in direct reading mode)" % (lfn))
                report['relativeStart'] = None
                report['transferStart'] = None
                self.prepareReport('FOUND_ROOT', report)
                if useFileStager:
                    updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="file_stager", type="input")
                else:
                    updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="remote_io", type="input")
                # direct reading: no copy is performed for this file
                return error.ERR_DIRECTIOFILE, pilotErrorDiag
    else:
        tolog("Normal file transfer")

    # get remote filesize and checksum
    if fsize == 0 or fchecksum == 0:
        try:
            import lfc
        except Exception, e:
            pilotErrorDiag = "get_data() could not import lfc module: %s" % str(e)
            tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
            self.prepareReport('LFC_IMPORT', report)
            return error.ERR_GETLFCIMPORT, pilotErrorDiag

        # ask the LFC catalog for this file's replicas (single-GUID bulk call)
        os.environ['LFC_HOST'] = readpar('lfchost')
        try:
            ret, res = lfc.lfc_getreplicas([str(guid)],"")
        except Exception, e:
            pilotErrorDiag = "Failed to get LFC replicas: %s" % str(e)
            tolog("!!WARNING!!2990!! Exception caught: %s" % (pilotErrorDiag))
            tolog("Mover get_data finished (failed)")
            self.prepareReport('NO_LFC_REPS', report)
            return error.ERR_FAILEDLFCGETREPS, pilotErrorDiag
def _getPFNsLFC(guids,lfcHost,storages,nFiles,verbose=False): # set LFC HOST os.environ['LFC_HOST'] = lfcHost # timeout os.environ['LFC_CONNTIMEOUT'] = '60' os.environ['LFC_CONRETRY'] = '2' os.environ['LFC_CONRETRYINT'] = '6' if verbose: print "Get file info from %s" % lfcHost # get PFN iGUID = 0 nGUID = 10000 pfnMap = {} listGUID = [] for guid in guids: iGUID += 1 listGUID.append(guid) if iGUID % nGUID == 0 or iGUID == len(guids): sys.stdout.write('.') sys.stdout.flush() # get replica nTry = 3 for iTry in range(nTry): ret,resList = lfc.lfc_getreplicas(listGUID,'') if ret != 0 and iTry+1<nTry: print "retry to access %s %s/%s" % (lfcHost,iTry,nTry) time.sleep(30) else: break if ret == 0: for fr in resList: if fr != None and ((not hasattr(fr,'errcode')) or \ (hasattr(fr,'errcode') and fr.errcode == 0)): # get host match = re.search('[^:]+://([^:/]+):*\d*/',fr.sfn) if match==None: continue # check host host = match.group(1) if storages != [] and (not host in storages): continue # skip tape onTapeFlag = False for tapePath in ['/MCTAPE/','/BNLT1D0/','/atlasmctape/','/atlasdatatape/', '/castor/cern.ch/grid/atlas/tzero/', '/castor/cern.ch/grid/atlas/DAQ/']: if re.search(tapePath,fr.sfn) != None: onTapeFlag = True break if onTapeFlag: continue # append if not pfnMap.has_key(fr.guid): pfnMap[fr.guid] = [] pfnMap[fr.guid].append(fr.sfn) else: print "ERROR : %s" % lfc.sstrerror(lfc.cvar.serrno) sys.exit(EC_LFC) # reset listGUID = [] # break if nFiles > 0 and len(pfnMap) >= nFiles: break # return return pfnMap
def getreplicas_bulk(guids, allLFCs):
    """Bulk-query each LFC host in allLFCs for the replicas of guids.

    For every host accepted by setLfcHost() one lfc_getreplicas() bulk call
    is made, guarded by a 30 second alarm.  srm SFNs are kept unchanged; any
    other protocol is rewritten to gsiftp on port 2811, and a trailing SRMv2
    web-service prefix is stripped.  Returns the 4-tuple
    ("OK", guidReplicas, guidSizes, guidmd5sum) of dicts keyed by GUID; size
    and checksum are taken from the first replica seen per GUID.

    Legacy construction of allLFCs from ToA, kept for reference:
        for cl in TiersOfATLAS.ToACache.dbcloud:
            id = TiersOfATLAS.ToACache.dbcloud[cl]
            l = TiersOfATLAS.getLocalCatalog(id)
            if l: allLFCs += [l]
        random.shuffle(allLFCs)
        allLFCs += [TiersOfATLAS.getLocalCatalog('OLDLCG')]
    """
    guidSizes = {}      # guid -> filesize of the first replica seen
    guidReplicas = {}   # guid -> list of (rewritten) SFNs
    guidmd5sum = {}     # guid -> checksum of the first replica seen
    for lfcstring in allLFCs:
        if setLfcHost(lfcstring) == "OK":
            print lfcstring
            sys.stdout.flush()
            # NOTE(review): alarm(30) delivers SIGALRM, but the handler is
            # installed for SIGQUIT -- confirm the timeout actually fires.
            signal.signal(signal.SIGQUIT, handler)
            signal.alarm(30)
            try:
                (res, rep_entries) = lfc.lfc_getreplicas(guids, "")
            except AttributeError:
                # Old LCG UI: the lfc module lacks the bulk call entirely.
                print "ERROR LCG UI does not support LFC bulk reading - please upgrade !"
                sys.exit(EC_Configuration)
            signal.alarm(0)  # cancel the pending alarm
            if res != 0:
                print "lfc_getreplicas : Error " + str(res) + "\n"
            else:
                done = False  # NOTE(review): never read below -- leftover flag
                for rep in rep_entries:
                    if rep.errcode == 0 and rep.sfn != "":
                        if rep.sfn.startswith("srm"):
                            # Add 8443 port
                            surl = rep.sfn.split("//")
                            surl[1] = (
                                rep.sfn.split("//")[1].split("/")[0]
                                + ":8443/"
                                + "/".join(rep.sfn.split("//")[1].split("/")[1:])
                            )
                            surl_8443 = "//".join(surl)
                            # Use original sfn
                            # NOTE(review): the next line discards the :8443
                            # rewrite computed just above (dead code).
                            surl_8443 = rep.sfn
                        else:
                            # Rewrite any other protocol to gsiftp on 2811.
                            surl = rep.sfn.split("//")
                            surl[0] = "gsiftp:"
                            surl[1] = (
                                rep.sfn.split("//")[1].split("/")[0]
                                + ":2811/"
                                + "/".join(rep.sfn.split("//")[1].split("/")[1:])
                            )
                            surl_8443 = "//".join(surl)
                        # Strip an SRMv2 endpoint prefix if present.
                        surl_8443 = re.sub(":*\d*/srm/managerv2\?SFN=", "", surl_8443)
                        if rep.guid in guidReplicas.keys():
                            guidReplicas[rep.guid].append(surl_8443)
                        else:
                            # First replica of this GUID: record size/checksum.
                            guidSizes[rep.guid] = rep.filesize
                            # guidReplicas[rep.guid] = [rep.sfn]
                            guidReplicas[rep.guid] = [surl_8443]
                            guidmd5sum[rep.guid] = rep.csumvalue
    return "OK", guidReplicas, guidSizes, guidmd5sum
def readdirg_grs(*args): if len(args) < 1: folder = '' else: folder = args[0] if (folder == '') or (folder[0] != '/'): if 'LFC_HOME' in os.environ: folder = os.environ['LFC_HOME'] + '/' + folder else: sys.exit('Relative folder path requires LFC_HOME to be set and exported') #--------------------------------------------------------------------------- # Open the folder #--------------------------------------------------------------------------- dir = lfc.lfc_opendirg(folder, '') if dir == None: err_num = lfc.cvar.serrno err_string = lfc.sstrerror(err_num) sys.exit('Error ' + str(err_num) + ' on folder ' + folder + ': ' + err_string) files = [] guids = [] listp = lfc.lfc_list() #--------------------------------------------------------------------------- # Loop on the entries of the folder to build : # - the list of all files with Name, Filemode and GUID, # - the list of GUID for all regular files. #--------------------------------------------------------------------------- while 1: entry = lfc.lfc_readdirg(dir) if entry == None: break if entry.filemode & 040000: marker = '/' elif entry.filemode & 020000: # not entry.guid: marker = '@' else: marker = '' files.append({'name': entry.d_name + marker, 'mode': entry.filemode, 'guid': entry.guid}) if not (entry.filemode & 060000): # Exclude folders and symbolic links guids.append(entry.guid) lfc.lfc_closedir(dir) #--------------------------------------------------------------------------- # Get all replicas at once and build dictionaries per GUID #--------------------------------------------------------------------------- guidSizes = {} guidReplicas = {} (res, rep_entries) = lfc.lfc_getreplicas(guids, '') if res != 0: print 'lfc_getreplicas : Error ' + str(res) + "\n" else: for entry in rep_entries: if entry.errcode == 0: if entry.guid in guidReplicas: guidReplicas[entry.guid].append(entry.sfn) else: guidSizes[entry.guid] = entry.filesize guidReplicas[entry.guid] = [entry.sfn] 
#--------------------------------------------------------------------------- # Loop on the list of files to print their Filemode, Name and Replicas #--------------------------------------------------------------------------- for myfile in files: print ('%06o' % myfile['mode']) + ' ' + myfile['name'] guid = myfile['guid'] if guid in guidReplicas: nb_replicas = 0 for replica in guidReplicas[guid]: if replica != '': nb_replicas += 1 print ' ==>', replica print 'Found ' + str(nb_replicas) + ' replica(s) Size=' + \ str(guidSizes[guid]) print
def _getPFNsLFC(guids, lfcHost, storages, nFiles, verbose=False): # set LFC HOST os.environ['LFC_HOST'] = lfcHost # timeout os.environ['LFC_CONNTIMEOUT'] = '60' os.environ['LFC_CONRETRY'] = '2' os.environ['LFC_CONRETRYINT'] = '6' if verbose: print "Get file info from %s" % lfcHost # get PFN iGUID = 0 nGUID = 10000 pfnMap = {} listGUID = [] for guid in guids: iGUID += 1 listGUID.append(guid) if iGUID % nGUID == 0 or iGUID == len(guids): sys.stdout.write('.') sys.stdout.flush() # get replica nTry = 3 for iTry in range(nTry): ret, resList = lfc.lfc_getreplicas(listGUID, '') if ret != 0 and iTry + 1 < nTry: print "retry to access %s %s/%s" % (lfcHost, iTry, nTry) time.sleep(30) else: break if ret == 0: for fr in resList: if fr != None and ((not hasattr(fr,'errcode')) or \ (hasattr(fr,'errcode') and fr.errcode == 0)): # get host match = re.search('[^:]+://([^:/]+):*\d*/', fr.sfn) if match == None: continue # check host host = match.group(1) if storages != [] and (not host in storages): continue # skip tape onTapeFlag = False for tapePath in [ '/MCTAPE/', '/BNLT1D0/', '/atlasmctape/', '/atlasdatatape/', '/castor/cern.ch/grid/atlas/tzero/', '/castor/cern.ch/grid/atlas/DAQ/' ]: if re.search(tapePath, fr.sfn) != None: onTapeFlag = True break if onTapeFlag: continue # append if not pfnMap.has_key(fr.guid): pfnMap[fr.guid] = [] pfnMap[fr.guid].append(fr.sfn) else: print "ERROR : %s" % lfc.sstrerror(lfc.cvar.serrno) sys.exit(EC_LFC) # reset listGUID = [] # break if nFiles > 0 and len(pfnMap) >= nFiles: break # return return pfnMap