def performFAsetup(self):
     buildDir(self.installPipes)
     buildDir(self.installIni)
     self.__createLHLL__()
     print "Creating each pipe configuration script."
     countMe = 0
     modValue = 10
     for pipe in self.FApipeNames:
         self.FAconfigure(pipe[3], pipe[4], pipe[0])
         if countMe % modValue == 0:
             sys.stdout.writelines('.')
             sys.stdout.flush()
         countMe = countMe + 1
     print " "
     print "Building ", self.FApipeNames.__len__(
     ), " pipes. This may take awhile."
     countMe = 0
     for pipe in self.FApipeNames:
         #False turns off possible injections.
         self.createPipe(pipe[0], False)
         if countMe % modValue == 0:
             sys.stdout.writelines('.')
             sys.stdout.flush()
         countMe = countMe + 1
     print " "
     print "Ok. Finished"
     #Search the workspace to construct a huge parent DAG for submitting all the DAGs
     root2dags = os.path.normpath(self.dagpath + '/FA/')
     dagFilename = os.path.normpath(self.dagpath + '/FA/FA_tuning.dag')
     self.__buildDAGfrom__(dagFilename, root2dags)
 def performFAsetup(self):
     buildDir(self.installPipes)
     buildDir(self.installIni)
     self.__createLHLL__()
     print "Creating each pipe configuration script."
     countMe=0
     modValue=10
     for pipe in self.FApipeNames:
         self.FAconfigure(pipe[3],pipe[4],pipe[0])
         if countMe%modValue==0:
             sys.stdout.writelines('.')
             sys.stdout.flush()
         countMe=countMe+1
     print " "
     print "Building ",self.FApipeNames.__len__()," pipes. This may take awhile."
     countMe=0
     for pipe in self.FApipeNames:
         #False turns off possible injections.
         self.createPipe(pipe[0],False)
         if countMe%modValue==0:
             sys.stdout.writelines('.')
             sys.stdout.flush()
         countMe=countMe+1
     print " "
     print "Ok. Finished"
     #Search the workspace to construct a huge parent DAG for submitting all the DAGs
     root2dags=os.path.normpath(self.dagpath+'/FA/')
     dagFilename=os.path.normpath(self.dagpath+'/FA/FA_tuning.dag')
     self.__buildDAGfrom__(dagFilename,root2dags)
 def performDEsetup(self):
     #1) Read in lambaH lambaL P L result file
     #2) Create corresponding ini file
     #3) Run pipe creation script to create injection pipes
     buildDir(self.installPipes2)
     buildDir(self.installIni2)
     myIni = self.myIni
     Orig_results = self.readFAresults()
     #Re-encode results to get one ini file for H,L with P and then H,L with Len
     results = []
     for entry in Orig_results:
         h = entry[0]
         l = entry[1]
         p = entry[2]
         len = entry[3]
         #Setup P entry
         results.append([h, l, p, 3])
         #Setup L entry
         results.append([h, l, 0, len])
     print "The detection efficiency pipes to be created are", results.__len__(
     ), " this may take a while."
     countMe = 0
     modValue = 10
     for entry in results:
         if countMe % modValue == 0:
             sys.stdout.writelines('.')
             sys.stdout.flush()
         countMe = countMe + 1
         h = entry[0]
         l = entry[1]
         p = entry[2]
         len = entry[3]
         pipeIniName = self.installIni2 + '/' + self.batchMask + ':' + str(
             h) + ':' + str(float(l)) + ':' + str(float(p)) + ':' + str(
                 int(len)) + ':' + '.ini'
         self.DEpipeNames.append(pipeIniName)
         #Check to see if the threshold section exits...
         #If it does we implicity keep all triggers for FA and DE
         #Else the DE runs try to save disk space and allow tracksearch
         #to perform our simple L and P thresholds. Check MasterIni
         if self.cpIni.has_section('candidatethreshold'):
             self.keepTrigs_DEeditIniFile(h, l, p, len, pipeIniName)
         else:
             self.saveDisk_DEeditIniFile(h, l, p, len, pipeIniName)
         #True states use the ini file to set injection into the pipeline.
         self.createPipe(pipeIniName, True)
     #Search the workspace to construct a huge parent DAG for submitting all the DAGs
     root2dags = os.path.normpath(self.dagpath + '/DE/')
     dagFilename = os.path.normpath(self.dagpath + '/DE/DE_tuning.dag')
     self.__buildDAGfrom__(dagFilename, root2dags)
 def performDEsetup(self):
     #1) Read in lambaH lambaL P L result file
     #2) Create corresponding ini file
     #3) Run pipe creation script to create injection pipes
     buildDir(self.installPipes2)
     buildDir(self.installIni2)
     myIni=self.myIni
     Orig_results=self.readFAresults()
     #Re-encode results to get one ini file for H,L with P and then H,L with Len
     results=[]
     for entry in Orig_results:
         h=entry[0]
         l=entry[1]
         p=entry[2]
         len=entry[3]
         #Setup P entry
         results.append([h,l,p,3])
         #Setup L entry
         results.append([h,l,0,len])
     print "The detection efficiency pipes to be created are",results.__len__()," this may take a while."
     countMe=0
     modValue=10
     for entry in results:
         if countMe%modValue==0:
             sys.stdout.writelines('.')
             sys.stdout.flush()
         countMe=countMe+1
         h=entry[0]
         l=entry[1]
         p=entry[2]
         len=entry[3]
         pipeIniName=self.installIni2+'/'+self.batchMask+':'+str(h)+':'+str(float(l))+':'+str(float(p))+':'+str(int(len))+':'+'.ini'
         self.DEpipeNames.append(pipeIniName)
         #Check to see if the threshold section exits...
         #If it does we implicity keep all triggers for FA and DE
         #Else the DE runs try to save disk space and allow tracksearch
         #to perform our simple L and P thresholds. Check MasterIni
         if self.cpIni.has_section('candidatethreshold'):
             self.keepTrigs_DEeditIniFile(h,l,p,len,pipeIniName)
         else:
             self.saveDisk_DEeditIniFile(h,l,p,len,pipeIniName)
         #True states use the ini file to set injection into the pipeline.
         self.createPipe(pipeIniName,True)
     #Search the workspace to construct a huge parent DAG for submitting all the DAGs
     root2dags=os.path.normpath(self.dagpath+'/DE/')
     dagFilename=os.path.normpath(self.dagpath+'/DE/DE_tuning.dag')
     self.__buildDAGfrom__(dagFilename,root2dags)
 def __init__(self, tunFileCP):
     self.cpTun = tunFileCP
     self.masterIni = cp.get('all', 'masterini')
     self.cpIni = ConfigParser.ConfigParser()
     if not os.path.isfile(self.masterIni):
         print "Error with masterIni in tun configuration file!"
         print self.masterIni
         os.abort()
     self.cpIni.read(self.masterIni)
     self.lambaList = []
     self.lamHopts = cp.get('all', 'LH')
     self.lamLopts = cp.get('all', 'LL')
     if self.lamHopts.count(";") != 2:
         print "Error with LH ini file delimiters!"
         os.abort()
     if self.lamLopts.count(";") != 2:
         print "Error with LL ini file delimiters!"
         os.abort()
     self.LH = self.lamHopts.split(";")
     self.LL = self.lamLopts.split(";")
     self.myIni = cp.get('all', 'masterini')
     self.batchMask = cp.get('all', 'iniBatchLabel')
     self.home = cp.get('all', 'tuningHome')
     self.installPipes = os.path.normpath(self.home + '/FA_pipes')
     self.installIni = os.path.normpath(self.home + '/FA_ini')
     self.installPipes2 = os.path.normpath(self.home + '/DE_pipes')
     self.installIni2 = os.path.normpath(self.home + '/DE_ini')
     self.log = cp.get('all', 'tuningLogs')
     self.dagpath = cp.get('all', 'tuningDags')
     self.seglist = cp.get('all', 'seglist')
     self.mySigmaFA = float(cp.get('false-alarm-calculate', 'FAR'))
     self.FApipeNames = []
     self.DEpipeNames = []
     self.pipeBuilder = cp.get('all', 'pipeProgram')
     #Set the pickle file from Curves Found in FA_pipe run.
     self.curveFoundPickle = []
     #Create directory to place all pipes if not already present!
     buildDir(self.home)
 def __init__(self,tunFileCP):
     self.cpTun=tunFileCP
     self.masterIni=cp.get('all','masterini')
     self.cpIni=ConfigParser.ConfigParser()
     if not os.path.isfile(self.masterIni):
         print "Error with masterIni in tun configuration file!"
         print self.masterIni
         os.abort()
     self.cpIni.read(self.masterIni)
     self.lambaList=[]
     self.lamHopts=cp.get('all','LH')
     self.lamLopts=cp.get('all','LL')
     if self.lamHopts.count(";")!=2:
         print "Error with LH ini file delimiters!"
         os.abort()
     if self.lamLopts.count(";")!=2:
         print "Error with LL ini file delimiters!"
         os.abort()
     self.LH=self.lamHopts.split(";")
     self.LL=self.lamLopts.split(";")
     self.myIni=cp.get('all','masterini')
     self.batchMask=cp.get('all','iniBatchLabel')
     self.home=cp.get('all','tuningHome')
     self.installPipes=os.path.normpath(self.home+'/FA_pipes')
     self.installIni=os.path.normpath(self.home+'/FA_ini')
     self.installPipes2=os.path.normpath(self.home+'/DE_pipes')
     self.installIni2=os.path.normpath(self.home+'/DE_ini')
     self.log=cp.get('all','tuningLogs')
     self.dagpath=cp.get('all','tuningDags')
     self.seglist=cp.get('all','seglist')
     self.mySigmaFA=float(cp.get('false-alarm-calculate','FAR'))
     self.FApipeNames=[]
     self.DEpipeNames=[]
     self.pipeBuilder=cp.get('all','pipeProgram')
     #Set the pickle file from Curves Found in FA_pipe run.
     self.curveFoundPickle=[]
     #Create directory to place all pipes if not already present!
     buildDir(self.home)
# Example #7
# Command-line options for the cut-pipe builder (optparse).
# -o/--output_name: base name for the generated MYPIPE.dag / MYJOB.sub files.
parser.add_option("-o","--output_name",dest="outputName",
                  help="Setting this will create a dag via the form  MYPIPE.dag and a matching submit file via MYJOB.sub. These two files will be placed in the current working directory. These files should be launched from a local disk area, but this can be overridden with the appropriate condor flag.",
                  default="MYPIPE"
                  )
# -u/--output_path: where candidate files and figures are written
# (defaults to the current directory).
parser.add_option("-u","--output_path",dest="outputPath",
                  default="./",
                  help="Setting this will set the output the cut pipe to write the resulting candidate files and figures to the path specified by OUTPUTPATH, creating it if needed.  If you don't set this then it is assumed that you will want OUTPUTPATH to be instead in the current directory and named MYPIPE.RESULTDIR"
                  )
# -d/--dag_locks: local-disk location for Condor DAG lock files.
parser.add_option("-d","--dag_locks",dest="dagLockPath",
                  default="/tmp/dagLocks",
                  help="This is an optional location (local disk) for the lock files Condor needs to keep the log files on a non-local disk.  The default is /tmp/dagLocks"
                  )

(options,args)=parser.parse_args()
# Normalize every user-supplied path before use.
dagLocks=str(os.path.normpath(options.dagLockPath))
buildDir(dagLocks)
outputName=str(os.path.normpath(options.outputName))
outputPath=str(os.path.normpath(options.outputPath))
outputResultsPath=str(os.path.normpath(outputPath+"/"+outputName+".RESULTS"))
# NOTE(review): singleList/buildFigures/plotTriggers/iniFile come from
# parser options declared outside this chunk -- confirm they exist.
singleList=os.path.normpath(options.singleList)
buildFigures=options.buildFigures
plotTriggers=options.plotTriggers
iniFile=os.path.normpath(options.iniFile)


# Expand the single-IFO file list into the list of input files.
listOfFiles=generateFileList(singleList)

# Abort early when the configuration ini is missing.
if not os.path.exists(iniFile):
    print 'Can not find iniFile: ',os.path.basename(iniFile)
    os.abort()
# Abort when the input single-IFO file list is missing.
if not os.path.exists(singleList):
    print("Aborting pipeline construction.")
    # BUG FIX: 'os.abort' was referenced without being called, so the
    # script announced the abort but kept running; it must be invoked.
    os.abort()
    rubberSegList=tracksearch.tracksearchConvertSegList(segmentList,minSize,cp,topBlockFloat,overrideBurn)
    rubberSegList.writeSegList()
    rubberSegListName=rubberSegList.getSegmentName()
    allData=pipeline.ScienceData()
    allData.read(rubberSegListName,minSize)
    allData.make_optimised_chunks(0,dataBlockSize)
    
#Setup logfile mask
logFilePath=cp.get('filelayout','logpath')
# Prefix for per-job log files under the configured log path.
logFileMask=logFilePath+'/logFile_'
# Create the log directory on the fly when the ini points at a
# path that does not exist yet.
if not(os.path.isdir(logFilePath)):
       print('Log path does not exist!')
       print('Expected to find:',logFilePath)
       tracksearch.buildDir(logFilePath)
       print('Created...')

# Running counter -- presumably used by the segment loop below
# (outside this chunk); confirm before removing.
indexA=0
# The pipeline needs a method change for handling large segment lists
# We should switch from 1 DAG per analysis segment to
# a single DAG with the proper relationships to analyze each segment
# each segment should be logically independant and thus one dag
# can handle a search on a limitless amount of data using a single DAG
# to manage the workflow.  The results of each segment should still be
# stored independently

#Write out the data chunks actually configured for search.
print("Writing the actual segment list to disk for reference.")
tracksearch.writeChunkListToDisk(allData,str(segmentListName+".dataUsed"))
            os.abort
    rubberSegList = tracksearch.tracksearchConvertSegList(
        segmentList, minSize, cp, topBlockFloat, overrideBurn)
    rubberSegList.writeSegList()
    rubberSegListName = rubberSegList.getSegmentName()
    allData = pipeline.ScienceData()
    allData.read(rubberSegListName, minSize)
    allData.make_optimised_chunks(0, dataBlockSize)

#Setup logfile mask
logFilePath = cp.get('filelayout', 'logpath')
# Prefix for per-job log files under the configured log path.
logFileMask = logFilePath + '/logFile_'
# Create the log directory on the fly when the ini points at a
# path that does not exist yet.
if not (os.path.isdir(logFilePath)):
    print('Log path does not exist!')
    print('Expected to find:', logFilePath)
    tracksearch.buildDir(logFilePath)
    print('Created...')

# Running counter -- presumably used by the segment loop below
# (outside this chunk); confirm before removing.
indexA = 0
# The pipeline needs a method change for handling large segment lists
# We should switch from 1 DAG per analysis segment to
# a single DAG with the proper relationships to analyze each segment
# each segment should be logically independant and thus one dag
# can handle a search on a limitless amount of data using a single DAG
# to manage the workflow.  The results of each segment should still be
# stored independently

#Write out the data chunks actually configured for search.
print("Writing the actual segment list to disk for reference.")
tracksearch.writeChunkListToDisk(allData, str(segmentListName + ".dataUsed"))