def init_job_types(config_parser, job_types = ("datafind", "rm", "binj", "power", "lladd", "binjfind", "bucluster", "bucut", "burca", "burca2", "sqlite", "burcatailor")):
	"""
	Construct the submit file definitions for the requested job types and store them as module-level globals.
	"""
	global datafindjob, rmjob, binjjob, powerjob, lladdjob, binjfindjob, buclusterjob, llb2mjob, bucutjob, burcajob, burca2job, sqlitejob, burcatailorjob

	# ligo_data_find
	if "datafind" in job_types:
		datafindjob = pipeline.LSCDataFindJob(os.path.join(os.getcwd(), get_cache_dir(config_parser)), os.path.join(os.getcwd(), get_out_dir(config_parser)), config_parser)

	# rm
	if "rm" in job_types:
		rmjob = RMJob(config_parser)

	# lalapps_binj
	if "binj" in job_types:
		binjjob = BurstInjJob(config_parser)

	# lalapps_power
	if "power" in job_types:
		powerjob = PowerJob(config_parser)

	# ligolw_add
	if "lladd" in job_types:
		lladdjob = pipeline.LigolwAddJob(os.path.join(get_out_dir(config_parser)), config_parser)
		lladdjob.cache_dir = get_cache_dir(config_parser)

	# lalapps_binjfind
	if "binjfind" in job_types:
		binjfindjob = BinjfindJob(config_parser)

	# lalapps_bucut
	if "bucut" in job_types:
		bucutjob = BucutJob(config_parser)

	# lalapps_bucluster
	if "bucluster" in job_types:
		buclusterjob = BuclusterJob(config_parser)

	# lalapps_burca
	if "burca" in job_types:
		burcajob = BurcaJob(config_parser)

	# lalapps_burca2
	if "burca2" in job_types:
		burca2job = Burca2Job(config_parser)

	# ligolw_sqlite
	if "sqlite" in job_types:
		sqlitejob = SQLiteJob(config_parser)

	# lalapps_burca_tailor
	if "burcatailor" in job_types:
		burcatailorjob = BurcaTailorJob(config_parser)
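
A minimal driver sketch, not part of the original module: it assumes the surrounding script defines get_cache_dir(), get_out_dir() and the *Job classes used above, and the ini file name shown here is illustrative.

try:
    from configparser import ConfigParser  # Python 3
except ImportError:
    from ConfigParser import ConfigParser  # Python 2

config_parser = ConfigParser()
config_parser.read("power_pipeline.ini")  # hypothetical ini file name

# Define submit files only for the job types this run needs; the remaining
# module-level job globals stay undefined.
init_job_types(config_parser, job_types = ("datafind", "power", "lladd"))
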
Example #2
for ifo in ifo_list:
    if not cp.has_option('segments', ifo.lower() + '-analyze'):
        continue

    # decide if we need to segment the data
    available_segments = get_valid_segments(
        cp.get('segfind', 'segment-url'), cp.get('framefind', 'base-dir'), ifo,
        cp.get('segments',
               ifo.lower() + '-analyze'), gps_start_time, gps_end_time)

    if not available_segments:
        print("No available segments for %s, skipping" % ifo)
        continue

    # create the Condor jobs that will be used in the DAG
    df_job = pipeline.LSCDataFindJob('cache', 'logs', cp)
    tmplt_job = inspiral.TmpltBankJob(cp)

    # Based on S6A results ttrigscan clustering has
    # been replaced with 30-ms window clustering
    # ts_job = TrigscanJob(cp)

    si_job_coarse = SiClusterJobCoarse(cp)
    si_job_fine = SiClusterJobFine(cp)
    cp_job = FilesystemJob('cp')

    # Add ifo-specific template config
    if cp.has_section(ifo.lower() + '-tmpltbank'):
        tmplt_job.add_ini_opts(cp, ifo.lower() + '-tmpltbank')

    # Create a job to split the template into parallelization pieces
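
    # Hedged sketch, not in the original snippet: one way the datafind job
    # created above could become per-segment DAG nodes.  It assumes a
    # pipeline.CondorDAG instance named `dag` exists earlier in the script and
    # that each entry of available_segments behaves like a (start, end) pair.
    for seg in available_segments:
        df_node = pipeline.LSCDataFindNode(df_job)
        df_node.set_observatory(ifo[0])   # e.g. 'H' for H1
        df_node.set_start(int(seg[0]))
        df_node.set_end(int(seg[1]))
        # df_node.set_type(...) would also be needed; the option name depends on the ini layout
        dag.add_node(df_node)
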
Example #3
segdir=os.path.join(opts.run_path,'segments')
checkDir(segdir)
os.chdir(segdir)
science_segs={}
seg_files={}
segs={}

basename="nest_%.3f-%.3f"%(starttime,endtime)
# Create DAG #################
daglogfile=os.path.join(opts.dag_log_path,basename+'.log')
dagfile=os.path.join(opts.run_path,basename)

dag = pipeline.CondorDAG(daglogfile)
dag.set_dag_file(dagfile)

datafind_job = pipeline.LSCDataFindJob(cache_dir,opts.jobs_log_path,cp)
datafind_job.add_opt('url-type','file')
datafind_job.set_sub_file(os.path.join(opts.run_path,'datafind.sub'))

# Build list of science segments
# Covers entire time range. Not necessarily all used
for ifo in ifos:
    if not opts.ignore_science_mode and types[ifo] not in fakeTypes:
        seg_files[ifo]=inspiralutils.science_segments(ifo,cp)
        segfile=open(seg_files[ifo])
        #segs[ifo]=segmentsUtils.fromfilenames([seg_files[ifo]])
        segs[ifo]=segmentsUtils.fromsegwizard(segfile)
        segs[ifo].coalesce()
        segfile.close()
    else:   # If we skip the segdb step, just construct a large segment
        print('Faking segment from %i to %i\n' % (datastart, dataend))
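
A hedged sketch, not shown in the truncated snippet above, of how the DAG might be finished once the per-ifo segment bookkeeping is done: one LSCDataFindNode per ifo (reusing the types dict referenced earlier), followed by writing the submit and DAG files.

for ifo in ifos:
    df_node = pipeline.LSCDataFindNode(datafind_job)
    df_node.set_observatory(ifo[0])
    df_node.set_type(types[ifo])
    df_node.set_start(datastart)
    df_node.set_end(dataend)
    dag.add_node(df_node)

dag.write_sub_files()
dag.write_dag()
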
Example #4
subsuffix = '.sub'

# create the Condor jobs that will be used in the DAG
mkdir_job = strain.MkdirJob('logs',cp)
mkdir_node = strain.MkdirNode(mkdir_job,'cache')
if opts.write_dax: dag.add_node(mkdir_node)

# try and make a directory to store the cache files and job logs
try: os.mkdir('logs')
except OSError: pass  # ignore the error if 'logs' already exists
#
#try: os.mkdir('cache')
#except: pass

df_job = pipeline.LSCDataFindJob('cache','logs',cp,opts.write_dax)
noise_job = strain.NoiseJob(cp,opts.write_dax)
qscan_job = strain.qscanJob(cp)

# submit files
df_job.set_sub_file( opts.basename + '.datafind'+ subsuffix )
noise_job.set_sub_file( opts.basename + '.noisecomp' + subsuffix )

# get the pad and chunk lengths from the values in the ini file
length = int(cp.get('pipeline', 'segment-length'))

# get the ifo to filter
ifo = cp.get('pipeline','ifo')
datatype_hoft = cp.get('input','type-hoft')
datatype_derr = cp.get('input','type-derr')
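
A hedged continuation sketch, not in the original script: one datafind node per frame type for the single ifo, chained after the mkdir node when a DAX is written. `gps_start` is a placeholder for whatever start time the full script reads from the ini file or command line.

for datatype in (datatype_hoft, datatype_derr):
    df_node = pipeline.LSCDataFindNode(df_job)
    df_node.set_observatory(ifo[0])
    df_node.set_type(datatype)
    df_node.set_start(gps_start)          # placeholder start time
    df_node.set_end(gps_start + length)   # one segment-length of data
    if opts.write_dax:
        df_node.add_parent(mkdir_node)
    dag.add_node(df_node)
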
Example #5
# create the Condor jobs that will be used in the DAG

# datafind:
frame_types = []
try:
  lsync_file = cp.get('pipeline','lsync-cache-file')
  try: frame_types.append(cp.get('input','ligo-type'))
  except: pass
  try: frame_types.append(cp.get('input','virgo-type'))
  except: pass
  try: frame_types.append(cp.get('input','geo-type'))
  except: pass
  frame_types = [t for t in frame_types if t]
except:
  lsync_file = None
df_job = pipeline.LSCDataFindJob(
  'cache','logs',cp,opts.dax,lsync_file,'|'.join(frame_types))
df_job.set_sub_file( basename + '.datafind'+ subsuffix )

# tmpltbank:
tmplt_jobs = {}

for ifo in ifo_list:
  tmplt_jobs[ifo] = inspiral.TmpltBankJob(cp,opts.dax)
  tmplt_jobs[ifo].set_sub_file( basename + '.tmpltbank_' + ifo + subsuffix )

# inspinj:
inspinj_job = inspiral.InspInjJob(cp) 
inspinj_job.set_sub_file( basename + '.inspinj' + subsuffix )

if opts.noop_inspinj:
  inspinj_job.add_condor_cmd("noop_job", "true")
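
A hedged sketch, not part of the original snippet, of how the jobs above typically become DAG nodes: one template-bank node per ifo, assuming a pipeline.CondorDAG named `dag` and an analysis chunk object with start()/end() methods, both of which the full script sets up elsewhere.

for ifo in ifo_list:
  bank_node = inspiral.TmpltBankNode(tmplt_jobs[ifo])
  bank_node.set_start(chunk.start())   # `chunk` stands in for an AnalysisChunk
  bank_node.set_end(chunk.end())
  bank_node.set_ifo(ifo)
  dag.add_node(bank_node)
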