Example #1
#========================
# realtime
#========================
realtimedir = config.get('general', 'realtimedir')

#========================
# calibration
#========================
calibrationdir = config.get('general', 'calibrationdir')

stride = config.getint('calibration', 'stride')
delay = config.getint('calibration', 'delay')

calibration_cache = dict(
    (classifier, idq.Cachefile(idq.cache(calibrationdir, classifier, tag='_calibration%s' % usertag)))
    for classifier in classifiers)
kde_cache = dict(
    (classifier, idq.Cachefile(idq.cache(calibrationdir, classifier, tag='_calibration-kde%s' % usertag)))
    for classifier in classifiers)

min_num_gch = config.getfloat('calibration', 'min_num_gch')
min_num_cln = config.getfloat('calibration', 'min_num_cln')

emaillist = config.get('warnings', 'calibration')
errorthr = config.getfloat('warnings', 'calibration_errorthr')

uroc_nsamples = config.getint('calibration', 'urank_nsamples')
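
### Illustration only (not part of the pipeline): the getters above imply an ini layout
### roughly like the fragment below. Section/option names are taken from the calls above;
### the values and the email address are made-up placeholders. Written against the
### Python-2 ConfigParser module used by idq-era code.
from ConfigParser import SafeConfigParser
from StringIO import StringIO

_example_ini = """
[calibration]
stride = 3600
delay = 32
min_num_gch = 10
min_num_cln = 10
urank_nsamples = 100

[warnings]
calibration = detchar@example.org
calibration_errorthr = 0.01
"""
_example_config = SafeConfigParser()
_example_config.readfp(StringIO(_example_ini))
assert _example_config.getint('calibration', 'stride') == 3600
assert _example_config.getfloat('warnings', 'calibration_errorthr') == 0.01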

if not os.path.exists(realtimedir):  ### make sure the realtime output directory exists
    os.makedirs(realtimedir)

samples_header = config.get('realtime', 'dat_columns').split()  ### headers for dat files

### slave the realtime job to the kw stride
stride = int(float(kwconfig['stride']))  ### this is given as a decimal, so we must cast as float first
delay = config.getint('realtime', 'delay')  ### buffer to let jobs finish
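
### Tiny illustration of the cast above (the "32.0" value is a made-up placeholder):
### the kw stride arrives as a decimal string, so int() alone would raise ValueError.
assert int(float("32.0")) == 32
# int("32.0")  ### ValueError: invalid literal for int() with base 10: '32.0'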

#========================
# train cache
#========================
traindir = config.get('general', 'traindir')
train_cache = dict(
    (classifier, idq.Cachefile(idq.cache(traindir, classifier, tag='train')))
    for classifier in classifiers)
for cache in train_cache.values():
    cache.time = 0

#========================
# calibration cache
#========================
calibrationdir = config.get('general', 'calibrationdir')
calibration_cache = dict(
    (classifier, idq.Cachefile(idq.cache(calibrationdir, classifier, tag='calibration')))
    for classifier in classifiers)
for cache in calibration_cache.values():
    cache.time = 0
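
### cache.time is reset to 0 above; a generic staleness-check pattern (illustration only,
### NOT the idq Cachefile API) shows why a zero "last seen" time forces a first read:
import os  ### already imported in the full module; repeated so this sketch stands alone

def _cache_was_updated(path, last_seen_time):
    """return (updated, new_time) for a simple mtime-based staleness check"""
    mtime = os.path.getmtime(path) if os.path.exists(path) else 0
    return mtime > last_seen_time, mtime

### starting from last_seen_time = 0, any existing file is reported as updated once
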
#========================
# train
#========================
traindir = config.get('general', 'traindir')
if ovl:  ### need snglchandir
    snglchndir = config.get('general', 'snglchndir')

stride = config.getint('train', 'stride')
delay = config.getint('train', 'delay')

#train_script = config.get('condor', 'train')

train_cache = dict(
    (classifier, idq.Cachefile(idq.cache(traindir, classifier, tag='_train%s' % usertag)))
    for classifier in classifiers)

### if realtimedir does not exist, we cannot rely on patfiles from the realtime job,
### so we need to build our own auxmvc_vectors
build_auxmvc_vectors = mla and (not os.path.exists(realtimedir))

max_gch_samples = config.getint("train", "max-glitch-samples")
max_cln_samples = config.getint("train", "max-clean-samples")
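
### max_gch_samples / max_cln_samples cap how much data feeds a training job. A minimal,
### hypothetical illustration of such a cap (NOT the idq implementation): keep the most
### recent entries, assuming each sample is a (gps_time, ...) tuple.
def _cap_samples(samples, max_samples):
    """keep at most max_samples entries, preferring the largest (most recent) GPS times"""
    if len(samples) <= max_samples:
        return samples
    return sorted(samples, key=lambda entry: entry[0])[-max_samples:]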

#========================
# data discovery
#========================
if not opts.ignore_science_segments:
    ### load settings for accessing dmt segment files
    dq_name = config.get('get_science_segments', 'include')
Example #4
clean_window = config.getfloat('realtime', 'clean_window')
clean_threshold = config.getfloat('realtime', 'clean_threshold')
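
### clean_window / clean_threshold control how "clean" (glitch-free) times are selected.
### A minimal, hypothetical illustration of that idea (NOT the idq implementation): a time
### counts as clean only if no trigger at least as loud as clean_threshold lies within
### clean_window seconds of it.
def _is_clean(gps, triggers, window, threshold):
    """triggers: iterable of (trigger_gps, significance) pairs"""
    return all(abs(gps - trig_gps) > window
               for trig_gps, signif in triggers if signif >= threshold)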

#========================
# train
#========================
traindir = config.get('general', 'traindir')
if ovl: ### need snglchandir
    snglchndir = config.get('general', 'snglchndir')

stride = config.getint('train', 'stride')
delay = config.getint('train', 'delay')

#train_script = config.get('condor', 'train')

train_cache = dict( (classifier, idq.Cachefile(idq.cache(traindir, classifier, tag='_train%s'%usertag))) for classifier in classifiers )

build_auxmvc_vectors = mla and (not os.path.exists(realtimedir)) ### if realtimedir does not exist, we cannot rely on patfiles from the realtime job
                                                                 ### we need to build our own auxmvc_vectors

max_gch_samples = config.getint("train", "max-glitch-samples")
max_cln_samples = config.getint("train", "max-clean-samples")

#========================
# data discovery
#========================
if not opts.ignore_science_segments:
    ### load settings for accessing dmt segment files
#    dmt_segments_location = config.get('get_science_segments', 'xmlurl')
    dq_name = config.get('get_science_segments', 'include')
#    dq_name = config.get('get_science_segments', 'include').split(':')[1]
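
### The commented-out alternative above keeps only the flag name from a full
### IFO:NAME:VERSION data-quality flag. For a hypothetical flag name:
assert "H1:DMT-ANALYSIS_READY:1".split(':')[1] == "DMT-ANALYSIS_READY"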