    # update uroc for each classifier
    #====================
    urocs = {} ### stores uroc files for kde estimation
    for classifier in classifiers:
        ### write list of dats to cache file
        cache = idq.cache(output_dir, classifier, "_datcache%s"%usertag)
        logger.info('writing list of dat files to %s'%cache)
        f = open(cache, 'w')
        for dat in datsD[classifier]:
            print >>f, dat
        f.close()

        logger.info('  computing new calibration for %s'%classifier)

        ### extract data from dat files
        output = idq.slim_load_datfiles(datsD[classifier], skip_lines=0, columns='GPS i rank'.split()+[cluster_key])
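        ### columns: per-sample GPS time, class label 'i', and classifier 'rank', plus the key used for clustering below;
        ### the class label is what idq.separate_output uses further down to split glitch and clean samples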

        ### filter times by scisegs -> keep only the ones within scisegs
        output = idq.filter_datfile_output( output, idqsegs )

        ### cluster
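        ### (samples within cluster_win of one another are merged, keeping a representative chosen via cluster_key)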
        if not opts.dont_cluster:
            output = idq.cluster_datfile_output( output, cluster_key=cluster_key, cluster_win=cluster_win)

        ### downselect to only keep the most recent max_num_gch and max_num_cln
        these_columns, glitches, cleans = idq.separate_output( output )
        glitches.sort(key=lambda l: l[these_columns['GPS']])
        cleans.sort(key=lambda l: l[these_columns['GPS']])
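        ### chronological sort so the tail of each list holds the most recent samples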
        if len(glitches) > max_num_gch:
            logger.info('  downselecting to the %d most recent glitches'%max_num_gch)
            glitches = glitches[-max_num_gch:]
        if len(cleans) > max_num_cln:
            logger.info('  downselecting to the %d most recent cleans'%max_num_cln)
            cleans = cleans[-max_num_cln:]
    print( "finding maximum veto window used in vetolist" )
win = 0.001 ### absolute minimum; will almost certainly be replaced unless vetolist is empty
file_obj = open(vetolist, 'r')
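### vetolist rows are assumed to be whitespace-delimited, with comment lines starting with '#';
### ovl.vD['vwin'] is taken to be the column index of the veto window (in seconds)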
for line in file_obj:
    if line[0]=="#":
        pass
    else:
        win = max(win, float(line.strip().split()[ovl.vD['vwin']]))
file_obj.close()

#---

### find time ranges of interest from patfile
if opts.verbose:
    print( "defining segments in which we need KW triggers" )
gps = idq.slim_load_datfiles(patfiles, columns=['GPS_s', 'GPS_ms'])
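### combine the integer-second and sub-second columns into floating-point GPS times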
gps = np.array(gps['GPS_s'], dtype=float) + 1e-6*np.array(gps['GPS_ms'], dtype=float)

Ngps = len(gps)
if Ngps==0:
    raise ValueError, 'please supply at least one GPS time within : '+', '.join(patfiles)

elif opts.verbose:
    print( "found %d times"%Ngps )

segs = event.fixsegments([[t-win, t+win] for t in gps]) ### the segments in which we need KW triggers

#---

    jsonD[signif] = {'observed deadtime':dt, 'observed efficiency':eff, 'number of glitches':N, 'duration':T}

    l, h = [], []
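    ### shade the 68% binomial confidence band on the observed efficiency at each deadtime value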
    for e in eff:
        cr = idq.binomialCR( e*N, N, conf=0.68 )
        l.append(cr[0])
        h.append(cr[1])
    ax.fill_between( dt, l, h, color=color, alpha=0.25, edgecolor='none' )    

### add curve from dat files
if opts.verbose:
    print "finding all *dat files"
dats = [dat for dat in idq.get_all_files_in_range( realtimedir, opts.start, opts.end, pad=0, suffix='.dat')
            if (opts.classifier==idq.extract_dat_name( dat ))
            and event.livetime(event.andsegments([[idq.extract_start_stop(dat, suffix=".dat")], idqsegs]))
       ]
if opts.verbose:
    print "reading samples from %d dat files"%len(dats)
output = idq.slim_load_datfiles( dats, skip_lines=0, columns=['GPS', 'i', 'rank'])
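### keep only samples whose GPS times fall within the idq science segments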
output = idq.filter_datfile_output( output, idqsegs )
#output['GPS'] = [float(l) for l in output['GPS']] ### not necessary because values are cast appropriately within idq.filter_datfile_output
#output['i'] = [float(l) for l in output['i']]
#output['rank'] = [float(l) for l in output['rank']]

r, c, g = idq.dat_to_rcg( output )
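### r = ranks; c, g = cumulative clean and glitch counts at each rank, feeding the ROC-like step curve below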

if g[-1] and c[-1]:
    color = ax.step( 1.0*c/c[-1], 1.0*g/g[-1], label='datfiles: $N_c=%d$, $N_g=%d$'%(c[-1], g[-1]), linewidth=2, where='post' )[0].get_color()
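    ### shade a 68% confidence box (binomial CRs along both axes) around each point on the curve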
    for G, C in zip(g, c):
        c_cr = idq.binomialCR( C, c[-1], conf=0.68 )
        g_cr = idq.binomialCR( G, g[-1], conf=0.68 )
        ax.fill_between( c_cr, [g_cr[0]]*2, [g_cr[1]]*2, color=color, alpha=0.25, edgecolor='none' )

jsonD['dat'] = {'rank':list(r), 'cumulative cleans':list(c), 'cumulative glitches':list(g)}
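
### A minimal sketch, not part of the original script: jsonD could be written to disk with the
### standard-library json module once all curves are recorded (the filename and the default=float
### cast are assumptions, the latter to cope with possible numpy scalars in the lists).
import json
with open('roc_summary.json', 'w') as json_file:
    json.dump(jsonD, json_file, default=float)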
    print("finding maximum veto window used in vetolist")
win = 0.001  ### use this as an absolute mimimum. Will almost certainly be replaced, unless vetolist is empty
file_obj = open(vetolist, 'r')
for line in file_obj:
    if line[0] == "#":
        pass
    else:
        win = max(win, float(line.strip().split()[ovl.vD['vwin']]))
file_obj.close()

#---

### find time ranges of interest from patfile
if opts.verbose:
    print("defining segments in which we need KW triggers")
gps = idq.slim_load_datfiles(patfiles, columns=['GPS_s', 'GPS_ms'])
gps = np.array(gps['GPS_s'],
               dtype=float) + 1e-6 * np.array(gps['GPS_ms'], dtype=float)

Ngps = len(gps)
if Ngps == 0:
    raise ValueError, 'please supply at least one GPS time within : ' + patfile

elif opts.verbose:
    print("found %d times" % Ngps)

segs = event.fixsegments([[t - win, t + win] for t in gps
                          ])  ### the segments in which we need KW triggers

#---