}

    # 68% binomial credible region around each efficiency point.
    # NOTE(review): fragment starts mid-function -- `eff`, `N`, `dt`, `ax`,
    # and `color` are presumably defined earlier in the enclosing scope.
    l, h = [], []
    for e in eff:
        # e*N is treated as the observed success count out of N trials
        cr = idq.binomialCR(e * N, N, conf=0.68)
        l.append(cr[0])  # lower edge of the credible interval
        h.append(cr[1])  # upper edge of the credible interval
    # shade the credible band in the same color as the efficiency curve
    ax.fill_between(dt, l, h, color=color, alpha=0.25, edgecolor='none')

### add curve from dat files

if opts.verbose:
    print("finding all *dat files")

# Keep only dat files that (a) belong to the requested classifier and
# (b) have nonzero livetime overlap with the analysis segments.
dats = []
for dat in idq.get_all_files_in_range(realtimedir, opts.start, opts.end, pad=0, suffix='.dat'):
    if opts.classifier != idq.extract_dat_name(dat):
        continue
    span = [idq.extract_start_stop(dat, suffix=".dat")]
    if event.livetime(event.andsegments([span, idqsegs])):
        dats.append(dat)

if opts.verbose:
    print("reading samples from %d dat files" % len(dats))

# Load the samples and restrict them to the analysis segments.
# Values are cast appropriately within idq.filter_datfile_output,
# so no explicit float() conversion is needed afterwards.
output = idq.slim_load_datfiles(dats, skip_lines=0, columns=['GPS', 'i', 'rank'])
output = idq.filter_datfile_output(output, idqsegs)

r, c, g = idq.dat_to_rcg(output)
### Example #2
### 0
    f.close()

    #===============================================================================================
    # update mappings via uroc files
    #===============================================================================================

    ### find all *dat files, bin them according to classifier
    ### needed for opts.mode=="dat" and KDE estimates
    logger.info('finding all *dat files')
    datsD = defaultdict(list)
    for dat in idq.get_all_files_in_range(realtimedir,
                                          gpsstart - lookback,
                                          gpsstart + stride,
                                          pad=0,
                                          suffix='.dat'):
        # bin by the classifier name embedded in the dat filename
        datsD[idq.extract_dat_name(dat)].append(dat)

    ### throw away any un-needed files
    # NOTE(review): popping from the dict while iterating over .keys() is safe
    # in Python 2 (keys() returns a list snapshot) but would raise
    # RuntimeError in Python 3 -- use list(datsD.keys()) if ever ported.
    for key in datsD.keys():
        if key not in classifiers:
            datsD.pop(key)
        else:  ### throw out files that don't contain any science time
            # keep only dat files whose [start, stop] span has nonzero
            # livetime overlap with the analysis segments idqsegs
            datsD[key] = [
                dat for dat in datsD[key] if event.livetime(
                    event.andsegments([
                        idqsegs, [idq.extract_start_stop(dat, suffix='.dat')]
                    ]))
            ]

    if opts.mode == "npy":  ### need rank files
        ### find all *rank*npy.gz files, bin them according to classifier
        # NOTE(review): this branch appears truncated at a fragment splice --
        # idqseg_path assigned here is consumed at the OUTER indent level
        # below, which only works when opts.mode == "npy". Looks like two
        # overlapping copies of the script were concatenated; verify against
        # the original source.
        idqseg_path = idq.idqsegascii(output_dir, '_%s'%dq_name, gpsstart - lookback, lookback+stride)
    # write the science segments used for this stride to an ASCII file,
    # one "start stop" pair per line (Python 2 print-to-file syntax)
    f = open(idqseg_path, 'w')
    for seg in idqsegs:
        print >> f, seg[0], seg[1]
    f.close()

    #===============================================================================================
    # update mappings via uroc files
    #===============================================================================================

    ### find all *dat files, bin them according to classifier
    ### needed for opts.mode=="dat" and KDE estimates
    logger.info('finding all *dat files')
    datsD = defaultdict( list )
    for dat in idq.get_all_files_in_range(realtimedir, gpsstart-lookback, gpsstart+stride, pad=0, suffix='.dat' ):
        # bin by the classifier name embedded in the dat filename
        datsD[idq.extract_dat_name( dat )].append( dat )

    ### throw away any un-needed files
    # NOTE(review): safe in Python 2 only -- .keys() returns a list there;
    # mutating during iteration would raise in Python 3
    for key in datsD.keys():
        if key not in classifiers:
            datsD.pop(key) 
        else: ### throw out files that don't contain any science time
            # keep only dat files overlapping the analysis segments idqsegs
            datsD[key] = [ dat for dat in datsD[key] if event.livetime(event.andsegments([idqsegs, [idq.extract_start_stop(dat, suffix='.dat')]])) ]

    if opts.mode=="npy": ### need rank files
        ### find all *rank*npy.gz files, bin them according to classifier
        logger.info('  finding all *rank*.npy.gz files')
        ranksD = defaultdict( list )
        # keep only the *.npy.gz files whose names contain "rank",
        # keyed by the name extracted via extract_fap_name
        for rank in [rank for rank in  idq.get_all_files_in_range(realtimedir, gpsstart-lookback, gpsstart+stride, pad=0, suffix='.npy.gz') if "rank" in rank]:
            ranksD[idq.extract_fap_name( rank )].append( rank ) ### should just work...
#    color = ax.step( dt, eff, label='%d events with KWsignif $\geq %.1f$'%(N, signif), where='post' )[0].get_color()
    # Plot efficiency vs deadtime for this significance threshold and capture
    # the line color so the credible band below matches the curve.
    # NOTE(review): fragment starts mid-scope -- dt, eff, N, signif, T, jsonD
    # are presumably defined earlier in the enclosing loop/function.
    color = ax.plot( dt, eff, label='%d events with KWsignif $\geq %.1f$'%(N, signif) )[0].get_color()

    # record the raw curve so it can be serialized to JSON later
    jsonD[signif] = {'observed deadtime':dt, 'observed efficiency':eff, 'number of glitches':N, 'duration':T}

    # 68% binomial credible region around each efficiency point
    # (e*N treated as the observed success count out of N trials)
    l, h = [], []
    for e in eff:
        cr = idq.binomialCR( e*N, N, conf=0.68 )
        l.append(cr[0])
        h.append(cr[1])
    ax.fill_between( dt, l, h, color=color, alpha=0.25, edgecolor='none' )

### add curve from dat files
if opts.verbose:
    print "finding all *dat files"
# dat files for the requested classifier with nonzero livetime overlap
# against the analysis segments idqsegs
dats = [dat for dat in idq.get_all_files_in_range( realtimedir, opts.start, opts.end, pad=0, suffix='.dat') if (opts.classifier==idq.extract_dat_name( dat )) and event.livetime(event.andsegments([[idq.extract_start_stop(dat, suffix=".dat")], idqsegs]))]
if opts.verbose:
    print "reading samples from %d dat files"%len(dats)
# load samples and restrict to the analysis segments
output = idq.slim_load_datfiles( dats, skip_lines=0, columns=['GPS', 'i', 'rank'])
output = idq.filter_datfile_output( output, idqsegs )
#output['GPS'] = [float(l) for l in output['GPS']] ### not necessary because values are cast appropriately within idq.filter_datfile_output
#output['i'] = [float(l) for l in output['i']]
#output['rank'] = [float(l) for l in output['rank']]

# presumably r=ranks, c=cumulative clean counts, g=cumulative glitch counts
# -- verify against idq.dat_to_rcg
r, c, g, = idq.dat_to_rcg( output )

# only draw the curve when both final totals are nonzero (avoids 0-division)
if g[-1] and c[-1]:
    color = ax.step( 1.0*c/c[-1], 1.0*g/g[-1], label='datfiles: $N_c=%d$, $N_g=%d$'%(c[-1], g[-1]), linewidth=2, where='post' )[0].get_color()
    # per-point 68% binomial credible regions
    # NOTE(review): this loop body is truncated at a fragment splice --
    # c_cr/g_cr are computed but their use is cut off below
    for G, C in zip(g, c):
        c_cr = idq.binomialCR( C, c[-1], conf=0.68 )
        g_cr = idq.binomialCR( G, g[-1], conf=0.68 )
#    color = ax.step( dt, eff, label='%d events with KWsignif $\geq %.1f$'%(N, signif), where='post' )[0].get_color()
    # Plot efficiency vs deadtime for this significance threshold and capture
    # the line color so the credible band below matches the curve.
    # NOTE(review): fragment starts mid-scope -- dt, eff, N, signif, T, jsonD
    # are presumably defined earlier in the enclosing loop/function.
    color = ax.plot( dt, eff, label='%d events with KWsignif $\geq %.1f$'%(N, signif) )[0].get_color()

    # record the raw curve so it can be serialized to JSON later
    jsonD[signif] = {'observed deadtime':dt, 'observed efficiency':eff, 'number of glitches':N, 'duration':T}

    # 68% binomial credible region around each efficiency point
    # (e*N treated as the observed success count out of N trials)
    l, h = [], []
    for e in eff:
        cr = idq.binomialCR( e*N, N, conf=0.68 )
        l.append(cr[0])
        h.append(cr[1])
    ax.fill_between( dt, l, h, color=color, alpha=0.25, edgecolor='none' )

### add curve from dat files
if opts.verbose:
    print "finding all *dat files"
# dat files for the requested classifier with nonzero livetime overlap
# against the analysis segments idqsegs
dats = [dat for dat in idq.get_all_files_in_range( realtimedir, opts.start, opts.end, pad=0, suffix='.dat') if (opts.classifier==idq.extract_dat_name( dat )) and event.livetime(event.andsegments([[idq.extract_start_stop(dat, suffix=".dat")], idqsegs]))]
if opts.verbose:
    print "reading samples from %d dat files"%len(dats)
# load samples and restrict to the analysis segments
output = idq.slim_load_datfiles( dats, skip_lines=0, columns=['GPS', 'i', 'rank'])
output = idq.filter_datfile_output( output, idqsegs )
#output['GPS'] = [float(l) for l in output['GPS']] ### not necessary because values are cast appropriately within idq.filter_datfile_output
#output['i'] = [float(l) for l in output['i']]
#output['rank'] = [float(l) for l in output['rank']]

# presumably r=ranks, c=cumulative clean counts, g=cumulative glitch counts
# -- verify against idq.dat_to_rcg
r, c, g, = idq.dat_to_rcg( output )

# only draw the curve when both final totals are nonzero (avoids 0-division)
if g[-1] and c[-1]:
    color = ax.step( 1.0*c/c[-1], 1.0*g/g[-1], label='datfiles: $N_c=%d$, $N_g=%d$'%(c[-1], g[-1]), linewidth=2, where='post' )[0].get_color()
    # per-point 68% binomial credible regions
    # NOTE(review): this loop is truncated at the end of the visible fragment
    for G, C in zip(g, c):
        c_cr = idq.binomialCR( C, c[-1], conf=0.68 )
        g_cr = idq.binomialCR( G, g[-1], conf=0.68 )