Example #1
def test_write_hdf5(self, delete=True):
    """Write this flag to HDF5, skipping the test if the HDF5 backend is unavailable."""
    flag = DataQualityFlag(FLAG1, active=ACTIVE, known=KNOWN)
    hdfout = self.tmpfile % 'hdf'
    try:
        flag.write(hdfout)
    except ImportError as e:
        self.skipTest(str(e))
    else:
        if delete:
            os.remove(hdfout)
    return hdfout
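If delete=False, the round trip can be checked by reading the file back. A minimal sketch reusing the names from the test above; the path keyword selects the flag by name and may be optional for a single-flag file (assumption):

# read the flag back and compare with the original
flag2 = DataQualityFlag.read(hdfout, path=FLAG1)
assert flag2.active == flag.active
assert flag2.known == flag.known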
Example #3
# argwhere/hstack/savetxt are numpy functions, presumably imported earlier
idxhighscat = argwhere(scatf2 >= thresh)
highscatf2 = scatf2[idxhighscat]
highscattimes = times[idxhighscat]
highscattimesgps = highscattimes + start_time

# save text file with values above threshold [GPS f2 index]
outdata = hstack((highscattimesgps, highscatf2, idxhighscat))
savetxt('%s-ALL-TIMES-SCATTER-GT%dHZ-%d-%d.txt'
        % (ifo, thresh, start_time, dur), outdata, fmt='%f %f %i')

# save segments XML file with segments (based on code from Duncan Macleod)
from math import floor, ceil
from gwpy.segments import Segment, DataQualityFlag

flag = '%s:DCH-SCATTERED_LIGHT_GT%dHZ:1' % (ifo, thresh)
flag = DataQualityFlag(flag)
segs = []
append = segs.append

for gps in highscattimesgps:
    if len(segs) and gps in segs[-1]:
        continue
    seg = Segment(floor(gps), ceil(gps))
    append(seg)

flag.active = segs
flag.known = [Segment(start_time, end_time)]
flag.coalesce()
flag.write('%s-%s_%d-%d-%d.xml.gz' % (flag.ifo, flag.tag.replace('-', '_'),
                                      flag.version, start_time, dur))

#EOF
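The loop above only skips a trigger when it falls inside the previous segment, so touching or overlapping one-second segments can still accumulate; flag.coalesce() merges them. A minimal sketch of that behaviour:

# coalesce() merges touching or overlapping segments into disjoint ones
from gwpy.segments import Segment, SegmentList

segs = SegmentList([Segment(0, 1), Segment(1, 2), Segment(5, 6)])
segs.coalesce()
print(segs)  # two segments remain: [0 ... 2) and [5 ... 6)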
Example #4
            break
f.close()

# construct flag and filename
flag_name = 'H1:UPVh-RND:1'  # NEEDS TO BE CHANGED
name = 'segments_UPVh_RND.xml'  # NEEDS TO BE CHANGED

try:
    knownsegments = numpy.loadtxt('total_UPVh_segs.txt')
except OSError:
    print('No total_UPVh_segs.txt file in the current working directory. '
          'It should have been produced by the previous loop.')

known_start = [knownsegments[i, 0] for i in range(len(knownsegments))]
known_end = [knownsegments[i, 1] for i in range(len(knownsegments))]

# read the data
data = numpy.loadtxt('total_UPVh_trigs.txt', dtype=float)

# get an array for the start_time and end_time of each segment
start_time = [data[i, 0] for i in range(len(data))]
end_time = [data[i, 1] for i in range(len(data))]

# create a data quality flag object
# note: zip stops at the shorter input, so the start and end lists
# must have the same length
flag = DataQualityFlag(flag_name,
                       active=zip(start_time, end_time),
                       known=zip(known_start, known_end))

# write flag
flag.write(name)
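Since numpy.loadtxt returns an (N, 2) array for a two-column file, the row-by-row list comprehensions above can be collapsed. A minimal alternative sketch of the same construction:

import numpy
from gwpy.segments import DataQualityFlag

# ndmin=2 keeps the array two-dimensional even for a single row
known = numpy.loadtxt('total_UPVh_segs.txt', ndmin=2)
active = numpy.loadtxt('total_UPVh_trigs.txt', ndmin=2)
flag = DataQualityFlag('H1:UPVh-RND:1',
                       active=[tuple(row) for row in active[:, :2]],
                       known=[tuple(row) for row in known[:, :2]])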
Example #5
def main(args=None):
    """Run the gwvet-hug CLI tool
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    # check that we are within the time window of aLIGO,
    # and that gpsend is after gpsstart
    if args.gpsstart < 971574400:  # roughly the end of S6
        parser.error("gpsstart is before the end of S6")
    if args.gpsend < args.gpsstart:
        parser.error("gpsend is before gpsstart")

    # find the UTC beginning of the day containing the given gps time
    start_of_day = tconvert(args.gpsstart)
    start_of_day_utc = start_of_day.replace(hour=0, minute=0, second=0)
    start_of_day_gps = tconvert(start_of_day_utc)

    # finds UTC version of start/end times
    start_time_utc = tconvert(args.gpsstart)
    end_time_utc = tconvert(args.gpsend)

    # opens files to be ready for writing
    f = open("total_" + args.dq_flag_type + "_trigs.txt", "w")  # all triggers
    g = open("total_" + args.dq_flag_type + "_segs.txt", "w")  # all segments

    # choosing to read in hveto
    if args.dq_flag_type == 'hveto':

        LOGGER.info('Data Quality Flag chosen is hveto, stored in the path '
                    '%s' % args.directory_path)

        # choosing the offline hveto option for O1, runs by Josh Smith
        if args.online_offline == 'offline':
            analysis_segs_45679 = ['4', '5', '6', '7', '9']
            analysis_segs_23 = ['2', '3']
            if args.hveto_analysis_seg in analysis_segs_45679:
                pattern_trigs_hveto = os.path.join(
                    args.directory_path,
                    'analysis%s' % args.hveto_analysis_seg,
                    'H1-omicron_BOTH-*-DARM',
                    '*VETO_SEGS_ROUND*.txt',
                )
                pattern_segs_hveto = os.path.join(
                    args.directory_path,
                    'analysis%s' % args.hveto_analysis_seg,
                    'H1-omicron_BOTH-*-DARM',
                    'segs.txt',
                )

            elif args.hveto_analysis_seg in analysis_segs_23:
                pattern_trigs_hveto = os.path.join(args.directory_path,
                                                   'H1-omicron_BOTH-*-DARM',
                                                   '*VETO_SEGS_ROUND*.txt')
                pattern_segs_hveto = os.path.join(args.directory_path,
                                                  'H1-omicron_BOTH-*-DARM',
                                                  'segs.txt')

            elif args.hveto_analysis_seg == '8':
                pattern_trigs_hveto = os.path.join(args.directory_path,
                                                   '*VETO_SEGS_ROUND*.txt')
                pattern_segs_hveto = os.path.join(args.directory_path,
                                                  'segs.txt')
            else:
                raise ValueError('Must choose from O1 analysis segments '
                                 '2 through 9')
            LOGGER.info(
                'Data Quality Flag chosen is hveto, stored in the path '
                '%s' % args.directory_path)

            while start_time_utc < end_time_utc:
                day = start_time_utc.day
                month = start_time_utc.month
                year = start_time_utc.year

                triggers = grab_time_triggers(pattern_trigs_hveto,
                                              args.gpsstart, args.gpsend)

                # Ideally we would be able to use the same algorithm, but
                # SegmentList.read doesn't support csv, which is the format
                # that segment files are recorded in. So, we want to
                # temporarily use another method to read in segments.
                segments = grab_time_segments(pattern_segs_hveto,
                                              args.gpsstart, g)

                start_time_utc += datetime.timedelta(days=1)

            write_segs(triggers, f)

        elif args.online_offline == 'online':

            # These paths are currently hardwired for online searches.
            pattern_trigs_hveto = os.path.join(args.directory_path, '{}{:02}',
                                               '{}{:02}{:02}', '*86400-DARM',
                                               '*VETO_SEGS_ROUND*.txt')
            pattern_segs_hveto = os.path.join(args.directory_path, '{}{:02}',
                                              '{}{:02}{:02}', '*86400-DARM',
                                              'segs.txt')

            triggers = SegmentList([])
            segments = SegmentList([])

            while start_time_utc < end_time_utc:
                day = start_time_utc.day
                month = start_time_utc.month
                year = start_time_utc.year
                wildcard_trigs_hveto = pattern_trigs_hveto.format(
                    year, month, year, month, day)
                wildcard_segs_hveto = pattern_segs_hveto.format(
                    year, month, year, month, day)
                triggers = grab_time_triggers(wildcard_trigs_hveto,
                                              args.gpsstart, args.gpsend)

                # Ideally we would be able to use the same algorithm, but
                # SegmentList.read doesn't support csv, which is the format
                # segment files are recorded in. So, we want to temporarily
                # use another method to read segments in.
                segments = grab_time_segments(wildcard_segs_hveto,
                                              args.gpsstart, g)

                start_time_utc += datetime.timedelta(days=1)

            write_segs(triggers, f)

            # segments.write(g)

        else:
            LOGGER.info('Did not choose online or offline; please choose one.')

    # choosing to read in UPVh!
    elif args.dq_flag_type == 'UPVh':

        LOGGER.info('Data-quality flag chosen is %s, stored in the path %s' %
                    (args.dq_flag_type, args.directory_path))

        pattern_trigs_UPVh = os.path.join(args.directory_path,
                                          'DARM_LOCK_{}_{}-H', 'H1:*veto.txt')
        pattern_segs_UPVh = os.path.join(args.directory_path,
                                         'DARM_LOCK_{}_{}-H', 'segments.txt')
        triggers = SegmentList([])
        segments = SegmentList([])
        while start_of_day_utc < end_time_utc:
            start_of_day_gps = tconvert(start_of_day_utc)
            nextday_utc = start_of_day_utc + datetime.timedelta(days=1)
            nextday_gps = tconvert(nextday_utc)
            wildcard_UPVh_trigs = pattern_trigs_UPVh.format(
                start_of_day_gps, nextday_gps)
            wildcard_UPVh_segs = pattern_segs_UPVh.format(
                start_of_day_gps, nextday_gps)
            triggers = grab_time_triggers(wildcard_UPVh_trigs, args.gpsstart,
                                          args.gpsend)
            segments = grab_time_triggers(wildcard_UPVh_segs, args.gpsstart,
                                          args.gpsend)
            start_of_day_utc += datetime.timedelta(days=1)
            write_segs(triggers, f)
            write_segs(segments, g)

    else:  # forgot to choose UPVh or hveto
        raise ValueError('Did not give a valid data-quality tool, '
                         'please choose from hveto, UPVh, or OVL')
    f.close()
    g.close()

    # creating DQ .xml file

    # construct flag and filename
    flag_name = 'H1:' + args.dq_flag_type + '-RND:1'
    name = 'segments_' + args.dq_flag_type + '_RND.xml'

    # reading in segment files
    try:
        knownsegments = numpy.loadtxt('total_' + args.dq_flag_type +
                                      '_segs.txt')
    except OSError:
        LOGGER.info("No total_{}_segs.txt file in current working directory. "
                    "It should have been produced from last loop. "
                    "If this file is empty, that may mean you have no active "
                    "segments during this time period.".format(
                        args.dq_flag_type))

    known_start = [knownsegments[i, 0] for i in range(len(knownsegments))]
    known_end = [knownsegments[i, 1] for i in range(len(knownsegments))]

    # reading in trigger files
    data = numpy.loadtxt('total_' + args.dq_flag_type + '_trigs.txt')

    # get an array for the start_time and end_time of each segment
    start_time = [data[i, 1] for i in range(len(data))]
    end_time = [data[i, 2] for i in range(len(data))]

    # create a data quality flag object
    flag = DataQualityFlag(flag_name,
                           active=zip(start_time, end_time),
                           known=zip(known_start, known_end))

    # write flag
    flag.write(name)

    LOGGER.info("Created DQ Flag: " + flag_name + " in .xml form as: " + name)

    # creating VET .ini file

    config = ConfigParser()

    config.add_section('plugins')
    config.set('plugins', 'gwvet.tabs', ' ')

    config.add_section('states')
    config.set('states', 'Science', '%(ifo)s:DMT-ANALYSIS_READY:1')

    config.add_section('segment-database')
    config.set('segment-database', 'url', 'https://segments.ligo.org')

    # the empty section name is intentional: the instructions printed at the
    # end tell the user to delete the resulting '[]' line before running VET
    config.add_section('')
    config.set('', 'type', 'veto-flag')
    config.set('', 'event-channel', '%(ifo)s:GDS-CALIB_STRAIN')
    config.set('', 'event-generator', 'Omicron')
    config.set(
        '', 'metrics', "'Deadtime',\n'Efficiency', \n'Efficiency/Deadtime', "
        "\n'Efficiency | SNR>=8', \n'Efficiency/Deadtime | SNR>=8', "
        "\n'Efficiency | SNR>=20', \n'Efficiency/Deadtime | SNR>=20', "
        "\n'Efficiency | SNR>=100', \n'Efficiency/Deadtime | SNR>=100',"
        " \n'Use percentage', \n'Loudest event by SNR'")

    config.add_section('tab-SNR-6')
    config.set('tab-SNR-6', 'name', 'SNR 6')
    config.set('tab-SNR-6', 'type', 'veto-flag')
    config.set('tab-SNR-6', 'shortname', 'SNR 6')
    config.set('tab-SNR-6', 'flags', flag_name)
    config.set('tab-SNR-6', 'states', "Science")
    config.set('tab-SNR-6', 'segmentfile', name)

    with open(args.dq_flag_type + '_segs.ini', 'w') as configfile:
        config.write(configfile)

    LOGGER.info(
        '\n Created %s_segs.ini. You have everything you need to run VET now! '
        '\n' % args.dq_flag_type)
    LOGGER.info(
        'To run VET, first go into %s_segs.ini and delete the line that only '
        'contains [], then save and exit the .ini file.\n' % args.dq_flag_type)
    LOGGER.info(
        'Finally, run the command: \n'
        '$ gw_summary gps %s %s -f /home/detchar/etc/summary/configurations/'
        'defaults.ini -f %s_segs.ini' %
        (args.gpsstart, args.gpsend, args.dq_flag_type))
Example #6
# name the DQ flag
optic = channel[0].split('_')[2]
flag_name = 'L1:DCH-EQ_%s_GT_%s:1' % (optic, thresh)

# grab all observing (or whatever is defined) time
active = DataQualityFlag.query_dqsegdb(args.science, args.start, args.end).active

# grab only data for the STS channel in observing time
data = get_timeseries_dict(channel, active, frametype='L1_M')

# find times above threshold
time = [j.times[j > thresh] for j in data[channel[0]]]
times = numpy.concatenate(time)

# put all times above threshold in to segments
segs = segments.segmentlist()
segs.extend([segments.segment(int(t.value), int(t.value) + args.stride)
             for t in times])
segs = segs.coalesce()

# set up the xml file by making a list of the start and end times of the flag
start_time = []
start_time.extend([t[0] for t in segs])
end_time = []
end_time.extend([t[1] for t in segs])

# put in to dq flag object
flag = DataQualityFlag(flag_name, active=zip(start_time, end_time),
                       known=[[args.start, args.end]])

# write flag to xml
flag.write(segment_file)
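To sanity-check the result, the written XML can be read straight back and plotted. A minimal sketch reusing segment_file from above; the PNG name is hypothetical:

# read the written flag back and plot it for a quick visual check
from gwpy.segments import DataQualityFlag

check = DataQualityFlag.read(segment_file)
plot = check.plot()
plot.savefig('eq_flag_check.png')  # hypothetical output name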
Example #7
		else:
			print(filename + " does not exist. Looking for the segment file in the next time increment.")
			break
f.close()

# construct flag and filename
flag_name = 'H1:UPVh-RND:1'  # NEEDS TO BE CHANGED
name = 'segments_UPVh_RND.xml'  # NEEDS TO BE CHANGED

try:
    knownsegments = numpy.loadtxt('total_UPVh_segs.txt')
except OSError:
    print('No total_UPVh_segs.txt file in the current working directory. '
          'It should have been produced by the previous loop.')

known_start = [knownsegments[i, 0] for i in range(len(knownsegments))]
known_end = [knownsegments[i, 1] for i in range(len(knownsegments))]

# read the data
data = numpy.loadtxt('total_UPVh_trigs.txt', dtype=float)

# get an array for the start_time and end_time of each segment
start_time = [data[i, 0] for i in range(len(data))]
end_time = [data[i, 1] for i in range(len(data))]

# create a data quality flag object
# note: zip stops at the shorter input, so the start and end lists
# must have the same length
flag = DataQualityFlag(flag_name, active=zip(start_time, end_time),
                       known=zip(known_start, known_end))

# write flag
flag.write(name)
Example #8
def mkSegment(gst, get, utc_date, txt=True):

    for key in keys:
        sources = GetFilelist(gst, get, key)

        first = True
        for source in sources:
            events = EventTable.read(
                source,
                tablename='sngl_burst',
                columns=['start_time', 'start_time_ns', 'duration', 'snr'])
            #events = EventTable.read(source, tablename='sngl_burst',columns=['peak_time', 'peak_time_ns','start_time', 'start_time_ns', 'duration', 'peak_frequency', 'central_freq', 'bandwidth', 'channel', 'amplitude', 'snr', 'confidence', 'chisq', 'chisq_dof', 'param_one_name', 'param_one_value'])
            col = events.get_column('start_time')
            if first:
                if len(col) > 0:
                    mergedevents = events
                    first = False
            else:
                mergedevents = vstack([mergedevents, events])

        for snr in snrs[key]:
            Triggered = DataQualityFlag(name="K1:" + key,
                                        known=[(gst, get)],
                                        active=[],
                                        label="Glitch",
                                        description="Glitch veto segment K1:" +
                                        key + " >= SNR" + str(snr))
            #Triggered.ifo = "K1"

            if not first:

                fevents = mergedevents.filter(
                    ('snr', mylib.Islargerequal, snr))
                durations = fevents.get_column('duration')
                start_times = fevents.get_column('start_time')
                for start_time, duration in zip(start_times, durations):
                    tmpstart = int(start_time)
                    #tmpend = start_time + duration
                    tmpend = int(start_time + 1)
                    tmpsegment = Segment(tmpstart, tmpend)

                    tmpTriggered = DataQualityFlag(known=[(gst, get)],
                                                   active=[(tmpstart, tmpend)])
                    Triggered |= tmpTriggered

                    #dqflag['K1-GRD_SCIENCE_MODE'].description = "Observation mode. K1:GRD-IFO_STATE_N == 1000"
                    #dqflag['K1-GRD_LOCKED'].name = "K1:GRD-LSC_LOCK_STATE_N >= 300 & K1:GRD-LSC_LOCK_STATE_N <= 1000"

            # write down 15 min segments.
            if txt:
                with open(filepath_txt[key + str(snr)], mode='w') as f:
                    for seg in Triggered.active:
                        f.write('{0} {1}\n'.format(int(seg[0]), int(seg[1])))

            # if accumulated file exists, it is added.
            if os.path.exists(filepath_xml[key + str(snr)]):
                tmp = DataQualityFlag.read(filepath_xml[key + str(snr)])
                Triggered = Triggered + tmp

            Triggered.write(filepath_xml[key + str(snr)], overwrite=True)
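mylib.Islargerequal is an external helper, but EventTable.filter accepts (column, function, operand) tuples in which the function receives the column values plus the operand and returns a boolean mask, so the helper is presumably equivalent to this sketch:

# a plausible sketch of the external helper mylib.Islargerequal
# (assumption: it is called with the column array and the operand)
def Islargerequal(column, threshold):
    """Return a boolean mask selecting rows with column >= threshold."""
    return column >= threshold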
Example #9
start_time_nss = fevents.get_column('start_time_ns')

# col can be used like an array; col[0] gives the first value.

for peak_time, peak_time_ns, duration, start_time, start_time_ns in zip(
        peak_times, peak_time_nss, durations, start_times, start_time_nss):

    # tmpstart = start_time - tfile
    tmpstart = start_time
    tmpstart += start_time_ns * 1e-9
    tmpend = tmpstart + duration
    tmpstart = round(tmpstart, 4)
    tmpend = round(tmpend, 4)

    tmpTriggered = DataQualityFlag(known=[(gpsstart, gpsend)],
                                   active=[(tmpstart, tmpend)])
    Triggered |= tmpTriggered

#if kamioka:
#    fname='/users/.ckozakai/KashiwaAnalysis/analysis/code/gwpy/trigger/triggerStudy/condor/'+date+'/segment/'+date+'_segment_SNR' + str(SNR) + '_' + channel + '_' + str(tfile) +'_60.xml'
#else:
#    fname='/home/chihiro.kozakai/detchar/analysis/code/gwpy/trigger/triggerStudy/condor/'+date+'/segment/'+date+'_segment_SNR' + str(SNR) + '_' + channel + '_' + str(tfile) +'_60.xml'
        
#Triggered.write(fname,overwrite=True)
Triggered.write(outfile,overwrite=True)
print(Triggered)

txt = outfile.replace('.xml','.txt')
with open(txt, mode='w') as f:
    for seg in Triggered.active:
        #f.write('{0} {1}\n'.format(int(seg[0]), int(seg[1])))
        f.write('{0} {1}\n'.format(seg[0], seg[1]))
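For completeness, a minimal sketch of loading the text file back into a SegmentList, reusing txt from above:

# read the two-column text file back into a SegmentList
from gwpy.segments import Segment, SegmentList

with open(txt) as f:
    segs = SegmentList(Segment(float(a), float(b))
                       for a, b in (line.split() for line in f))
print(segs)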