Example #1
def open_cache(lcf):
    """Read a LAL-format cache file into memory as a
    :class:`glue.lal.Cache`.
    """
    if isinstance(lcf, file):
        return Cache.fromfile(lcf)
    else:
        with open(lcf, 'r') as f:
            return Cache.fromfile(f)
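A minimal usage sketch, assuming Python 2 (where the builtin file type checked above exists) and a hypothetical cache filename; open_cache accepts either a path string or an already-open file object and returns a glue.lal.Cache either way.

from glue.lal import Cache   # needed by open_cache itself (omitted from the excerpt above)

# read from a path (hypothetical filename)
cache = open_cache('H1-EXAMPLE-1126200000-86400.lcf')
print('%d entries read' % len(cache))

# or hand over an already-open file object
with open('H1-EXAMPLE-1126200000-86400.lcf', 'r') as fobj:
    cache = open_cache(fobj)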
Example #3
 def process(self, *args, **kwargs):
     error = None
     # read the cache files
     if isinstance(self.cache, string_types) and os.path.isfile(self.cache):
         with open(self.cache, 'r') as fobj:
             try:
                 self.cache = Cache.fromfile(fobj).sieve(
                                          segment=self.span)
             except ValueError as e:
                 if "could not convert \'\\n\' to CacheEntry" in str(e):
                     error = 'could not parse event cache file'
                 else:
                     raise
     elif isinstance(self.cache, string_types):
         error = 'could not locate event cache file'
         warn("Cache file %s not found." % self.cache)
     elif self.cache is not None and not isinstance(self.cache, Cache):
         raise ValueError("Cannot parse EventTriggerTab.cache of type %r"
                          % type(self.cache))
     # push error to all states for HTML writing
     if error:
         for state in self.states:
             self.error[state] = (
                 'danger',
                 'This analysis seems to have failed: %s.' % error)
     # only process if the cache file was found
     if kwargs.get('trigcache', None) is None:
         kwargs['trigcache'] = self.cache
     try:
         super(EventTriggerTab, self).process(*args, **kwargs)
     except IOError as e:
         msg = "GWSumm failed to process these data.<pre>%s</pre>" % str(e)
         for state in self.states:
             self.error[state] = ('danger', msg)
Example #4
 def process(self, *args, **kwargs):
     error = None
     # read the cache files
     if isinstance(self.cache, str) and os.path.isfile(self.cache):
         with open(self.cache, 'r') as fobj:
             try:
                 self.cache = Cache.fromfile(fobj).sieve(
                                          segment=self.span)
             except ValueError as e:
                 if "could not convert \'\\n\' to CacheEntry" in str(e):
                     error = 'could not parse event cache file'
                 else:
                     raise
     elif isinstance(self.cache, str):
         error = 'could not locate event cache file'
         warn("Cache file %s not found." % self.cache)
     elif self.cache is not None and not isinstance(self.cache, Cache):
         raise ValueError("Cannot parse EventTriggerTab.cache of type %r"
                          % type(self.cache))
     # push error to all states for HTML writing
     if error:
         for state in self.states:
             self.error[state] = (
                 'danger', 'This analysis seems to have failed: %s.' % error)
     # only process if the cache file was found
     if kwargs.get('trigcache', None) is None:
         kwargs['trigcache'] = self.cache
     try:
         super(EventTriggerTab, self).process(*args, **kwargs)
     except IOError as e:
         warn('Caught %s: %s' % (type(e).__name__, str(e)))
         msg = "GWSumm failed to process these data.<pre>%s</pre>" % str(e)
         for state in self.states:
             self.error[state] = ( 'danger', msg)
Example #5
    def process(self, *args, **kwargs):
        # read the segment files
        if os.path.isfile(self.segmentfile):
            segs = DataQualityFlag.read(self.segmentfile, coalesce=False)
            self.states[0].known = segs.known
            self.states[0].active = segs.active
            self.states[0].ready = True
        else:
            warn('Segment file %s not found.' % self.segmentfile)
            return
        if len(self.states[0].active) == 0:
            warn('No segments analysed by daily ahope.')
            return
        # read the cache files
        if os.path.isfile(self.inspiralcachefile):
            with open(self.inspiralcachefile, 'r') as fobj:
                try:
                    self.inspiralcache = Cache.fromfile(fobj).sieve(
                        segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.inspiralcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.inspiralcachefile)
            return
        if os.path.isfile(self.tmpltbankcachefile):
            with open(self.tmpltbankcachefile, 'r') as fobj:
                try:
                    self.tmpltbankcache = Cache.fromfile(fobj).sieve(
                        segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.tmpltbankcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.tmpltbankcachefile)
            self.tmpltbankcache = Cache()

        # only process if the cache file was found
        super(DailyAhopeTab, self).process(*args, **kwargs)
Example #6
    def process(self, *args, **kwargs):
        # read the segment files
        if os.path.isfile(self.segmentfile):
            segs = DataQualityFlag.read(self.segmentfile, coalesce=False)
            self.states[0].known = segs.known
            self.states[0].active = segs.active
            self.states[0].ready = True
        else:
            warn('Segment file %s not found.' % self.segmentfile)
            return
        if len(self.states[0].active) == 0:
            warn('No segments analysed by daily ahope.')
            return
        # read the cache files
        if os.path.isfile(self.inspiralcachefile):
            with open(self.inspiralcachefile, 'r') as fobj:
                try:
                    self.inspiralcache = Cache.fromfile(fobj).sieve(
                                             segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.inspiralcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.inspiralcachefile)
            return
        if os.path.isfile(self.tmpltbankcachefile):
            with open(self.tmpltbankcachefile, 'r') as fobj:
                try:
                    self.tmpltbankcache = Cache.fromfile(fobj).sieve(
                                              segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.tmpltbankcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.tmpltbankcachefile)
            self.tmpltbankcache = Cache()

        # only process if the cache file was found
        super(DailyAhopeTab, self).process(*args, **kwargs)
Example #7
def get_omicron_triggers(channel, ifo, segments, cachefile):
  print "Reading channel: %s\n" %channel
  with open(cachefile, 'r') as f:
    mycache = Cache.fromfile(f)
  # Let's try and catch failed reads
  try:
    triggers = get_triggers(ifo + ':' + channel, 'sngl_burst', segments,\
        cache=mycache)
  except Exception:
    print "No Omicron triggers read for channel %s" %channel
    return None

  return triggers
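A hypothetical call, assuming the Omicron trigger cache and the analysis segments already exist; the channel name, GPS times and cache filename below are made up, and the segments are built with gwpy.segments (any segment-list object accepted by get_triggers would do).

from gwpy.segments import Segment, SegmentList

segs = SegmentList([Segment(1126250000, 1126260000)])   # made-up analysis span
trigs = get_omicron_triggers('GDS-CALIB_STRAIN', 'L1', segs,
                             'L1-GDS_CALIB_STRAIN_Omicron.cache')
if trigs is not None:
    print("%d Omicron triggers returned" % len(trigs))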
Example #8
def load_omic_trigs(omicroncachefile, segs):
  # Read in the Omicron triggers
  with open(omicroncachefile, 'r') as cachefile:
    cache = Cache.fromfile(cachefile)

  omic_trigs = SnglBurstTable.read(cache, verbose=True, filt=lambda x: x.snr <\
      100 and x.peak_frequency < 100)

  # Check if Omicron triggers have been read in successfully
  if not omic_trigs:
    sys.exit("ERROR: No triggers for Omicron channel: %s" % cachefile.split('.')[0])
  else:
    print "%d Omicron triggers read" % len(omic_trigs)

  #Get the Omicron triggers that lie within the valid segment list
  omic_trigs = omic_trigs.vetoed(segs)
  return omic_trigs
Example #9
    def run_command(self, options={}, args=[]):
        if len(args) not in [1]:
            self.parser.error("cachfile is required.")

        config = getLarsConfig()
        if not config:
            print "This analysis does not appear to have a reservation. (no %s)" % INI_NAME
            print "If a reservation has been lost, try 'lars info [--repair]'"
            print "to try to recover your '%s'" % INI_NAME
            return

        id = config.get('lars', 'id')

        cachefilename = args[0]
        cachefile = open(cachefilename, "r")

        cache = Cache.fromfile(cachefile)
        segdir = cache.to_segmentlistdict()
        extent = segdir.extent_all()
        gpsStart = int(extent[0])
        gpsEnd = int(extent[1])
        ifos = mkIfos(segdir.keys())

        duration = gpsEnd - gpsStart

        url = makeNiceUrl(os.getcwd())

        if options.dry_run:
            print "Dry run.  Results not saved"
            print "gpsStart:  ", gpsStart
            print "gpsEnd:    ", gpsEnd
            print "duration:  ", duration
            print "IFOs:      ", ifos
            print "Cachefile: ", cachefilename
            print "Location:  ", url
            return

        server = serviceProxy(config.get('lars', 'serviceUrl'))
        rv = server.publish(id, ifos, gpsStart, duration, url,
                            makeNiceUrl(cachefilename))
        rv = objectify(rv)
        print "Published:", rv.uid
Example #10
    def run_command(self, options={}, args=[]):
        if len(args) not in [1]:
            self.parser.error("cachfile is required.")

        config = getLarsConfig()
        if not config:
            print "This analysis does not appear to have a reservation. (no %s)" % INI_NAME
            print "If a reservation has been lost, try 'lars info [--repair]'"
            print "to try to recover your '%s'" % INI_NAME
            return

        id = config.get('lars','id')

        cachefilename = args[0]
        cachefile = open(cachefilename, "r")
        
        cache = Cache.fromfile(cachefile)
        segdir = cache.to_segmentlistdict()
        extent = segdir.extent_all()
        gpsStart = int(extent[0])
        gpsEnd = int(extent[1])
        ifos = mkIfos(segdir.keys())

        duration = gpsEnd - gpsStart

        url = makeNiceUrl(os.getcwd())

        if options.dry_run:
            print "Dry run.  Results not saved"
            print "gpsStart:  ", gpsStart
            print "gpsEnd:    ", gpsEnd
            print "duration:  ", duration
            print "IFOs:      ", ifos
            print "Cachefile: ", cachefilename
            print "Location:  ", url
            return

        server = serviceProxy(config.get('lars', 'serviceUrl'))
        rv = server.publish(id, ifos, gpsStart, duration, url, makeNiceUrl(cachefilename))
        rv = objectify(rv)
        print "Published:", rv.uid
Example #11
def hoge(gps):
    if np.isnan(gps):
        return None
    from gwpy.time import tconvert
    #print tconvert(gps)
    from gwpy.timeseries import TimeSeries
    from glue.lal import Cache
    channel = 'K1:PEM-EXV_SEIS_Z_SENSINF_OUT_DQ'
    start = gps #- 5*60
    end = gps + 30*60
    gwf_cache = 'full_Sep01-Nov01.cache'
    with open(gwf_cache, 'r') as fobj:
        cache = Cache.fromfile(fobj)
    #print cache
    data = TimeSeries.read(cache, channel, start=start, end=end,
                           verbose=True, nproc=2, pad=np.nan,
                           format='gwf.lalframe')
    plot = data.plot(
        title = 'hoge'
        #ylabel='Strain amplitude',
    )
    plot.savefig('{0}.png'.format(gps))
    plot.close()
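A hypothetical driver loop, assuming the cache file hard-coded in hoge exists and covers the requested times; the GPS values are made up. hoge returns None for NaN input, so bad event times are skipped cleanly.

import numpy as np

for gps in [1251331218.0, np.nan, 1251334818.0]:   # made-up event times
    hoge(gps)   # saves '<gps>.png' for each valid time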
Example #12
def find_daily_cache(start,
                     end,
                     ifo,
                     clustering=None,
                     check_files=False,
                     **kwargs):
    """Find Daily ihope files from the daily runs for the given span

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param ifo
        observatory for search
    @param clustering
        tag for clustering stage to search, default: unclustered
    @param check_files
        check that the returned files can be read on disk, default False
    @param kwargs UNDOCUMENTED
    """
    out = Cache()

    # set clustering tag
    if clustering == None or clustering.upper() == 'UNCLUSTERED':
        file_tag = 'INSPIRAL_UNCLUSTERED'
    elif clustering.upper() in ["100MS", "100MILLISEC"]:
        file_tag = 'INSPIRAL_100MILLISEC_CLUSTERED'
    elif clustering.upper() in ["30MS", "30MILLISEC"]:
        file_tag = 'INSPIRAL_30MILLISEC_CLUSTERED'
    elif clustering.upper() in ["16S", "16SECOND"]:
        file_tag = 'INSPIRAL_16SEC_CLUSTERED'

    # set base directory
    directory = kwargs.pop("directory", os.path.expanduser("~cbc/ihope_daily"))

    # work out days
    span = Segment(start, end)
    start = int(start)
    start_d = lal.UTCToGPS(
        datetime(*lal.GPSToUTC(start)[:6]).replace(hour=0, minute=0,
                                                   second=0).timetuple())
    days = []
    day = start_d
    while day <= end:
        days.append(day)
        day += 86400

    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    pjoin = os.path.join
    intersects = span.intersects
    from_T050017 = CacheEntry.from_T050017

    # loop over days gathering files
    for day in days:
        utc = datetime(*lal.GPSToUTC(day)[:6])
        day_path = pjoin(directory, utc.strftime("%Y%m"),
                         utc.strftime("%Y%m%d"))
        day_cache = os.path.join(day_path, "%s-%s.cache" % (ifo, file_tag))
        if isfile(day_cache):
            with open(day_cache, "r") as f:
                filenames = Cache.fromfile(f).pfnlist()
        else:
            filenames = glob(
                os.path.join(day_path, ("%s-%s-*.xml.gz" % (ifo, file_tag))))
        for filename in filenames:
            e = from_T050017(filename)
            if intersects(e.segment):
                append(e)

    out.sort(key=lambda e: e.path)
    return out
Example #13
def find_daily_cache(start, end, ifo, clustering=None, check_files=False,
                     **kwargs):
    """Find Daily ihope files from the daily runs for the given span

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param ifo
        observatory for search
    @param clustering
        tag for clustering stage to search, default: unclustered
    @param check_files
        check that the returned files can be read on disk, default False
    @param kwargs UNDOCUMENTED
    """
    out = Cache()

    # set clustering tag
    if clustering==None or clustering.upper()=='UNCLUSTERED':
        file_tag='INSPIRAL_UNCLUSTERED'
    elif clustering.upper() in ["100MS", "100MILLISEC"]:
        file_tag='INSPIRAL_100MILLISEC_CLUSTERED'
    elif clustering.upper() in ["30MS", "30MILLISEC"]:
        file_tag='INSPIRAL_30MILLISEC_CLUSTERED'
    elif clustering.upper() in ["16S", "16SECOND"]:
        file_tag='INSPIRAL_16SEC_CLUSTERED'

    # set base directory
    directory = kwargs.pop("directory", os.path.expanduser("~cbc/ihope_daily"))

    # work out days
    span = Segment(start, end)
    start = int(start)
    start_d = lal.UTCToGPS(datetime(*lal.GPSToUTC(start)[:6]).replace(
                                       hour=0, minute=0, second=0).timetuple())
    days = []
    day = start_d
    while day <= end:
        days.append(day)
        day+=86400

    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    pjoin = os.path.join
    intersects = span.intersects
    from_T050017 = CacheEntry.from_T050017

    # loop over days gathering files
    for day in days:
        utc = datetime(*lal.GPSToUTC(day)[:6])
        day_path = pjoin(directory, utc.strftime("%Y%m"),
                         utc.strftime("%Y%m%d"))
        day_cache = os.path.join(day_path, "%s-%s.cache" % (ifo, file_tag))
        if isfile(day_cache):
            with open(day_cache, "r") as f:
                filenames = Cache.fromfile(f).pfnlist()
        else:
            filenames = glob(os.path.join(day_path,
                                               ("%s-%s-*.xml.gz"
                                                % (ifo, file_tag))))
        for filename in filenames:
            e = from_T050017(filename)
            if intersects(e.segment):
                append(e)

    out.sort(key=lambda e: e.path)
    return out
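A hypothetical call, assuming the daily ihope directory tree referenced above is available on the cluster; the GPS times are made up and span exactly one day.

# collect 30-millisecond-clustered inspiral files for H1 over one day
daily = find_daily_cache(1126224017, 1126310417, 'H1', clustering='30ms')
print("%d files found" % len(daily))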
Example #14
########## apply segment list (of good times)

segments = SegmentList.read('L1_ER7_segments.txt')

for cachefile in cachelist:
  ### open trigger cache
  # Make a tag for the saving the plot as well as the title
  # The tag is the name of the channel extracted from the path
  tag = cachefile.split('/')[-1]
  tag = tag.split('.')[0]
  tag = tag.replace('_Omicron','')
 
  print ('\n\nReading file: %s now ...\n' % tag)
  with open(cachefile, 'r') as fobj:
      cache = Cache.fromfile(fobj)


  ### read triggers
  # filter to select for triggers with frequency < 100
  #trigs = SnglBurstTable.read(cache, verbose=True, filt=lambda t: t.peak_frequency < 100)

  #filter to select for triggers with frequency <100 and snr <100
  trigs = get_triggers('L1:'+tag, 'sngl_burst', segments, cache=cache)


  ### check triggers read successfully
  if not trigs:
    print("    WARNING: No triggers for channel '%s'." % channel,
                file=sys.stderr)
  else:
Example #15
#
conditions = map(parse_specification, opts.specifier)

#
# Put the conditions together
#
channel_cond = defaultdict(list)
for inst, channel_name, op, threshold in conditions:
	channel = "%s:%s" % (inst, channel_name)
	channel_cond[channel].append((op, threshold))

#
# Read the datas and such
#
ifos = list(set([c[:2] for c in channel_cond.keys()]))
cache = Cache.fromfile(open(opts.frame_cache))
seg = cache.to_segmentlistdict()[ifos[0][0]][0]
if opts.verbose:
	print "Loaded %s, total coverage time: %f" % (opts.frame_cache, abs(seg))

#
# Set up the XML document
#
xmldoc = ligolw.Document()
xmldoc.appendChild(ligolw.LIGO_LW())
# Append the process information
procrow = utils.process.append_process(xmldoc, program=sys.argv[0])
utils.process.append_process_params(xmldoc, procrow, process.process_params_from_dict(opts.__dict__))

#
# Segment storage
Example #16
def mkSegment(gst, get, utc_date, txt=True):

    chGRDLSC = 'K1:GRD-LSC_LOCK_STATE_N'
    chGRDIFO = 'K1:GRD-IFO_STATE_N'
    chGRDEQ = 'K1:GRD-PEM_EARTHQUAKE_STATE_N'
    chOMCADC    = 'K1:FEC-32_ADC_OVERFLOW_0_0'

    channels = [chGRDLSC,chGRDIFO,chGRDEQ,chOMCADC]
    
    if getpass.getuser() == "controls":
        gwf_cache = '/users/DET/Cache/latest.cache'
        with open(gwf_cache, 'r') as fobj:
            cache = Cache.fromfile(fobj)
    else:
        # add 1sec margin for locked segments contract.
        cache = GetFilelist(gst-1, get+1)

    #------------------------------------------------------------

    #print('Reading {0} timeseries data...'.format(date))
    # add 1sec margin for locked segments contract.
    channeldata = TimeSeriesDict.read(cache, channels, start=gst-1, end=get+1, format='gwf.lalframe', gap='pad')
    channeldataGRDIFO = channeldata[chGRDIFO]
    channeldataGRDLSC = channeldata[chGRDLSC]
    channeldataGRDEQ = channeldata[chGRDEQ]
    channeldataOMCADC = channeldata[chOMCADC]

    sv={}
    sv['K1-GRD_SCIENCE_MODE'] = channeldataGRDIFO == 1000 
    # Locked will be defined as the inverse of the unlocked segments, for technical reasons.
    #sv['K1-GRD_LOCKED'] = channeldataGRDLSC == 1000 
    sv['K1-GRD_UNLOCKED'] = channeldataGRDLSC != 1000
    sv['K1-GRD_PEM_EARTHQUAKE'] = channeldataGRDEQ == 1000
    sv['K1-OMC_OVERFLOW_VETO'] = channeldataOMCADC != 0
    # OMC_OVERFLOW_OK will be defined as the inverse of the veto segments, for technical reasons.
    #sv['K1-OMC_OVERFLOW_OK'] = channeldataOMCADC == 0


    dqflag = {}
    # 'keys', 'filepath_txt' and 'filepath_xml' are presumably module-level
    # globals in the original script (not shown in this excerpt)
    for key in keys:
        if key == 'K1-GRD_LOCKED' or key == 'K1-OMC_OVERFLOW_OK':
            continue
        dqflag[key] = sv[key].to_dqflag(round=True)

    # round=True (used above) omits the fractional seconds and is inclusive by default.

    dqflag['K1-GRD_SCIENCE_MODE'].active = dqflag['K1-GRD_SCIENCE_MODE'].active.contract(1.0)

    dqflag['K1-GRD_LOCKED'] = ~dqflag['K1-GRD_UNLOCKED']
    dqflag['K1-GRD_LOCKED'].name = "K1:GRD-LSC_LOCK_STATE_N == 1000"

    dqflag['K1-OMC_OVERFLOW_OK'] = ~dqflag['K1-OMC_OVERFLOW_VETO']
    dqflag['K1-OMC_OVERFLOW_OK'].name = "K1:FEC-32_ADC_OVERFLOW_0_0 == 0"
    
    dqflag['K1-GRD_SCIENCE_MODE'].description = "Observation mode. K1:GRD-IFO_STATE_N == 1000"
    dqflag['K1-GRD_UNLOCKED'].description = "Interferometer is not locked. K1:GRD-LSC_LOCK_STATE_N != 1000"
    dqflag['K1-GRD_LOCKED'].description = "Interferometer is locked. K1:GRD-LSC_LOCK_STATE_N == 1000"
    dqflag['K1-OMC_OVERFLOW_VETO'].description = "OMC overflow happened. K1:FEC-32_ADC_OVERFLOW_0_0 != 0"
    dqflag['K1-OMC_OVERFLOW_OK'].description = "OMC overflow did not happen. K1:FEC-32_ADC_OVERFLOW_0_0 == 0"

    for key in keys:

        # added 1sec margin for locked segments contract is removed.
        margin = DataQualityFlag(known=[(gst,get)],active=[(gst-1,gst),(get,get+1)])
        dqflag[key] -= margin

        # write down 15 min segments. 
        if txt:
            with open(filepath_txt[key], mode='w') as f:
                for seg in dqflag[key].active :
                    f.write('{0} {1}\n'.format(int(seg[0]), int(seg[1])))
        
        # if accumulated file exists, it is added. 
        if os.path.exists(filepath_xml[key]):
            tmp = DataQualityFlag.read(filepath_xml[key])        
            dqflag[key] = dqflag[key] + tmp

        dqflag[key].write(filepath_xml[key],overwrite=True)
Example #17
def CouplingArrange(cache_drc,
                    ach,
                    bch,
                    s,
                    d,
                    a=None,
                    df=None,
                    f0=None,
                    f1=None):

    ## [1] cache_drc   :: Enter cache file path
    ## [2] ach         :: Enter first channel name to compare
    ## [3] bch         :: Enter second channel name to compare
    ## [4] s           :: Enter start GPS time
    ## [5] d           :: Enter duration from the start GPS time
    ## [6] a           :: Enter Averaging Number, CAN NOT be used with -df at the same time
    ## [7] df          :: Enter Frequency interval, df, CAN NOT be used with -a at the same time.
    ## [8] f0          :: Enter start frequency to obtain amplitude value list
    ## [9] f1          :: Enter end frequency to obtain amplitude value list

    import sys
    import os
    import numpy as np
    from scipy.interpolate import interp1d

    from gwpy.timeseries import TimeSeries

    from glue.lal import Cache
    from gwpy.segments import Segment

    ########### List of times to want to calculate ###########

    gst = s  # GPS start time
    get = gst + d  # GPS end time
    dur = d  # Time duration

    ach = str(ach)  # Selected A channel
    bch = str(bch)  # Selected B channel

    avg = a  # Selected averaging number
    df = df  # Selected frequency interval

    if avg == None and df == None:
        avg = 100
        bvg = float(dur / avg)

    elif avg == 0 and df == None:
        avg = 1
        bvg = float(dur / avg)

    elif avg != None and df == None:
        avg = int(avg)
        bvg = float(dur / avg)

    elif avg == None and df != None:
        df = float(df)
        avg = int(df * dur)
        bvg = float(1 / df)

    else:
        avg = 1
        bvg = float(dur / avg)

    ########### Reading Cache ###########

    gwf_cache = cache_drc

    with open(gwf_cache, 'r') as fobj:
        cache = Cache.fromfile(fobj)

    ########### Reading TimeSeries Data ###########

    data1 = TimeSeries.read(cache, ach, gst, get, format='lalframe')
    data2 = TimeSeries.read(cache, bch, gst, get, format='lalframe')

    ########### TimeSeries Data Averaging Process ###########

    data1_psd_seg = TimeSeries.read(cache,
                                    ach,
                                    gst,
                                    gst + (bvg),
                                    format='lalframe').psd()**(1 / 2.)
    for n in range(avg):
        if gst + (bvg * (n + 2)) > gst + (bvg *
                                          (n + 1)) and gst + (bvg *
                                                              (n + 2)) < get:
            data1_seg = TimeSeries.read(cache,
                                        ach,
                                        gst + (bvg * (n + 1)),
                                        gst + (bvg * (n + 2)),
                                        format='lalframe')
            data1_psd_seg += data1_seg.psd()**(1 / 2.)
        else:
            pass

    data1_psd = data1_psd_seg / int(avg)

    data2_psd_seg = TimeSeries.read(cache,
                                    bch,
                                    gst,
                                    gst + (bvg),
                                    format='lalframe').psd()**(1 / 2.)
    for n in range(avg):
        if gst + (bvg * (n + 2)) > gst + (bvg *
                                          (n + 1)) and gst + (bvg *
                                                              (n + 2)) < get:
            data2_seg = TimeSeries.read(cache,
                                        bch,
                                        gst + (bvg * (n + 1)),
                                        gst + (bvg * (n + 2)),
                                        format='lalframe')
            data2_psd_seg += data2_seg.psd()**(1 / 2.)
        else:
            pass

    data2_psd = data2_psd_seg / int(avg)

    ########### Calculate PSD Ratio of two channels using Interpolation process ###########

    data1_df = float(str(data1_psd.df)[:-2])
    data2_df = float(str(data2_psd.df)[:-2])

    data1_psd_yarr = np.array(data1_psd)
    data1_psd_xarr = np.linspace(data1_psd.xspan[0], data1_psd.xspan[-1],
                                 len(data1_psd_yarr))

    data2_psd_yarr = np.array(data2_psd)
    data2_psd_xarr = np.linspace(data2_psd.xspan[0], data2_psd.xspan[-1],
                                 len(data2_psd_yarr))

    g1 = interp1d(data1_psd_xarr, data1_psd_yarr)
    g2 = interp1d(data2_psd_xarr, data2_psd_yarr)

    h_xlist = []
    h_ylist = []

    if len(data1_psd_xarr) >= len(data2_psd_xarr):
        for a in range((len(data2_psd_xarr))):
            h_xlist.append(a * data2_df)
            h_ylist.append(float(g1(a * data2_df)) / float(g2(a * data2_df)))
        hx = np.array(h_xlist)
        hy = np.array(h_ylist)

    else:
        for b in range((len(data1_psd_xarr))):
            h_xlist.append(b * data1_df)
            h_ylist.append(float(g1(b * data1_df)) / float(g2(b * data1_df)))
        hx = np.array(h_xlist)
        hy = np.array(h_ylist)

    h = interp1d(hx, hy)

    ############ Find amplitude for selected frequency ############

    freq0 = f0
    freq1 = f1

    if freq0 != None and freq1 != None:
        amp_list = []
        amp_list_x = []
        amp_list_y = []
        for f in range(len(h_xlist)):
            if h_xlist[f] <= freq1 and freq0 <= h_xlist[f]:
                amp_list_x.append(h_xlist[f])
                amp_list_y.append(h(h_xlist[f]))
        amp_list.append(amp_list_x)
        amp_list.append(amp_list_y)

        return amp_list

    else:
        amp_list = []
        amp_list.append(h_xlist)
        amp_list.append(h_ylist)

        return amp_list
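A hypothetical call, assuming the cache file covers both channels over the requested span; the second channel name and all numbers are made up.

freqs, ratio = CouplingArrange('full_Sep01-Nov01.cache',              # cache file path (assumed)
                               'K1:PEM-EXV_SEIS_Z_SENSINF_OUT_DQ',    # channel A
                               'K1:PEM-EXV_SEIS_NS_SENSINF_OUT_DQ',   # channel B (made up)
                               1251331218, 600,                       # start GPS time, duration
                               a=10, f0=1.0, f1=100.0)                # 10 averages, 1-100 Hz band
print("%d frequency bins between 1 and 100 Hz" % len(freqs))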
Example #18
def main(args=None):
    """Run the GWSumm command-line interface
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    if args.debug:
        warnings.simplefilter('error', DeprecationWarning)

    # set verbose output options
    globalv.VERBOSE = args.verbose

    # find all config files
    args.config_file = [
        os.path.expanduser(fp) for csv in args.config_file
        for fp in csv.split(',')
    ]

    # check segdb option
    if args.on_segdb_error not in ['raise', 'warn', 'ignore']:
        parser.error("Invalid option --on-segdb-error='%s'" %
                     args.on_segdb_error)

    # read configuration file
    config = GWSummConfigParser()
    config.optionxform = str
    if args.ifo:
        config.set_ifo_options(args.ifo, section=DEFAULTSECT)
    config.set(DEFAULTSECT, 'user', getpass.getuser())
    config.read(args.config_file)

    try:
        ifo = config.get(DEFAULTSECT, 'IFO')
    except NoOptionError:
        ifo = None
    finally:
        globalv.IFO = ifo

    # interpolate section names
    interp = {}
    if ifo:
        interp['ifo'] = ifo.lower()
        interp['IFO'] = ifo.title()
    config.interpolate_section_names(**interp)

    # double-check week mode matches calendar setting
    if args.mode == 'week':
        if config.has_option("calendar", "start-of-week"):
            weekday = getattr(calendar,
                              config.get("calendar", "start-of-week").upper())
            if weekday != args.week.timetuple().tm_wday:
                msg = ("Cannot process week starting on %s. The "
                       "'start-of-week' option in the [calendar] section "
                       "of the INI file specifies weeks start on %ss." %
                       (args.week.strftime('%Y%m%d'),
                        config.get("calendar", "start-of-week")))
                raise parser.error(msg)

    # record times in ConfigParser
    config.set_date_options(args.gpsstart, args.gpsend, section=DEFAULTSECT)

    # convert times for convenience
    span = Segment(args.gpsstart, args.gpsend)
    utc = tconvert(args.gpsstart)
    starttime = Time(float(args.gpsstart), format='gps')
    endtime = Time(float(args.gpsend), format='gps')

    # set mode and output directory
    mode.set_mode(args.mode)
    try:
        path = mode.get_base(utc)
    except ValueError:
        path = os.path.join('%d-%d' % (args.gpsstart, args.gpsend))

    # set LAL FFT plan wisdom level
    duration = min(globalv.NOW, args.gpsend) - args.gpsstart
    if duration > 200000:
        fft_lal.LAL_FFTPLAN_LEVEL = 3
    elif duration > 40000:
        fft_lal.LAL_FFTPLAN_LEVEL = 2
    else:
        fft_lal.LAL_FFTPLAN_LEVEL = 1

    # set global html only flag
    if args.html_only:
        globalv.HTMLONLY = True

    # build directories
    mkdir(args.output_dir)
    os.chdir(args.output_dir)
    plotdir = os.path.join(path, 'plots')
    mkdir(plotdir)

    # -- setup --------------------------------------

    LOGGER.info(" -- GW interferometer summary information system -- ")
    LOGGER.debug("This is process {}".format(os.getpid()))
    LOGGER.debug("You have selected {} mode".format(mode.get_mode().name))
    LOGGER.debug("Start time: {0} ({1})".format(starttime.utc.iso,
                                                starttime.gps))
    LOGGER.debug("End time: {0} ({1})".format(endtime.utc.iso, endtime.gps))
    LOGGER.debug("Output directory: {}".format(
        os.path.abspath(os.path.join(args.output_dir, path))))

    # -- Finalise configuration
    LOGGER.info("Loading configuration")
    plugins = config.load_plugins()
    if plugins:
        LOGGER.debug(" -- Loaded {} plugins:".format(len(plugins)))
        for mod in plugins:
            LOGGER.debug("        %s" % mod)
    units = config.load_units()
    LOGGER.debug("    Loaded %d units" % len(units))
    channels = config.load_channels()
    LOGGER.debug("    Loaded %d channels" % len(channels))
    states = config.load_states()
    LOGGER.debug("    Loaded %d states" % len(states))
    rcp = config.load_rcParams()
    LOGGER.debug("    Loaded %d rcParams" % len(rcp))

    # read list of tabs
    tablist = TabList.from_ini(config,
                               match=args.process_tab,
                               path=path,
                               plotdir=plotdir)
    tablist.sort(reverse=True)
    tabs = sorted(tablist.get_hierarchy(), key=tablist._sortkey)
    LOGGER.info("    Loaded %d tabs [%d parents overall]" %
                (len(tablist), len(tabs)))

    # read caches
    cache = {}
    for (key,
         var) in zip(['datacache', 'trigcache', 'segmentcache'],
                     [args.data_cache, args.event_cache, args.segment_cache]):
        if var:
            LOGGER.info("Reading %s from %d files... " % (key, len(var)))
            cache[key] = Cache()
            for fp in var:
                with open(fp, 'r') as f:
                    cache[key].extend(Cache.fromfile(f))
            cache[key] = cache[key].sieve(segment=span)
            LOGGER.debug("Done [%d entries]" % len(cache[key]))

    # -- read archive -------------------------------

    if not hasattr(args, 'archive'):
        args.archive = False

    if args.html_only:
        args.archive = False
        args.daily_archive = False
    elif args.archive is True:
        args.archive = 'GW_SUMMARY_ARCHIVE'

    archives = []

    if args.archive:
        archivedir = os.path.join(path, 'archive')
        mkdir(archivedir)
        args.archive = os.path.join(
            archivedir, '%s-%s-%d-%d.h5' %
            (ifo, args.archive, args.gpsstart, args.gpsend - args.gpsstart))
        if os.path.isfile(args.archive):
            archives.append(args.archive)
        else:
            LOGGER.debug(
                "No archive found in %s, one will be created at the end" %
                args.archive)

    # read daily archive for week/month/... mode
    if hasattr(args, 'daily_archive') and args.daily_archive:
        # find daily archive files
        archives.extend(
            archive.find_daily_archives(args.gpsstart, args.gpsend, ifo,
                                        args.daily_archive, archivedir))
        # then don't read any actual data
        cache['datacache'] = Cache()

    for arch in archives:
        LOGGER.info("Reading archived data from %s" % arch)
        archive.read_data_archive(arch)
        LOGGER.debug("Archive data loaded")

    # -- read HTML configuration --------------------

    css = config.get_css(section='html')
    javascript = config.get_javascript(section='html')

    # enable comments
    try:
        globalv.HTML_COMMENTS_NAME = config.get('html', 'disqus-shortname')
    except (NoOptionError, NoSectionError):
        pass

    # find new ifo bases
    ifobases = {}
    try:
        bases_ = config.nditems('html')
    except NoSectionError:
        pass
    else:
        base_reg = re.compile(r'-base\Z')
        for key, val in bases_:
            if base_reg.search(key):
                ifobases[key.rsplit('-', 1)[0]] = val
    ifobases = OrderedDict(sorted(ifobases.items(), key=lambda x: x[0]))

    # -- write auxiliary pages ----------------------

    # get URL from output directory
    if 'public_html' in os.getcwd():
        urlbase = os.path.sep + os.path.join(
            '~%s' % config.get(DEFAULTSECT, 'user'),
            os.getcwd().split('public_html', 1)[1][1:])
        base = urlbase
    # otherwise get URL from html config
    elif ifo in ifobases:
        urlbase = urlparse(ifobases[ifo]).path
        base = urlbase
    # otherwise let the write_html processor work it out on-the-fly
    else:
        urlbase = None
        base = None

    # get link to issues report page
    try:
        issues = config.get('html', 'issues')
    except KeyError:
        issues = True

    # write 404 error page
    if not args.no_htaccess and not args.no_html and urlbase:
        top = os.path.join(urlbase, path)
        four0four = get_tab('404')(span=span,
                                   parent=None,
                                   path=path,
                                   index=os.path.join(path, '404.html'))
        four0four.write_html(css=css,
                             js=javascript,
                             tabs=tabs,
                             ifo=ifo,
                             ifomap=ifobases,
                             top=top,
                             base=base,
                             writedata=not args.html_only,
                             writehtml=not args.no_html,
                             issues=issues)
        url404 = os.path.join(urlbase, four0four.index)
        with open(os.path.join(path, '.htaccess'), 'w') as htaccess:
            print('Options -Indexes', file=htaccess)
            print('ErrorDocument 404 %s' % url404, file=htaccess)
            print('ErrorDocument 403 %s' % url404, file=htaccess)

    # write config page
    about = get_tab('about')(span=span, parent=None, path=path)
    if not args.no_html:
        mkdir(about.path)
        about.write_html(css=css,
                         js=javascript,
                         tabs=tabs,
                         config=config.files,
                         prog=PROG,
                         ifo=ifo,
                         ifomap=ifobases,
                         about=about.index,
                         base=base,
                         issues=issues,
                         writedata=not args.html_only,
                         writehtml=not args.no_html)

    # -- read bulk data -----------------------------

    # XXX: bulk data reading could optimise things
    # XXX: but has never been used, so should remove (DMM 18/01/16)
    if args.bulk_read and not args.html_only:
        LOGGER.info("Reading all data in BULK")
        allts = set()
        allsv = set()
        allflags = set()
        for tab in tablist:
            snames = []
            for state in tab.states:
                snames.append(state.name)
                if state.definition:
                    allflags.update(re_flagdiv.split(state.definition))
            # get all data defined for the 'all' state
            if ALLSTATE in snames:
                allts.update(
                    tab.get_channels('timeseries', 'spectrogram', 'spectrum',
                                     'histogram'))
                allsv.update(tab.get_channels('statevector'))
                allflags.update(tab.get_flags('segments'))
            # or get data for plots defined over all states
            else:
                for plot in tab.plots:
                    if plot.state is not None:
                        continue
                    if plot.type in [
                            'timeseries', 'spectrogram', 'spectrum',
                            'histogram'
                    ]:
                        allts.update(plot.channels)
                    elif plot.type in ['statevector']:
                        allsv.update(plot.channels)
                    elif plot.type in ['segments']:
                        allflags.update([
                            f for cflag in plot.flags
                            for f in re_flagdiv.split(cflag)[::2] if f
                        ])
        allseg = SegmentList([span])
        if len(allflags):
            LOGGER.info(
                "%d data-quality flags identified for segment query from all "
                "tabs" % len(allflags))
            get_segments(allflags, allseg, config=config, return_=False)
        if len(allts):
            LOGGER.info("%d channels identified for TimeSeries from all tabs" %
                        len(allts))
            get_timeseries_dict(allts,
                                allseg,
                                config=config,
                                nds=args.nds,
                                nproc=args.multiprocess,
                                return_=False)
        if len(allsv):
            LOGGER.info(
                "%d channels identified for StateVector from all tabs" %
                len(allsv))
            get_timeseries_dict(allsv,
                                allseg,
                                config=config,
                                nds=args.nds,
                                statevector=True,
                                nproc=args.multiprocess,
                                return_=False)

    # -- process all tabs ---------------------------

    # TODO: consider re-working this loop as TabList.process_all

    for tab in tablist:
        if tab.parent:
            name = '%s/%s' % (tab.parent.name, tab.name)
        else:
            name = tab.name
        if not args.html_only and isinstance(tab, get_tab('_processed')):
            LOGGER.debug("Processing %s" % name)
            tab.process(config=config,
                        nds=args.nds,
                        nproc=args.multiprocess,
                        segdb_error=args.on_segdb_error,
                        datafind_error=args.on_datafind_error,
                        **cache)
        if not tab.hidden and not isinstance(tab, get_tab('link')):
            mkdir(tab.href)
            tab.write_html(css=css,
                           js=javascript,
                           tabs=tabs,
                           ifo=ifo,
                           ifomap=ifobases,
                           about=about.index,
                           base=base,
                           issues=issues,
                           writedata=not args.html_only,
                           writehtml=not args.no_html)

        # archive this tab
        if args.archive:
            LOGGER.info("Writing data to archive")
            archive.write_data_archive(args.archive)
            LOGGER.debug("Archive written to {}".format(
                os.path.abspath(args.archive)))
        LOGGER.debug("%s complete" % (name))

    LOGGER.info("-- Data products written, all done --")
Example #19
def process_options():
	"""
	Process options and check for required values.
	"""
	opt = OptionParser()
	opt.add_option( "-c", "--input-cache", help="Read triggers from the files in this cache." )
	opt.add_option( "-v", "--verbose", action="store_true", help="Be verbose." )
	veto_settings = OptionGroup( opt, "HVeto settings" )
	veto_settings.add_option( "-i", "--instrument", help="Instrument against which to veto. Required." )
	veto_settings.add_option( "-r", "--reference-channel", help="Channel against which to veto. Required." )
	veto_settings.add_option( "-t", "--reference-triggers", help="File path to load reference triggers. Required." )
	veto_settings.add_option( "-s", "--significance-threshold", type=float, default=15, help="Significance below which to terminate the rounds. Default is 15." )
	veto_settings.add_option( "--snr-thresh", action="append", help="Add an SNR threshold to use in veto round. Can be given multiple times for different values. WARNING: This will override the default settings, *not* append to them." )
	veto_settings.add_option( "--time-window", action="append", help="Add a time window to use in veto round. Can be given multiple times for different values. WARNING: This will override the default settings, *not* append to them." )
	veto_settings.add_option( "-S", "--min-ref-snr", type=float, default=8, help="Minimum SNR threshold to load a trigger in the reference channel." )
	# FIXME: Strictly speaking the ignore list is required because I'm not
	# sure what the function will do without one.
	veto_settings.add_option( "-I", "--ignore-list", help="Text file, one channel per line with a list of channels to ignore when loading triggers." )
	# FIXME:
	#veto_settings.add_option( "-C", "--ignore-channel", action="append", help="Ignore these channels. Given several times, will ignore several channels. Do not prepend instrument. E.g. -C LSC-DARM_CTRL." )
	veto_settings.add_option( "--write-coinc", action="store_true", default=False, help="If set, output table will include coinc tables indicating which triggers were coincided in the process of execution." )
	opt.add_option_group( veto_settings )

	livetime_settings = OptionGroup( opt, "livetime settings" )
	# FIXME:
	#livetime_settings.add_option( "-L", "--livetime-definer", action="append", help="Name of segment definer entry from which to draw live segments. See '-l' option. If none is indicated, use all segments. Provide several times for deveral different segment definers." )
	livetime_settings.add_option( "-l", "--livetime-segments", help="File from which to parse livetime segments. Will assume, at first, a LIGOLW XML file with valid segment definer and segment tables. If this fails, will try segwizard format. Required." )
	livetime_settings.add_option( "--segment-definer", help="In tandem with --livetime-segments will retrieve segments with this definer. If none is provided, all segments will be used. Note: this option is REQUIRED if querying a databse. Example: H1:DMT-SCIENCE:1 (version is required)" )
	livetime_settings.add_option( "--segment-database", help="Query this URL for segments. Takes precedence over providing a file." )
	livetime_settings.add_option( "--gps-start", type=int, help="GPS start of analysis." )
	livetime_settings.add_option( "--gps-end", type=int, help="GPS end of analysis." )
	opt.add_option_group( livetime_settings )

	opts, args = opt.parse_args()
	if opts.instrument is None:
		print >>sys.stderr, "Instrument must be indicated."
		exit()
	if opts.reference_channel is None:
		print >>sys.stderr, "Reference channel must be indicated."
		exit()
	if opts.reference_triggers is None:
		print >>sys.stderr, "Reference triggers must be present."
		exit()
	if (opts.livetime_segments or opts.segment_database) is None:
		print >>sys.stderr, "Must provide livetime segments file or segment database location."
		exit()
	if opts.segment_database and (opts.segment_definer is None):
		print >>sys.stderr, "Must provide definer for segment database querying."
		exit()
	if len(args) == 0 and opts.input_cache is None:
		print >>sys.stderr, "Must provide input arguments or set --input-cache."
		exit()
	if opts.input_cache is not None:
		with open(opts.input_cache) as cache:
			c = Cache.fromfile(cache)
			args.extend( c.pfnlist() )
	if opts.ignore_list is None:
		print >>sys.stderr, "Must provide a channel ignore list."
		exit()

	return opts, args
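A hypothetical entry point, only to illustrate the return contract: opts holds the parsed options and args ends up holding the trigger file paths, including any expanded from --input-cache.

if __name__ == '__main__':
    opts, args = process_options()
    if opts.verbose:
        print("Collected %d trigger files" % len(args))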
Example #20
def main(args=None):
    """Run the online trigger visualization tool
    """
    parser = create_parser()
    args = parser.parse_args(args=args)

    args.epoch = args.epoch or args.gpsstart
    args.columns = args.columns or DEFAULT_COLUMNS
    if len(args.columns) < 2:
        parser.error("--columns must receive at least two columns, "
                     "got {0}".format(len(args.columns)))
    args.x_column = args.x_column or args.columns[0]
    args.y_column = args.y_column or args.columns[1]
    if not args.color and len(args.columns) >= 3:
        args.color = args.columns[2]

    # add columns for tile plot
    for c in ('duration', 'bandwidth'):
        if args.tiles and c not in args.columns:
            args.columns.append(c)

    span = Segment(args.gpsstart, args.gpsend)

    # format default params
    params = {
        'xscale': 'auto-gps',
        'epoch': args.epoch or args.gpsstart,
        'xlim': (args.gpsstart, args.gpsend),
        'yscale': 'log',
        'ylabel': 'Frequency [Hz]',
        'cmap': get_plot('triggers').defaults.get('cmap', 'YlGnBu'),
        'clim': (3, 50),
        'colorlabel': 'Signal-to-noise ratio (SNR)',
    }

    # update with user params
    for input_ in args.plot_params or []:
        key, val = input_.split('=', 1)
        params[key.strip('-')] = safe_eval(val)

    # -- load triggers ------------------------------

    # get segments
    if args.state:
        segs = get_segments(args.state, [span])

    # read cache
    if args.cache_file:
        with open(args.cache_file, 'r') as f:
            cache = Cache.fromfile(f).sieve(segment=span)
        LOGGER.info("Read cache of {0} files".format(len(cache)))
    else:
        cache = None

    # get triggers
    trigs = get_triggers(
        args.channel,
        args.etg, [span],
        cache=cache,
        columns=args.columns,
        verbose='Reading {0.channel} ({0.etg}) events'.format(args))
    LOGGER.info("Read {0} events".format(len(trigs)))
    if args.state:
        trigs = keep_in_segments(trigs, segs.active, etg=args.etg)
        LOGGER.info("{0} events in state {1!r}".format(len(trigs), args.state))
    if args.snr:
        trigs = trigs[trigs['snr'] >= args.snr]
        LOGGER.info("{0} events remaining with snr >= {1.snr}".format(
            len(trigs), args))

    # -- make plot ----------------------------------

    # format keywords for plot creation
    plot_kw = OrderedDict(
        (key, params.pop(key))
        for key in ('xscale', 'xlim', 'epoch', 'yscale', 'ylabel'))

    # create plot
    if args.tiles:
        plot = trigs.tile(args.x_column,
                          args.y_column,
                          'duration',
                          'bandwidth',
                          color=args.color,
                          edgecolor=params.pop('edgecolor', 'face'),
                          linewidth=params.pop('linewidth', .8),
                          **plot_kw)
    else:
        plot = trigs.scatter(args.x_column,
                             args.y_column,
                             color=args.color,
                             edgecolor=params.pop('edgecolor', 'none'),
                             s=params.pop('s', 12),
                             **plot_kw)
    ax = plot.gca()
    mappable = ax.collections[0]

    # set mappable properties
    vmin, vmax = params.pop('clim', (3, 50))
    if params.pop('logcolor', True):
        mappable.set_norm(LogNorm(vmin=vmin, vmax=vmax))
    if mappable._A is None:
        mappable._A = ndarray((0, ))

    # draw colorbar
    clabel = params.pop(
        'colorlabel',
        'Signal-to-noise ratio (SNR)',
    )
    cmap = params.pop(
        'cmap',
        get_plot('triggers').defaults.get('cmap', 'YlGnBu'),
    )
    ax.colorbar(mappable=mappable, label=clabel, cmap=cmap, clim=(vmin, vmax))

    for key, val in params.items():
        try:
            getattr(ax, 'set_%s' % key)(val)
        except AttributeError:
            setattr(ax, key, val)

    # add segments
    if args.state:
        sax = plot.add_segments_bar(segs,
                                    ax=ax,
                                    label=args.state_label or args.state)
        setp(sax.get_yticklabels(), fontsize=10)
        sax.set_epoch(args.epoch)

    # save and exit
    plot.save(args.output_file)
    LOGGER.info("Plot saved to {0.output_file}".format(args))