Example #1
def find_dmt_omega(channel, start, end, base=None):
    """Find DMT-Omega trigger XML files
    """
    span = Segment(to_gps(start), to_gps(end))
    channel = get_channel(channel)
    ifo = channel.ifo
    if base is None and channel.name.split(':', 1)[-1] == 'GDS-CALIB_STRAIN':
        base = '/gds-%s/dmt/triggers/%s-HOFT_Omega' % (
            ifo.lower(), ifo[0].upper())
    elif base is None:
        raise NotImplementedError("This method doesn't know how to locate DMT "
                                  "Omega trigger files for %r" % str(channel))
    gps5 = int('%.5s' % start)
    end5 = int('%.5s' % end)
    out = Cache()
    append = out.append
    while gps5 <= end5:
        trigglob = os.path.join(
            base, str(gps5),
            '%s-%s_%s_%s_OmegaC-*-*.xml' % (
                ifo, channel.system, channel.subsystem, channel.signal))
        found = glob.glob(trigglob)
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
        gps5 += 1
    out.sort(key=lambda e: e.path)
    vprint("    Found %d files for %s (DMT-Omega)\n"
           % (len(out), channel.ndsname))
    return out
Example #2
def find_kw(channel, start, end, base=None):
    """Find KW trigger XML files
    """
    span = Segment(to_gps(start), to_gps(end))
    channel = get_channel(channel)
    ifo = channel.ifo
    if base is None and channel.name.split(':', 1)[-1] == 'GDS-CALIB_STRAIN':
        tag = '%s-KW_HOFT' % ifo[0].upper()
        base = '/gds-%s/dmt/triggers/%s' % (ifo.lower(), tag)
    elif base is None:
        tag = '%s-KW_TRIGGERS' % ifo[0].upper()
        base = '/gds-%s/dmt/triggers/%s' % (ifo.lower(), tag)
    gps5 = int('%.5s' % start)
    end5 = int('%.5s' % end)
    out = Cache()
    append = out.append
    while gps5 <= end5:
        trigglob = os.path.join(
            base, '%s-%d' % (tag, gps5), '%s-*-*.xml' % tag)
        found = glob.glob(trigglob)
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
        gps5 += 1
    out.sort(key=lambda e: e.path)
    vprint("    Found %d files for %s (KW)\n"
           % (len(out), channel.ndsname))
    return out
Example #3
def get_known_flags(start, end, url='https://segments.ligo.org', ifo=None,
                    badonly=None):
    """Return the list of all flags with known segments

    Parameters
    ----------
    start : `int`
        the GPS start time of the query
    end : `int`
        the GPS end time of the query
    url : `str`, optional
        the FQDN of the target segment database
    ifo : `str`, optional
        the prefix for the IFO, if `None` all flags are returned
    badonly : `bool`, optional
        if given, return only flags whose ``active_indicates_ifo_badness``
        metadata matches this value

    Returns
    -------
    flags : `list` of `str`
        a list of flag names (<ifo>:<name>:<version>) that are known by
        the database in the given [start, end) interval
    """
    start = int(to_gps(start))
    end = int(to_gps(end))
    uri = '%s/report/known?s=%d&e=%d' % (url, start, end)
    out = decode_json(urifunctions.getDataUrllib2(uri))

    def select_flag(f):
        if ifo is not None and f['ifo'] != ifo:
            return False
        if (badonly is not None and
                f['metadata']['active_indicates_ifo_badness'] != badonly):
            return False
        return True

    return sorted(['%s:%s:%d' % (f['ifo'], f['name'], f['version'])
                   for f in out['results'] if select_flag(f)])
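A minimal usage sketch for the query above, assuming network access to the segment database at `segments.ligo.org`; the GPS times and IFO prefix are illustrative only.

# query all H1 flags known during a one-day window (illustrative GPS times)
flags = get_known_flags(1230336018, 1230422418, ifo='H1')
print(len(flags))
print(flags[:3])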
Example #4
def plot_blrms():
    # 'start' is assumed to be a module-level datetime defined elsewhere
    from datetime import timedelta
    from channeling import channeling_reader
    from gwpy.segments import DataQualityFlag
    from gwpy.time import to_gps
    in_darm = [
        f'L1:GDS-CALIB_STRAIN_BLRMS_{band}' for band in [
            '10_13', '18_22', '22_27', '27_29', '29_40', '40_54', '54_65',
            '65_76', '75_115', '115_190', '190_210', '210_290', '290_480',
            '526_590', '590_650', '650_885', '885_970', '1110_1430'
        ]
    ]
    stop = start + timedelta(days=1)
    out_darm = [f'{c}.mean' for c in in_darm]
    dqflag = 'L1:DMT-ANALYSIS_READY:1'
    dq = DataQualityFlag.query(dqflag, to_gps(start), to_gps(stop))
    downloader = channeling_reader(['L1:GDS-CALIB_STRAIN'],
                                   start,
                                   search_dirs=['../darm'])
    data = downloader(in_darm, start=to_gps(start), end=to_gps(stop))
    for name in data:
        plt = data[name].plot(figsize=(100, 10))
        # modify the figure as a whole.
        plt.add_segments_bar(dq, label='')
        plt.gca().set_xscale('days')
        plt.suptitle(name)
        plt.save(f'{name}.png')
Example #5
 def test_to_gps(self):
     # test datetime conversion
     self.assertEqual(time.to_gps(DATE), GPS)
     # test Time
     self.assertEqual(
         time.to_gps(DATE, format='datetime', scale='utc'), GPS)
     # test tuple
     self.assertEqual(time.to_gps(tuple(DATE.timetuple())[:6]), GPS)
     # test Quantity
     self.assertEqual(time.to_gps(Quantity(GPS, 's')), GPS)
     # test errors
     self.assertRaises(UnitConversionError, time.to_gps, Quantity(1, 'm'))
     self.assertRaises(ValueError, time.to_gps, 'random string')
Example #6
 def test_to_gps(self):
     # test datetime conversion
     self.assertEqual(time.to_gps(DATE), GPS)
     # test Time
     self.assertEqual(time.to_gps(DATE, format='datetime', scale='utc'),
                      GPS)
     # test tuple
     self.assertEqual(time.to_gps(tuple(DATE.timetuple())[:6]), GPS)
     # test Quantity
     self.assertEqual(time.to_gps(Quantity(GPS, 's')), GPS)
     # test errors
     self.assertRaises(UnitConversionError, time.to_gps, Quantity(1, 'm'))
     self.assertRaises(ValueError, time.to_gps, 'random string')
Example #7
 def fetch(self,
           config=GWSummConfigParser(),
           segdb_error='raise',
           datafind_error='raise',
           **kwargs):
     """Finalise this state by fetching its defining segments,
     either from global memory, or from the segment database
     """
     # check we haven't done this before
     if self.ready:
         return self
     # fetch data
     if self.definition:
         match = re.search('(%s)' % '|'.join(MATHOPS.keys()),
                           self.definition)
     else:
         match = None
     if self.filename:
         self._read_segments(self.filename)
     elif match:
         channel, thresh = self.definition.split(match.groups()[0])
         channel = channel.rstrip()
         thresh = float(thresh.strip())
         self._fetch_data(channel,
                          thresh,
                          match.groups()[0],
                          config=config,
                          datafind_error=datafind_error,
                          **kwargs)
     # fetch segments
     elif self.definition:
         self._fetch_segments(config=config,
                              segdb_error=segdb_error,
                              **kwargs)
     # fetch null
     else:
         start = config.getfloat(DEFAULTSECT, 'gps-start-time')
         end = config.getfloat(DEFAULTSECT, 'gps-end-time')
         self.known = [(start, end)]
         self.active = self.known
     # restrict to given hours
     if self.hours:
         segs_ = SegmentList()
         # get start day
          d = Time(float(self.start), format='gps', scale='utc').datetime
          d = d.replace(hour=0, minute=0, second=0, microsecond=0)
         end_ = Time(float(self.end), format='gps', scale='utc').datetime
         while d < end_:
             # get GPS of day
             t = to_gps(d)
             # for each [start, end) hour pair, build a segment
             for h0, h1 in self.hours:
                 segs_.append(Segment(t + h0 * 3600, t + h1 * 3600))
              # increment to the next day
             d += datetime.timedelta(1)
         self.known &= segs_
         self.active &= segs_
     # FIXME
     self.ready = True
     return self
Example #8
def cleander_to_gps(year, month, day, hour, minute, second):
    time = datetime(int(year), int(month), int(day), int(hour), int(minute),
                    int(second))
    all_seconds = to_gps(time) - 18
    week = int(all_seconds / 604800)
    second = all_seconds % 604800
    return (week, second)
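A short usage sketch, assuming the snippet's module has already imported `datetime` and gwpy's `to_gps`; the date is illustrative.

# convert a calendar date/time to a (GPS week, seconds-into-week) pair
week, second = cleander_to_gps(2019, 4, 7, 0, 0, 0)
print(week, second)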
Example #9
def find_daily_archives(start, end, ifo, tag, basedir=os.curdir):
    """Find the daily archives spanning the given GPS [start, end) interval
    """
    archives = []
    s = from_gps(to_gps(start))
    e = from_gps(to_gps(end))
    while s < e:
        daybase = mode.get_base(s, mode=mode.Mode.day)
        ds = to_gps(s)
        s += datetime.timedelta(days=1)
        de = to_gps(s)
        archivedir = os.path.join(basedir, daybase, 'archive')
        arch = os.path.join(archivedir,
                            '%s-%s-%d-%d.hdf' % (ifo, tag, ds, de - ds))
        if os.path.isfile(arch):
            archives.append(arch)
    return archives
Example #10
def find_daily_archives(start, end, ifo, tag, basedir=os.curdir):
    """Find the daily archives spanning the given GPS [start, end) interval
    """
    archives = []
    s = from_gps(to_gps(start))
    e = from_gps(to_gps(end))
    while s < e:
        daybase = mode.get_base(s, mode=mode.Mode.day)
        ds = to_gps(s)
        s += datetime.timedelta(days=1)
        de = to_gps(s)
        archivedir = os.path.join(basedir, daybase, 'archive')
        arch = os.path.join(archivedir, '%s-%s-%d-%d.h5'
                            % (ifo, tag, ds, de-ds))
        if os.path.isfile(arch):
            archives.append(arch)
    return archives
Example #11
def stdin_to_gps():
    # 'timestamps' is assumed to be a module-level list defined elsewhere
    for line in sys.stdin:
        time = line[10:19]
        mon = line[4:8]
        day = line[8:11]
        year = line[20:24]
        datetime_obj = mon + day + year + time
        leap = gpstime.LEAPDATA.data[-1][1] - 19
        gps = to_gps(str(datetime_obj)).gpsSeconds - leap
        timestamps.append(gps)
Example #12
def ligo_model_overflow_channels(dcuid, ifo=None, frametype=None, gpstime=None,
                                 accum=True, nds=None):
    """Find the CDS overflow channel names for a given DCUID

    Parameters
    ----------
    dcuid : `int`
        the ID of the front-end controller to search

    ifo : `str`, optional
        the prefix of the interferometer to use

    frametype : `str`, optional
        the frametype to use, defaults to ``{ifo}_R``

    gpstime : `int`, optional
        the GPS time at which to search

    accum : `bool`, optional
        whether to return the accumulated overflow channels (`True`) or not
        (`False`)

    nds : `str`, optional
        the ``'host:port'`` to use for accessing data via NDS, or `None`
        to use direct GWF file access

    Returns
    -------
    names : `list` of `str`
        the list of channel names found

    """
    ifo = ifo or const.IFO
    if ifo is None:
        raise ValueError("Cannot format channel without an IFO, "
                         "please specify")
    if frametype is None:
        frametype = '{0}_R'.format(ifo)

    if gpstime is None:
        gpstime = int(to_gps('now')) - 1000

    if nds:
        allchannels = _ligo_model_overflow_channels_nds(dcuid, ifo, gpstime,
                                                        nds)
    else:
        allchannels = _ligo_model_overflow_channels_gwf(dcuid, ifo, frametype,
                                                        gpstime)

    if accum:
        regex = re.compile(r'%s:FEC-%d_(ADC|DAC)_OVERFLOW_ACC_\d+_\d+\Z'
                           % (ifo, dcuid))
    else:
        regex = re.compile(r'%s:FEC-%d_(ADC|DAC)_OVERFLOW_\d+_\d+\Z'
                           % (ifo, dcuid))
    return natural_sort(filter(regex.match, allchannels))
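A hedged usage sketch; the DCUID, IFO and GPS time below are placeholders, and the private `_ligo_model_overflow_channels_*` helpers must be able to reach frame or NDS data for the call to succeed.

# list the accumulated ADC/DAC overflow channels for a front-end model
names = ligo_model_overflow_channels(10, ifo='L1', gpstime=1186741861)
for name in names:
    print(name)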
Example #13
def ligo_model_overflow_channels(dcuid, ifo=None, frametype=None, gpstime=None,
                                 accum=True, nds=None):
    """Find the CDS overflow channel names for a given DCUID

    Parameters
    ----------
    dcuid : `int`
        the ID of the front-end controller to search

    ifo : `str`, optional
        the prefix of the interferometer to use

    frametype : `str`, optional
        the frametype to use, defaults to ``{ifo}_R``

    gpstime : `int`, optional
        the GPS time at which to search

    accum : `bool`, optional
        whether to return the accumulated overflow channels (`True`) or not
        (`False`)

    nds : `str`, optional
        the ``'host:port'`` to use for accessing data via NDS, or `None`
        to use direct GWF file access

    Returns
    -------
    names : `list` of `str`
        the list of channel names found

    """
    ifo = ifo or const.IFO
    if ifo is None:
        raise ValueError("Cannot format channel without an IFO, "
                         "please specify")
    if frametype is None:
        frametype = '{0}_R'.format(ifo)

    if gpstime is None:
        gpstime = int(to_gps('now')) - 1000

    if nds:
        allchannels = _ligo_model_overflow_channels_nds(dcuid, ifo, gpstime,
                                                        nds)
    else:
        allchannels = _ligo_model_overflow_channels_gwf(dcuid, ifo, frametype,
                                                        gpstime)

    if accum:
        regex = re.compile(r'%s:FEC-%d_(ADC|DAC)_OVERFLOW_ACC_\d+_\d+\Z'
                           % (ifo, dcuid))
    else:
        regex = re.compile(r'%s:FEC-%d_(ADC|DAC)_OVERFLOW_\d+_\d+\Z'
                           % (ifo, dcuid))
    return natural_sort(filter(regex.match, allchannels))
Example #14
def get_known_flags(start,
                    end,
                    url='https://segments.ligo.org',
                    ifo=None,
                    badonly=None):
    """Return the list of all flags with known segments

    Parameters
    ----------
    start : `int`
        the GPS start time of the query
    end : `int`
        the GPS end time of the query
    url : `str`, optional
        the FQDN of the target segment database
    ifo : `str`, optional
        the prefix for the IFO, if `None` all flags are returned
    badonly : `bool`, optional
        if given, return only flags whose ``active_indicates_ifo_badness``
        metadata matches this value

    Returns
    -------
    flags : `list` of `str`
        a list of flag names (<ifo>:<name>:<version>) that are known by
        the database in the given [start, end) interval
    """
    start = int(to_gps(start))
    end = int(to_gps(end))
    uri = '%s/report/known?s=%d&e=%d' % (url, start, end)
    out = decode_json(urifunctions.getDataUrllib2(uri))

    def select_flag(f):
        if ifo is not None and f['ifo'] != ifo:
            return False
        if (badonly is not None
                and f['metadata']['active_indicates_ifo_badness'] != badonly):
            return False
        return True

    return sorted([
        '%s:%s:%d' % (f['ifo'], f['name'], f['version'])
        for f in out['results'] if select_flag(f)
    ])
Example #15
def plot_range():
    # 'start' is assumed to be a module-level datetime defined elsewhere
    from datetime import timedelta
    from channeling import channeling_reader
    from gwpy.segments import DataQualityFlag
    from gwpy.time import to_gps
    downloader = channeling_reader(['L1:DMT-SNSH_EFFECTIVE_RANGE_MPC.mean'],
                                   start,
                                   search_dirs=['../darm'])
    starte = start + timedelta(days=13)
    stop = starte + timedelta(days=2)
    dqflag = 'L1:DMT-ANALYSIS_READY:1'
    dq = DataQualityFlag.query(dqflag, to_gps(starte), to_gps(stop))
    data = downloader(['L1:DMT-SNSH_EFFECTIVE_RANGE_MPC.mean'],
                      start=to_gps(starte),
                      end=to_gps(stop))
    for name in data:
        plt = data[name].plot()  # figsize=(1, 10))
        # modify the figure as a whole.
        plt.add_segments_bar(dq, label='')
        plt.gca().set_xscale('hours')
        plt.suptitle(name)
        plt.save(f'{name}.png')
Example #16
def correct_time():
    HOST = '[email protected]'
    COMMAND = 'caget -t -f10 C4:DAQ-DC0_GPS'
    ssh = subprocess.Popen(['ssh', '%s' % HOST, COMMAND],
                           shell=False,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)

    result = ssh.stdout.readlines()
    fb4gps = to_gps(result[0].strip('\n'))
    print('{} ({})'.format(from_gps(fb4gps), fb4gps))
    return fb4gps
Example #17
 def fetch(self, config=GWSummConfigParser(), segdb_error='raise',
           datafind_error='raise', **kwargs):
     """Finalise this state by fetching its defining segments,
     either from global memory, or from the segment database
     """
     # check we haven't done this before
     if self.ready:
         return self
     # fetch data
     if self.definition:
         match = re.search('(%s)' % '|'.join(MATHOPS.keys()),
                           self.definition)
     else:
         match = None
     if self.filename:
         self._read_segments(self.filename)
     elif match:
         channel, thresh = self.definition.split(match.groups()[0])
         channel = channel.rstrip()
         thresh = float(thresh.strip())
         self._fetch_data(channel, thresh, match.groups()[0], config=config,
                          datafind_error=datafind_error, **kwargs)
     # fetch segments
     elif self.definition:
         self._fetch_segments(config=config, segdb_error=segdb_error,
                              **kwargs)
     # fetch null
     else:
         start = config.getfloat(DEFAULTSECT, 'gps-start-time')
         end = config.getfloat(DEFAULTSECT, 'gps-end-time')
         self.known = [(start, end)]
         self.active = self.known
     # restrict to given hours
     if self.hours:
         segs_ = SegmentList()
         # get start day
          d = Time(float(self.start), format='gps', scale='utc').datetime
          d = d.replace(hour=0, minute=0, second=0, microsecond=0)
         end_ = Time(float(self.end), format='gps', scale='utc').datetime
         while d < end_:
             # get GPS of day
             t = to_gps(d)
             # for each [start, end) hour pair, build a segment
             for h0, h1 in self.hours:
                 segs_.append(Segment(t + h0 * 3600, t + h1*3600))
              # increment to the next day
             d += datetime.timedelta(1)
         self.known &= segs_
         self.active &= segs_
     # FIXME
     self.ready = True
     return self
Example #18
 def get_bins(self):
     """Work out the correct histogram binning for this `DutyDataPlot`
     """
     # if not given anything, work it out from the mode
     if self.bins is None:
         m = mode.MODE_NAME[mode.get_mode()]
         duration = float(abs(self.span))
         # for year mode, use a month
         if m in ['YEAR'] or duration >= 86400 * 300:
             dt = relativedelta(months=1)
         # for more than 8 weeks, use weeks
         elif duration >= 86400 * 7 * 8:
             dt = relativedelta(weeks=1)
         # for week and month mode, use daily
         elif m in ['WEEK', 'MONTH'] or duration >= 86400 * 7:
             dt = relativedelta(days=1)
         # for day mode, make hourly duty factor
         elif m in ['DAY']:
             dt = relativedelta(hours=1)
         # otherwise provide 10 bins
         else:
             dt = relativedelta(seconds=float(abs(self.span))/10.)
     # if given a float, assume this is the bin size
     elif isinstance(self.bins, (float, int)):
         dt = relativedelta(seconds=self.bins)
     # if we don't have a list, we must have worked out dt
     if not isinstance(self.bins, (list, tuple, numpy.ndarray)):
         self.bins = []
         s = from_gps(self.start)
         e = from_gps(self.end)
         while s < e:
             t = int(to_gps(s + dt) - to_gps(s))
             self.bins.append(t)
             s += dt
     self.bins = numpy.asarray(self.bins)
     return self.bins
Example #19
def get_job_duration_history_shell(classad,
                                   value,
                                   user=getuser(),
                                   maxjobs=None):
    """Return the durations of history condor jobs

    This method calls out to `condor_history` in the shell.

    Parameters
    ----------
    classad : `str`
        name of classad providing unique identifier for job type
    value :
        value of classad
    user : `str`, optional
        name of submitting user
    maxjobs : `int`, optional
        maximum number of matches to return

    Returns
    -------
    times, durations : `tuple` of `numpy.ndarray`
        two arrays with the job end time and durations of each matched
        condor process
    """
    if isinstance(value, str):
        value = '"%s"' % value
    cmd = [
        'condor_history', '-constraint',
        '\'%s==%s && Owner=="%s"\'' % (classad, value, user), '-autof',
        'EnteredCurrentStatus', '-autof', 'JobStartDate'
    ]
    if maxjobs is not None:
        cmd.extend(['-match', str(maxjobs)])
    history = check_output(' '.join(cmd), shell=True).decode("utf-8")
    lines = history.rstrip('\n').split('\n')
    times = numpy.zeros(len(lines))
    jobdur = numpy.zeros(times.size)
    for i, line in enumerate(lines):
        try:
            e, s = map(int, line.split())
        except ValueError:
            times = times[:i]
            jobdur = jobdur[:i]
            break
        times[i] = to_gps(datetime.fromtimestamp(e)) + time.timezone
        jobdur[i] = e - s
    return times, jobdur
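A usage sketch, assuming `condor_history` is available on the host and that `numpy`, `getuser` and `check_output` are imported as in the snippet; the classad name and value are placeholders.

# GPS end times and durations (seconds) of up to 100 matching jobs
times, durations = get_job_duration_history_shell(
    'JobBatchName', 'my_analysis_dag', maxjobs=100)
print(times.size, durations.sum())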
Example #20
def write_status(message, code, timeout=0, tmessage=None, nagiosfile=None):
    """Write a Nagios status file in JSON format

    Parameters
    ----------
    message : `str`
        status message for Nagios

    code : `int`
        exit code for process

    timeout : `int`, optional
        timeout length, in seconds

    tmessage : `str`, optional
        timeout message

    nagiosfile : `str`, optional
        full path to a JSON status file, defaults to ``nagios.json``

    Notes
    -----
    This function will write an output to the requested location, then exit
    without returning.
    """
    # status dictionary
    status = {
        "created_gps":
        int(to_gps('now')),
        "status_intervals": [{
            "start_sec": 0,
            "txt_status": message,
            "num_status": code
        }],
    }
    # update timeout information
    if timeout:
        status["status_intervals"].append({
            "start_sec": timeout,
            "txt_status": tmessage,
            "num_status": 3,
        })
    # get output file and write
    nagiosfile = nagiosfile or 'nagios.json'
    with open(nagiosfile, 'w') as fileobj:
        json.dump(status, fileobj)
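A minimal usage sketch of the function above; the status message and output path are illustrative.

# report an OK status (code 0) that degrades to UNKNOWN (code 3) after 30 minutes
write_status('Process running normally', 0, timeout=1800,
             tmessage='No update received for 30 minutes',
             nagiosfile='/tmp/nagios.json')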
Example #21
def write_status(message, code, timeout=0, tmessage=None, nagiosfile=None):
    """Write a Nagios status file in JSON format

    Parameters
    ----------
    message : `str`
        status message for Nagios

    code : `int`
        exit code for process

    timeout : `int`, optional
        timeout length, in seconds

    tmessage : `str`, optional
        timeout message

    nagiosfile : `str`, optional
        full path to a JSON status file, defaults to ``nagios.json``

    Notes
    -----
    This function will write an output to the requested location, then exit
    without returning.
    """
    # status dictionary
    status = {
        "created_gps": int(to_gps('now')),
        "status_intervals": [{
            "start_sec": 0,
            "txt_status": message,
            "num_status": code
        }],
    }
    # update timeout information
    if timeout:
        status["status_intervals"].append({
            "start_sec": timeout,
            "txt_status": tmessage,
            "num_status": 3,
        })
    # get output file and write
    nagiosfile = nagiosfile or 'nagios.json'
    with open(nagiosfile, 'w') as fileobj:
        json.dump(status, fileobj)
Example #22
def get_job_duration_history_shell(classad, value, user=getuser(),
                                   maxjobs=None):
    """Return the durations of history condor jobs

    This method calls out to `condor_history` in the shell.

    Parameters
    ----------
    classad : `str`
        name of classad providing unique identifier for job type
    value :
        value of classad
    user : `str`, optional
        name of submitting user
    maxjobs : `int`, optional
        maximum number of matches to return

    Returns
    -------
    times, durations : `tuple` of `numpy.ndarray`
        two arrays with the job end time and durations of each matched
        condor process
    """
    if isinstance(value, str):
        value = '"%s"' % value
    cmd = ['condor_history', '-constraint',
           '\'%s==%s && Owner=="%s"\'' % (classad, value, user),
           '-autof', 'EnteredCurrentStatus',
           '-autof', 'JobStartDate']
    if maxjobs is not None:
        cmd.extend(['-match', str(maxjobs)])
    history = check_output(' '.join(cmd), shell=True).decode("utf-8")
    lines = history.rstrip('\n').split('\n')
    times = numpy.zeros(len(lines))
    jobdur = numpy.zeros(times.size)
    for i, line in enumerate(lines):
        try:
            e, s = map(int, line.split())
        except ValueError:
            times = times[:i]
            jobdur = jobdur[:i]
            break
        times[i] = to_gps(datetime.fromtimestamp(e)) + time.timezone
        jobdur[i] = e - s
    return times, jobdur
Example #23
    def nagios_exit(exitcode, message, timeout=1800):
        """Exit this program in a nagios-compatible manner

        Parameters
        ----------
        exitcode : `int`
            the exitcode for the nagios message, an integer between 0--3
            (inclusive)
        message : `str`
            the message to print to the screen
        """
        if json:
            out = {
                'created_gps': int(to_gps('now')),
                'status_intervals': [
                    {
                        'start_sec': 0,
                        'end_sec': timeout,
                        'num_status': exitcode,
                        'txt_status': message
                    },
                    {
                        'start_sec': timeout,
                        'num_status': 3,
                        'txt_status': '%s not running' % name
                    },
                ],
            }
            for key in kwargs:
                out[key] = kwargs[key]
            if author is not None:
                author_, email = author.rsplit(' ', 1)
                email = email.strip('<').rstrip('>')
                out['author'] = {'name': author_, 'email': email}
            if isinstance(json, str):
                with open(json, 'w') as f:
                    print(jsonlib.dumps(out), file=f)
            else:
                print(jsonlib.dumps(out))
        else:
            print(message)
        sys.exit(exitcode)
Example #24
def get_job_duration_history(classad,
                             value,
                             user=getuser(),
                             maxjobs=0,
                             schedd=None):
    """Return the durations of history condor jobs

    This method uses the Python bindings for `htcondor`, which seem
    to have network transfer limits; do not use it for large job numbers
    (>2000). Instead use `get_job_duration_history_shell`, which calls
    out to `condor_history` in the shell.

    Parameters
    ----------
    classad : `str`
        name of classad providing unique identifier for job type
    value :
        value of classad
    user : `str`, optional
        name of submitting user
    maxjobs : `int`, optional
        maximum number of matches to return

    Returns
    -------
    times, durations : `tuple` of `numpy.ndarray`
        two arrays with the job end time and durations of each matched
        condor process
    """
    if schedd is None:
        schedd = htcondor.Schedd()
    if isinstance(value, str):
        value = '"%s"' % value
    history = list(
        schedd.history('%s==%s && Owner=="%s"' % (classad, value, user),
                       ['EnteredCurrentStatus', 'JobStartDate'], maxjobs))
    times = numpy.zeros(len(history))
    jobdur = numpy.zeros(len(history))
    for i, h in enumerate(history):
        times[i] = (to_gps(datetime.fromtimestamp(h['EnteredCurrentStatus'])) +
                    time.timezone)
        jobdur[i] = h['EnteredCurrentStatus'] - h['JobStartDate']
    return times, jobdur
Example #25
def get_job_duration_history(classad, value, user=getuser(), maxjobs=0,
                             schedd=None):
    """Return the durations of history condor jobs

    This method uses the Python bindings for `htcondor`, which seem
    to have network transfer limits; do not use it for large job numbers
    (>2000). Instead use `get_job_duration_history_shell`, which calls
    out to `condor_history` in the shell.

    Parameters
    ----------
    classad : `str`
        name of classad providing unique identifier for job type
    value :
        value of classad
    user : `str`, optional
        name of submitting user
    maxjobs : `int`, optional
        maximum number of matches to return

    Returns
    -------
    times, durations : `tuple` of `numpy.ndarray`
        two arrays with the job end time and durations of each matched
        condor process
    """
    if schedd is None:
        schedd = htcondor.Schedd()
    if isinstance(value, str):
        value = '"%s"' % value
    history = list(schedd.history(
        '%s==%s && Owner=="%s"' % (classad, value, user),
        ['EnteredCurrentStatus', 'JobStartDate'], maxjobs))
    times = numpy.zeros(len(history))
    jobdur = numpy.zeros(len(history))
    for i, h in enumerate(history):
        times[i] = (
            to_gps(datetime.fromtimestamp(h['EnteredCurrentStatus'])) +
            time.timezone)
        jobdur[i] = h['EnteredCurrentStatus'] - h['JobStartDate']
    return times, jobdur
Example #26
def test_to_gps():
    """Test :func:`gwpy.time.to_gps`
    """
    # str conversion
    t = time.to_gps('Jan 1 2017')
    assert isinstance(t, time.LIGOTimeGPS)
    assert t == 1167264018
    assert time.to_gps('Sep 14 2015 09:50:45.391') == (
        time.LIGOTimeGPS(1126259462, 391000000))
    # datetime conversion
    assert time.to_gps(datetime(2017, 1, 1)) == 1167264018

    # astropy.time.Time conversion
    assert time.to_gps(Time(57754, format='mjd')) == 1167264018

    # tuple
    assert time.to_gps((2017, 1, 1)) == 1167264018

    # Quantity
    assert time.to_gps(Quantity(1167264018, 's')) == 1167264018

    # keywords
    with freeze_time('2015-09-14 09:50:45.391'):
        assert time.to_gps('now') == 1126259462
        assert time.to_gps('today') == 1126224017
        assert time.to_gps('tomorrow') == 1126310417
        assert time.to_gps('yesterday') == 1126137617

    # errors
    with pytest.raises(UnitConversionError):
        time.to_gps(Quantity(1, 'm'))
    with pytest.raises((ValueError, TypeError)) as exc:
        time.to_gps('random string')
    assert 'Cannot parse date string \'random string\': ' in str(exc.value)
Example #27
def plot_spectro(Qtransform,
                 gps_event,
                 channel,
                 add_chirp,
                 twindow=[0.5, 2, 10],
                 m1=20,
                 m2=10,
                 s1z=0,
                 s2z=0,
                 f_low=20,
                 pn_2order=7,
                 npoints=100,
                 approximant='SEOBNRv4',
                 plot_ylim=[20, 1400],
                 outDir='spectro.png'):
    """ Create spectrogramm plot"""

    # Get loudest Q-plane
    Q_loudest = Qtransform.q

    # get the time interval queried
    tmin = np.min(Qtransform.times)
    tmax = np.max(Qtransform.times)
    tmin = tmin.value
    tmax = tmax.value
    # Convert to gps times in seconds
    tevent_list = []
    for tevent in gps_event:
        sec = to_gps(tevent).gpsSeconds
        nsec = to_gps(tevent).gpsNanoSeconds
        t = str(sec) + '.' + str(nsec)
        tevent_list.append(float(t))
    # Compute the mean of the input gps_event. Used to center the spectrogram.
    tevent_mean = np.mean(tevent_list)

    # Compute the inspiral signal
    if add_chirp:
        track_t_list = []
        track_f_list = []
        for tevent in tevent_list:
            track_t, track_f = get_inspiral_tf(
                tevent,
                m1,
                m2,
                s1z,
                s2z,
                f_low,
                #npoints=npoints,
                pn_2order=pn_2order,
                approximant=approximant)
            track_t_list.append(track_t)
            track_f_list.append(track_f)

    # Check whether the 'V1:' prefix is present in the input channel name
    if channel[:3] == 'V1:':
        channel_title = channel[3:]
    else:
        channel_title = channel

    if len(twindow) == 1:
        for wind in twindow:
            plot = plt.figure()
            ax = plot.gca()
            if tevent_mean - wind < tmin:
                print(
                    'The requested time window [%f, %f] starts before available data time interval [%f, %f]'
                    % (tevent_mean - wind, tevent_mean + wind, tmin, tmax))
                print(
                    'The x-axis of the spectrogram starts with first time available from the data.'
                )
                xmin = tmin
            else:
                xmin = tevent_mean - wind
            if tevent_mean + wind > tmax:
                print(
                    'The requested time window [%f, %f] ends after available data time interval [%f, %f]'
                    % (tevent_mean - wind, tevent_mean + wind, tmin, tmax))
                print(
                    'The x-axis of the spectrogram ends with the latest time available from the data.'
                )
                xmax = tmax
            else:
                xmax = tevent_mean + wind

            ax.imshow(Qtransform.crop(tevent_mean - wind, tevent_mean + wind))
            ax.set_yscale('log')
            ax.set_ylim(plot_ylim[0], plot_ylim[1])
            ax.set_ylabel('Frequency [Hz]')

            ax.set_xticks(
                np.linspace(tevent_mean - wind, tevent_mean + wind, 11))
            locs = ax.get_xticks()
            labels = [
                str(np.round(lab, 1)) for lab in np.linspace(-wind, wind, 11)
            ]
            ax.set_xticklabels(labels)
            ax.set_xlim(xmin, xmax)
            ax.set_xlabel(r'Seconds from $t_0$')
            #ax.set_epoch(tevent_mean)
            ax.colorbar(clim=(0, 35), label='Normalised energy')
            if add_chirp:
                for tbank, fbank in zip(track_t_list, track_f_list):
                    ax.plot(tbank, fbank, lw=2, color='r')
            plt.title('Omegascans of V1 (Q = %.1f): %s at %f' %
                      (Q_loudest, channel_title, tevent_mean))
            plt.tight_layout()
            plt.savefig(outDir)

    else:
        nb_window = len(twindow)
        fig, axes = plt.subplots(nrows=1, ncols=nb_window, figsize=(23, 5))

        for i, wind in enumerate(twindow):
            if tevent_mean - wind < tmin:
                print(
                    'The requested time window [%f, %f] starts before available data time interval [%f, %f]'
                    % (tevent_mean - wind, tevent_mean + wind, tmin, tmax))
                print(
                    'The x-axis of the spectrogram starts with first time available from the data.'
                )
                xmin = tmin
            else:
                xmin = tevent_mean - wind
            if tevent_mean + wind > tmax:
                print(
                    'The requested time window [%f, %f] ends after available data time interval [%f, %f]'
                    % (tevent_mean - wind, tevent_mean + wind, tmin, tmax))
                print(
                    'The x-axis of the spectrogram ends with the latest time available from the data.'
                )
                xmax = tmax
            else:
                xmax = tevent_mean + wind

            axes[i].imshow(
                Qtransform.crop(tevent_mean - wind, tevent_mean + wind))
            axes[i].set_yscale('log')
            axes[i].set_ylim(plot_ylim[0], plot_ylim[1])
            axes[i].set_ylabel('Frequency [Hz]')

            #axes[i].set_xscale('auto-gps')
            axes[i].set_xticks(
                np.linspace(tevent_mean - wind, tevent_mean + wind, 11))
            locs = axes[i].get_xticks()
            labels = [
                str(np.round(lab, 1)) for lab in np.linspace(-wind, wind, 11)
            ]
            axes[i].set_xticklabels(labels)
            axes[i].set_xlim(xmin, xmax)
            axes[i].set_xlabel(r'Seconds from $t_0$')
            #axes[i].set_epoch(tevent_mean)
            axes[i].colorbar(clim=(0, 35), label='Normalised energy')
            if add_chirp:
                for tbank, fbank in zip(track_t_list, track_f_list):
                    axes[i].plot(tbank, fbank, lw=2, color='r')
        plt.subplots_adjust(wspace=0.3)
        fig.suptitle('Omegascans of V1 (Q = %.1f): %s at %f' %
                     (Q_loudest, channel_title, tevent_mean),
                     fontweight='bold')
        #fig.tight_layout()
        plt.savefig(outDir)

    return True
Example #28
def representative_spectra(channels,
                           start,
                           stop,
                           rate,
                           label='kmeans-labels',
                           filename=DEFAULT_FILENAME,
                           prefix='.',
                           downloader=TimeSeriesDict.get,
                           cluster_numbers=None,
                           groups=None,
                           **kwargs):
    """
    Make representative spectra for each cluster based on the median psd for minutes in that cluster.
    Downloads only the raw minutes in the cluster to save.
    """
    if groups is None:
        groups = channels

    # read the labels from the save file.
    labels = TimeSeries.read(filename,
                             label,
                             start=to_gps(start),
                             end=to_gps(stop))
    logger.info(f'Read labels {start} to {stop} from {filename}')

    if cluster_numbers is None:
        clusters = list(range(max(labels.value) + 1))

        cluster_counts = list(
            len(labels.value[labels.value == c]) for c in clusters)
        largest_cluster = cluster_counts.index(max(cluster_counts))
        clusters.remove(largest_cluster)

        logger.info(
            f'Largest cluster found to be Nº{largest_cluster} ({100 * max(cluster_counts) // len(labels.value)}%). Doing {clusters}.'
        )
        cluster_counts.remove(max(cluster_counts))
    else:
        clusters = cluster_numbers
        cluster_counts = list(
            len(labels.value[labels.value == c]) for c in clusters)

    t, v, d = labels.times, labels.value, diff(labels.value)

    pairs = list(
        zip([t[0]] + list(t[:-1][d != 0]),
            list(t[1:][d != 0]) + [t[-1]]))
    values = list(v[:-1][d != 0]) + [v[-1]]
    assert len(pairs) == len(values)  # need to include start-| and |-end
    # l|r l|r l|r l|r
    # l,r l,r l,r l,r
    # l r,l r,l r,l r # zip(start + l[1:], r[:-1] + stop)

    print(pairs)
    for pair in pairs:
        print(int(pair[1].value) - int(pair[0].value))
    print(values)

    # use h5py to make a mutable object pointing to a file on disk.
    save_file, filename = path2h5file(
        get_path(f'spectra-cache {start}', 'hdf5', prefix=prefix))
    logger.debug(f'Initiated hdf5 stream to {filename}')

    logger.info(f'Patching {filename}...')
    for i, (dl_start, end) in enumerate(pairs):
        if values[i] in clusters:
            if not data_exists(channels, to_gps(end).seconds, save_file):
                logger.debug(
                    f'Downloading Nº{values[i]} from {dl_start} to {end}...')
                try:
                    dl = downloader(channels,
                                    start=to_gps(dl_start) - LIGOTimeGPS(60),
                                    end=to_gps(end) + LIGOTimeGPS(seconds=1))
                    out = TimeSeriesDict()
                    for n in dl:
                        out[n] = dl[n].resample(**better_aa_opts(dl[n], rate))
                    write_to_disk(out, to_gps(dl_start).seconds, save_file)
                except RuntimeError:  # Cannot find all relevant data on any known server
                    logger.warning(
                        f"SKIPPING Nº{values[i]} from {dl_start} to {end} !!")

    logger.info('Reading data...')
    data = TimeSeriesDict.read(save_file, channels)

    logger.info('Starting PSD generation...')

    f = data[channels[0]].crop(
        start=to_gps(data[channels[0]].times[-1]) - LIGOTimeGPS(60),
        end=to_gps(data[channels[0]].times[-1])).psd().frequencies

    d = (to_gps(labels.times[-1]).seconds - to_gps(labels.times[1]).seconds)
    for i, cluster in enumerate(clusters):
        try:
            psds = {
                channel: FrequencySeries.read(filename, f'{cluster}-{channel}')
                for channel in channels
            }
            logger.info(f'Loaded Nº{cluster}.')

        except KeyError:

            logger.info(
                f'Doing Nº{cluster} ({100 * cluster_counts[i] / len(labels.value):.2f}% of data)...'
            )
            with Progress(f'psd Nº{cluster} ({i + 1}/{len(clusters)})',
                          len(channels) * d) as progress:
                psds = {
                    channel: FrequencySeries(
                        median(stack([
                            progress(data[channel].crop,
                                     pc * d + (to_gps(time).seconds -
                                               to_gps(labels.times[1]).seconds),
                                     start=to_gps(time) - LIGOTimeGPS(60),
                                     end=to_gps(time)).psd().value
                            for c, time in zip(labels.value, labels.times)
                            if c == cluster
                        ]), axis=0),
                        frequencies=f,
                        name=f'{cluster}-{channel}')
                    for pc, channel in enumerate(channels)
                }
            for name in psds.keys():
                psds[name].write(filename, **writing_opts)

        # plotting is slow, so show a nice progress bar.
        logger.debug('Initiating plotting routine...')
        with Progress('plotting', len(groups)) as progress:

            for p, (group, lbls, title) in enumerate(groups):
                # plot the group in one figure.
                plt = Plot(*(psds[channel] for channel in group),
                           separate=False,
                           sharex=True,
                           zorder=1,
                           **kwargs)
                # plt.gca().set_xlim((30,60))
                # modify the figure as a whole.
                # plt.add_segments_bar(dq, label='')
                plt.gca().set_xscale('log')
                plt.gca().set_yscale('log')
                plt.suptitle(title)
                plt.legend(lbls)

                # save to png.
                progress(
                    plt.save, p,
                    get_path(f'{cluster}-{title}',
                             'png',
                             prefix=f'{prefix}/{cluster}'))
Example #29
def threshold_table(start,
                    stop,
                    reading_channels,
                    channels,
                    bands,
                    label='kmeans-labels',
                    filename=DEFAULT_FILENAME,
                    prefix='.'):
    """
    Makes a html table of 'percent increase' from the largest cluster by band and channel.
    """
    data = TimeSeriesDict.read(filename,
                               reading_channels + [label],
                               start=to_gps(start),
                               end=to_gps(stop))
    labels = data[label]

    clusters = list(range(max(labels.value) + 1))
    cluster_counts = list(
        len(labels.value[labels.value == c]) for c in clusters)
    largest_cluster = cluster_counts.index(max(cluster_counts))
    clusters.remove(largest_cluster)

    logger.info(
        f'Largest cluster found to be Nº{largest_cluster} ({100 * max(cluster_counts) // len(labels.value)}%). Doing {clusters}.'
    )
    cluster_counts.remove(max(cluster_counts))

    def amplitude(channel, cluster):
        """return median amplitude for channel in cluster."""
        try:
            chan = data[channel]
        except KeyError:
            return 0.0
        return median([
            chan.value[i] for i, c in enumerate(labels.value) if c == cluster
        ])

    def threshold(cluster, channel, band) -> str:
        f_channel = f'{channel}_BLRMS_{band}.mean'
        base = amplitude(f_channel, largest_cluster)
        if base != 0.0:
            return str(int(
                100 * (amplitude(f_channel, cluster) - base) / base)) + '%'
        else:
            return str(amplitude(f_channel, cluster))

    range_chan = 'L1:DMT-SNSH_EFFECTIVE_RANGE_MPC.mean'
    if range_chan in reading_channels:
        base_range = amplitude(range_chan, largest_cluster)
        if base_range != 0.0:
            snsh = lambda c: 'SNSH: ' + str(
                int(100 * (amplitude(range_chan, c) - base_range) / base_range)
            ) + '%'
        else:
            snsh = lambda c: 'SNSH: 0.0'
    else:
        snsh = lambda c: ''

    with Progress('taking thresholds', len(clusters)) as progress:
        for i, cluster in enumerate(clusters):
            buffer = [[''] + bands]
            for channel in channels:
                buffer.append([channel] + [
                    progress(threshold, i, cluster, channel, band)
                    for band in bands
                ])
            html_table(
                f'cluster {cluster} ({colors[cluster]}) {snsh(cluster)}',
                csv_writer(buffer, get_path(f'{cluster}', 'csv',
                                            prefix=prefix)),
                get_path(f'{cluster}', 'html', prefix=prefix))
    html_table(
        'Index',
        csv_writer(
            [['clusters:']] +
            [[f'<a href="{cluster}.html">Nº{cluster} ({colors[cluster]})</a>']
             for cluster in clusters], get_path('idx', 'csv', prefix=prefix)),
        get_path('index', 'html', prefix=prefix))
Example #30
File: core.py Project: gwpy/gwsumm
 def gpstime(self, t):
     self._gpstime = to_gps(t)
Example #31
def process_channel(processor: PostProcessor,
                    start: datetime,
                    stop: datetime,
                    downloader=TimeSeriesDict.get) -> str:
    """
    Post-processes a channel using the given post-processor, and streams to a file in the working directory.
    The output .hdf5 file is given by the channel name and the start time.
    This is because inserting (unsupported) requires reading out the whole database and re-writing it again.
    It's not a terribly high priority, I think.

    :return filename of generated post-processed channel.

    >>> from channeling import config, process_channel, PostProcessor
    >>> from util import config2dataclass
    >>>
    >>> for channel in eval(config['DEFAULT']['channels']):
    >>>     for processor in config2dataclass(PostProcessor, config, channel):
    >>>         process_channel(processor, start, stop)

    or even
    >>> from channeling import config, process_channel, PostProcessor
    >>> from util import config2dataclass
    >>> from multiprocessing import Pool
    >>>
    >>> p = lambda channel: [process_channel(processor, start, stop) for processor in
    >>>      config2dataclass(PostProcessor, config, channel)]
    >>>
    >>> pool = Pool()
    >>> pool.map(p, eval(config['DEFAULT']['channels']))

    """

    # use h5py to make a mutable object pointing to a file on disk.
    channel_file, filename = path2h5file(
        get_path(f'{processor.channel} {start}', 'hdf5'))
    logger.debug(f'Initiated hdf5 stream to {filename}')

    # get the number of strides.
    num_strides = (stop - start) // processor.stride_length

    # create list of start and end times.
    strides = [[
        start + processor.stride_length * i,
        start + processor.stride_length * (i + 1)
    ] for i in range(num_strides)]

    # stride loop.
    for stride_start, stride_stop in strides:

        if data_exists(processor.output_channels,
                       to_gps(stride_stop).seconds, channel_file):
            # for all possible output channels, it's likely this stride exists already on disk.
            continue

        # get the data.
        logger.debug(
            f'Initiating data download for {processor.channel} ({stride_start} to {stride_stop})'
        )

        # separately download all observing segments within the stride, or one segment for the whole stride.
        # this is set by the processor.respect_segments: bool option.
        # it really should be processor.respect_segments: str = 'L1:DMT-ANALYSIS_READY:1' for generality.
        if processor.respect_segments:
            segments = [[int(s.start), int(s.end)]
                        for s in DataQualityFlag.query(
                            'L1:DMT-ANALYSIS_READY:1',
                            to_gps(stride_start),
                            to_gps(stride_stop)).active]
        else:
            segments = [[to_gps(stride_start).seconds,
                         to_gps(stride_stop).seconds]]

        raw_segments = list()
        for seg_start, seg_stop in segments:
            try:
                raw_segments.append([
                    downloader([processor.channel],
                               start=seg_start,
                               end=seg_stop + processor.extra_seconds),
                    seg_start
                ])
            except RuntimeError:  # sometimes the data does not exist on the server. The show must go on, though.
                logger.warning(
                    f'SKIPPING download for {processor.channel} ({stride_start} to {stride_stop}) !!'
                )

        logger.info(
            f'Completed data download for {processor.channel} ({stride_start} to {stride_stop})'
        )

        for raw, segment_start in raw_segments:
            # use the processor to compute each downloaded segment in the stride.
            finished_segment = processor.compute(raw[processor.channel])
            logger.info(
                f'Generated {processor.__class__.__name__} for {processor.channel}'
            )

            # write each computed segment to the channel file.
            write_to_disk(finished_segment, segment_start, channel_file)

        logger.info(f'Completed stride {stride_start} to {stride_stop})')

    logger.debug(f'Completed channel at {filename}')

    # for automated usage of the post-processed data, return the generated filename.
    return filename
Example #32
def get_channel_online_data(
        channel,
        st,
        et,
        format='spectrogram',
        remove_nonlocked_times=False,
        normalize_coherence=False,
        config_file='/home/stochastic/config_files/ini_files/H1.ini'):
    """
    Returns a list of PEMCoherenceSegment
    objects.

    Parameters
    ----------
    channel : str
        channel name you want to load
    st : str or int
        start time (in string format) or gps time
    et : str or int
        end time (in string format) or gps time
    format : str, optional, default='spectrogram'
        format to return. either spectrogram or seglist. Spectrogram returns a
        `gwpy.spectrogram.Spectrogram` and seglist returns a list of
        `stamp_pem.coherence_segment.PEMCoherenceSegment`.
    remove_nonlocked_times: bool, optional, default=False
        Removes non locked times from a spectrogram
    normalize_coherence : bool, optional, default=False
        Normalizes each column of spectrogram by the number of averages

    Returns
    -------
    out : `gwpy.spectrogram.Spectrogram` or list of
    `stamp_pem.coherence_segment.PEMCoherencSegment` objects
        representation of data between start and end times for a given channel
    """
    pipeline_dict = coh_io.read_pipeline_ini(config_file)
    env_params, run_params = coh_io.check_ini_params(pipeline_dict)
    channel_dict = ChannelDict.read(env_params['list'])
    jobdur = int(env_params['job_duration'])
    darm_channel = run_params['darm_channel']
    basedir = env_params['base_directory']

    if isinstance(st, str):
        st = int(time.to_gps(st))
    if isinstance(et, str):
        et = int(time.to_gps(et))
    starttimes = np.arange(st, et, jobdur)
    subsys = get_channels_subsystem(channel, channel_dict)
    seglist = []
    for starttime in starttimes:
        cohdir = coh_io.get_directory_structure(subsys, starttime, basedir)
        cohfile = coh_io.create_coherence_data_filename(darm_channel,
                                                        subsys,
                                                        starttime,
                                                        starttime + jobdur,
                                                        directory=cohdir)
        try:
            subsystem_data = PEMSubsystem.read(subsys, cohfile)
        except IOError:
            print "No data found between %d and %d for %s" % (
                starttime, starttime + jobdur, channel)
            continue

        if np.isnan(subsystem_data[channel].psd1.value[0]):
            continue
        seglist.append(subsystem_data[channel])

    N = 1

    if format == 'spectrogram':
        if remove_nonlocked_times:
            foundtimes = np.asarray(
                [seglist[ii].starttime for ii in range(len(seglist))])
            data = np.zeros((len(seglist), seglist[0].psd1.size))
            for ii in range(len(seglist)):
                if normalize_coherence:
                    N = seglist[ii].N
                if np.isnan(seglist[ii].get_coh()[0]):
                    continue
                data[ii, :] = seglist[ii].get_coh() * N
                specgram = Spectrogram(data,
                                       epoch=foundtimes[0],
                                       dt=jobdur,
                                       df=seglist[0].psd1.df)
        else:
            foundtimes = np.asarray(
                [seglist[ii].starttime for ii in range(len(seglist))])
            count = 0
            data = np.nan * np.zeros((starttimes.size, seglist[0].psd1.size))
            for ii, starttime in enumerate(starttimes):
                if np.any(foundtimes == starttime):
                    if normalize_coherence:
                        N = seglist[count].N
                    data[ii, :] = seglist[count].get_coh() * N
                    count += 1
            specgram = Spectrogram(data,
                                   dt=jobdur,
                                   epoch=starttimes[0],
                                   df=seglist[0].psd1.df)
        return specgram
    elif format == 'seglist':
        return seglist
    else:
        raise ValueError('format needs to be "spectrogram" or "seglist"')
Example #33
        if folder not in entries:
            os.system('mkdir {}'.format(folder))
        os.chdir(folder)
    if end != '':
        os.chdir(end)
    return None


# This script will save Time Series data within a specified time
# period in a directory named '{dirname}/{observing run}/{channel}'
# for each of a set of given channel names. To use it, simply
# tweak the values laid out in all lines through 163 as is
# appropriate for your purposes.

# Input start and end times of desired Time Series files:
start = to_gps('Nov 1 2019 01:00:00')
end = to_gps('Mar 20 2020 00:00:00')

# Input name for directory to store files,
# or '' for storage in this script's directory:
dirname = 'Local_Data'
# Input string for name of observing run containing 'start':
obsrun = 'O3b'
# Input frametype for desired Time Series data:
frame = 'L1_R'
# Input the desired number of parallel processors to assign to data retrieval:
procs = 10

# Input list of channels whose Time Series are to be saved.
chan1 = 'L1:ISI-HAM6_SENSCOR_X_FADE_TIME_LEFT_MON'
chan2 = 'L1:ISI-HAM6_SENSCOR_X_FADE_CUR_CHAN_MON'
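
A sketch of how the settings above might be used to fetch and save the data, assuming gwpy is installed and L1 raw frames are reachable from the host; the directory layout follows the comment above, and fetching this full span in one call would be a very large download.

from gwpy.timeseries import TimeSeriesDict
import os

# fetch both channels over the full span and write one HDF5 file per channel
data = TimeSeriesDict.get([chan1, chan2], start, end,
                          frametype=frame, nproc=procs)
for name, series in data.items():
    outdir = os.path.join(dirname, obsrun, name)
    os.makedirs(outdir, exist_ok=True)
    series.write(os.path.join(outdir, '{}-{}-{}.hdf5'.format(
        name.replace(':', '_'), int(start), int(end - start))))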
Example #34
 def format(self, record):
     record.gpstime = to_gps('now')
     levelname = record.levelname
     if self.use_color and levelname in LEVEL_COLORS:
         record.levelname = color_text(levelname, LEVEL_COLORS[levelname])
     return logging.Formatter.format(self, record)
Example #35
 def gpstime(self, t):
     self._gpstime = to_gps(t)
Ejemplo n.º 36
0
        print('\nArguments values:')
        print('-------------------')
        for key, value in vars(args).items():
            print('%s : %s' % (key, value))
        print('-------------------\n')

    if args.frange:
        frange = args.frange
    else:
        frange = qtransform.DEFAULT_FRANGE

    if args.useGPS:
        # Compute the mean event time if a list was provided
        tevent_list = []
        for tevent in args.gps_event:
            gps = to_gps(tevent)
            # zero-pad the nanoseconds so that e.g. 5 ns does not become 0.5 s
            t = '%d.%09d' % (gps.gpsSeconds, gps.gpsNanoSeconds)
            tevent_list.append(float(t))
        # Compute the mean of the input gps_event. Used to center the spectrogram.
        tevent_mean = np.mean(tevent_list)
    else:
        tevent_mean = None

    # Transform the outseg into a gwpy Segment
    if args.outseg:
        outseg = Segment(float(args.outseg[0]), float(args.outseg[1]))
    else:
        outseg = None

    #  Set fres and tres
Ejemplo n.º 37
0
def get_segments(flags, segments, cache=None,
                 url='https://segdb-er.ligo.caltech.edu', **kwargs):
    """Fetch some segments from the segment database

    Parameters
    ----------
    flags : `str`, `list`
        one or more flags for which to query
    segments : `~gwpy.segments.DataQualityFlag`, `~gwpy.segments.SegmentList`
        span over which to query for flag segments
    cache : `~glue.lal.Cache`, optional
        cache of files to use as data source
    url : `str`
        URL of segment database, if ``cache`` is not given
    **kwargs
        other keyword arguments to pass to either
        `~gwpy.segments.DataQualityFlag.read` (if ``cache`` is given) or
        `~gwpy.segments.DataQualityFlag.query` (otherwise)

    Returns
    -------
    segments : `~gwpy.segments.DataQualityFlag` or `~gwpy.segments.DataQualityDict`
        a single `~gwpy.segments.DataQualityFlag` (if ``flags`` is given
        as a `str`), or a `~gwpy.segments.DataQualityDict` (if ``flags``
        is given as a `list`)
    """
    # format segments
    if isinstance(segments, DataQualityFlag):
        segments = segments.active
    elif isinstance(segments, tuple):
        segments = [Segment(to_gps(segments[0]), to_gps(segments[1]))]
    segments = SegmentList(segments)

    # get format for files
    if cache is not None and not isinstance(cache, Cache):
        kwargs.setdefault(
            'format', _get_valid_format('read', DataQualityFlag, None,
                                        None, (cache[0],), {}))

    # populate an existing set of flags
    if isinstance(flags, (DataQualityFlag, DataQualityDict)):
        return flags.populate(source=cache or url, segments=segments,
                               **kwargs)
    # query one flag
    elif cache is None and isinstance(flags, str):
        return DataQualityFlag.query(flags, segments, url=url, **kwargs)
    # query lots of flags
    elif cache is None:
        return DataQualityDict.query(flags, segments, url=url, **kwargs)
    # read one flag
    elif flags is None or isinstance(flags, str):
        segs = DataQualityFlag.read(cache, flags, coalesce=False, **kwargs)
        if segs.known:
            segs.known &= segments
        else:
            segs.known = segments
        segs.active &= segments
        return segs
    # read lots of flags
    else:
        segs = DataQualityDict.read(cache, flags, coalesce=True, **kwargs)
        for name, flag in segs.items():
            flag.known &= segments
            flag.active &= segments
        return segs
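
# Hypothetical usage sketch (not part of the original): query a single
# data-quality flag over a (start, end) span; the flag name, dates and
# the default database URL used here are illustrative only.
flag = get_segments('L1:DMT-ANALYSIS_READY:1',
                    ('Jan 1 2020', 'Jan 2 2020'))
print(flag.active)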
Ejemplo n.º 39
0
 def test_to_gps(self):
     gps = time.to_gps(DATE)
     self.assertEqual(gps, GPS)
Ejemplo n.º 40
0
def read_virgo_timeseries(source,
                          channel,
                          t0,
                          gstop_or_dur,
                          mask=False,
                          fill_value=np.nan,
                          remote=False):
    """Function to read virgo data as timeseries.
       This should one day be included in gwpy.
   
    Parameters
    ----------
    source : `str`
        Frame file, either a full path to an ffl or gwf file, or an
        abbreviation like 'raw' or 'trend', which is looked up in
        the standard location. If omitted, defaults to 'raw',
        but this default value is deprecated.

    channel : `str`
        Source datastream for these data.
        If the 'V1:' prefix is missing, it is added.

    t0 : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
        GPS epoch corresponding to starting time,
        any input parsable by `~gwpy.time.to_gps` is fine

    gstop_or_dur : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
        GPS epoch corresponding to the end time,
        any input parsable by `~gwpy.time.to_gps` is fine.
        If a `float` < 1e6 is provided, it corresponds to a duration
        in seconds from `t0`.

    mask : `bool`, optional
        If mask is False, missing samples will be replaced
        by fill_value. If it is True, the returned FrVect will
        have an attribute missing, which is a mask vector that
        is zero for missing samples.
        Default: False

    fill_value : `float`, optional
        Value that is used for missing samples if mask is False.
        Default: np.nan

    remote : `bool`, optional
        If False, use PythonVirgoTools to parse raw data files.
        If True, use gwpy.TimeSeries.get(), but this takes longer.
        Default: False, assuming the script is run on a Virgo server.

    Examples
    --------
    Load the data from channel 'INJ_IMC_TRA_DC', from
    Sep 14 2015 09:50:45.391, for a duration of 10 s:

    >>> x = read_virgo_timeseries('raw', 'INJ_IMC_TRA_DC',
    ...                           'Sep 14 2015 09:50:45.391', 10)

    which can then be visualised simply with:

    >>> plot = x.plot()
    >>> plot.show()

    The same, using two GPS times:

    >>> x = read_virgo_timeseries('raw', 'INJ_IMC_TRA_DC',
    ...                           1126259462.3910, 1126259472.3910)

    """

    # Convert to GPS times in seconds.
    # Use gpsSeconds and gpsNanoSeconds rather than ns(): the latter would
    # need a multiplication by 1e-9, which can introduce rounding errors.
    gps = to_gps(t0)
    # zero-pad the nanoseconds so that e.g. 5 ns does not become 0.5 s
    gstart = float('%d.%09d' % (gps.gpsSeconds, gps.gpsNanoSeconds))

    gps = to_gps(gstop_or_dur)
    gend = float('%d.%09d' % (gps.gpsSeconds, gps.gpsNanoSeconds))

    if gend < 1e6:
        # small values are interpreted as a duration in seconds from t0
        gstop = gstart + gend
    else:
        gstop = gend

    # If the script is running on Virgo's server.
    if not remote:
        from virgotools import getChannel
        # Parse Virgo files
        with getChannel(source,
                        channel,
                        gstart,
                        gstop,
                        mask=mask,
                        fill_value=fill_value) as data:
            data = TimeSeries(data.data,
                              unit=data.unit,
                              t0=gstart,
                              dt=data.dt,
                              channel=channel)
    else:
        # If not running the script on a Virgo server; this takes longer.
        # The query works, but crashes when computing a q_transform.
        # The data might not be in the same format as with PythonVirgoTools.
        # Further checks are required.

        if channel[:3] not in ['V1:', 'H1:', 'L1:']:
            print('When accessing the data outside the Virgo server, the '
                  'channel must start with `V1:`, `H1:` or `L1:`')
        data = TimeSeries.get(channel, gstart, gstop)
    return data
Ejemplo n.º 41
0
 def gps_time():
     gps = to_gps(datetime.datetime.now(tz=datetime.timezone.utc))
     return gps.seconds + gps.nanoseconds * 10**-9
Ejemplo n.º 42
0
 def set_gps_times(namespace, startdate, enddate):
     setattr(namespace, 'gpsstart', to_gps(startdate))
     setattr(namespace, 'gpsend', to_gps(enddate))
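
# Hypothetical usage sketch (not part of the original): populate an
# argparse Namespace with GPS start/stop attributes from readable dates,
# assuming set_gps_times is available as a plain function.
import argparse

ns = argparse.Namespace()
set_gps_times(ns, 'Nov 1 2019 01:00:00', 'Mar 20 2020 00:00:00')
print(ns.gpsstart, ns.gpsend)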
Ejemplo n.º 43
0
def get_segments(flags,
                 segments,
                 cache=None,
                 url='https://segdb-er.ligo.caltech.edu',
                 **kwargs):
    """Fetch some segments from the segment database

    Parameters
    ----------
    flags : `str`, `list`
        one or more flags for which to query
    segments : `~gwpy.segments.DataQualityFlag`, `~gwpy.segments.SegmentList`
        span over which to query for flag segments
    cache : `~glue.lal.Cache`, optional
        cache of files to use as data source
    url : `str`
        URL of segment database, if ``cache`` is not given
    **kwargs
        other keyword arguments to pass to either
        `~gwpy.segments.DataQualityFlag.read` (if ``cache`` is given) or
        `~gwpy.segments.DataQualityFlag.query` (otherwise)

    Returns
    -------
    segments : `~gwpy.segments.DataQualityFlag`,
               `~gwpy.segments.DataQualityDict`
        a single `~gwpy.segments.DataQualityFlag` (if ``flags`` is given
        as a `str`), or a `~gwpy.segments.DataQualityDict` (if ``flags``
        is given as a `list`)
    """
    # format segments
    if isinstance(segments, DataQualityFlag):
        segments = segments.active
    elif isinstance(segments, tuple):
        segments = [Segment(to_gps(segments[0]), to_gps(segments[1]))]
    segments = SegmentList(segments)

    # get format for files
    if cache is not None and not isinstance(cache, Cache):
        kwargs.setdefault(
            'format',
            _get_valid_format('read', DataQualityFlag, None, None,
                              (cache[0], ), {}))

    # populate an existing set of flags
    if isinstance(flags, (DataQualityFlag, DataQualityDict)):
        return flags.populate(source=cache or url, segments=segments, **kwargs)
    # query one flag
    elif cache is None and isinstance(flags, str):
        return DataQualityFlag.query(flags, segments, url=url, **kwargs)
    # query lots of flags
    elif cache is None:
        return DataQualityDict.query(flags, segments, url=url, **kwargs)
    # read one flag
    elif flags is None or isinstance(flags, str):
        segs = DataQualityFlag.read(cache, flags, coalesce=False, **kwargs)
        if segs.known:
            segs.known &= segments
        else:
            segs.known = segments
        segs.active &= segments
        return segs
    # read lots of flags
    else:
        segs = DataQualityDict.read(cache, flags, coalesce=True, **kwargs)
        for name, flag in segs.items():
            flag.known &= segments
            flag.active &= segments
        return segs
Ejemplo n.º 44
0
 def __call__(self, parser, namespace, values, option_string=None):
     try:
         values = float(values)
     except (TypeError, ValueError):
         pass
     setattr(namespace, self.dest, to_gps(values))
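
# Hypothetical usage sketch (not part of the original source): the __call__
# above is assumed to belong to an argparse.Action subclass that converts
# command-line values into GPS times, roughly like this:
import argparse
from gwpy.time import to_gps


class GPSAction(argparse.Action):
    """Parse a command-line value into a GPS time via to_gps."""

    def __call__(self, parser, namespace, values, option_string=None):
        try:
            values = float(values)
        except (TypeError, ValueError):
            pass
        setattr(namespace, self.dest, to_gps(values))


parser = argparse.ArgumentParser()
parser.add_argument('--gps-start', action=GPSAction,
                    help='GPS time or any date string parsable by to_gps')
args = parser.parse_args(['--gps-start', 'Jan 1 2020'])
print(args.gps_start)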