Example #1
File: all.py Project: tjma12/gwsumm
def generate_all_state(start, end, register=True, **kwargs):
    """Build a new `SummaryState` for the given [start, end) interval.

    Parameters
    ----------
    start : `~gwpy.time.LIGOTimeGPS`, float
        the GPS start time of the current analysis
    end : `~gwpy.time.LIGOTimeGPS`, float
        the GPS end time of the current analysis
    register : `bool`, optional
        should the new `SummaryState` be registered, default `True`
    **kwargs
        other keyword arguments passed to the `SummaryState` constructor

    Returns
    -------
    allstate : `SummaryState`
        the newly created 'All' `SummaryState`
    """
    now = min(end, NOW)
    all_ = SummaryState(ALLSTATE,
                        known=SegmentList([Segment(start, end)]),
                        active=SegmentList([Segment(start, now)]),
                        **kwargs)
    all_.ready = True
    if register:
        register_state(all_)
    return all_
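A minimal usage sketch (hedged: the GPS times are placeholders, and gwsumm's state module must be importable):

# hypothetical call; register=False avoids touching the global state registry
state = generate_all_state(1126051217, 1126137617, register=False)
print(state.known)   # spans [start, end)
print(state.active)  # clipped at NOW if `end` lies in the future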
Example #2
    def test_add_state_segments(self):
        fig, ax = self.new()

        # mock up some segments and add them as 'state' segments
        segs = SegmentList([Segment(1, 2), Segment(4, 5)])
        segax = fig.add_state_segments(segs)

        # check that the new axes aligns with the parent
        utils.assert_array_equal(segax.get_position().intervalx,
                                 ax.get_position().intervalx)
        coll = segax.collections[0]
        for seg, path in zip(segs, coll.get_paths()):
            utils.assert_array_equal(path.vertices,
                                     [(seg[0], -.4), (seg[1], -.4),
                                      (seg[1], .4), (seg[0], .4),
                                      (seg[0], -.4)])

        with pytest.raises(ValueError):
            fig.add_state_segments(segs, location='left')

        # test that this doesn't work with non-timeseries axes
        fig = self.FIGURE_CLASS()
        ax = fig.gca(projection='rectilinear')
        with pytest.raises(ValueError) as exc:
            fig.add_state_segments(segs)
        assert str(exc.value) == ("No 'timeseries' Axes found, cannot anchor "
                                  "new segment Axes.")
Example #3
    def __init__(self, parent=None, tssb=None, conc=0.1):
        super(DataNode, self).__init__(parent=parent, tssb=tssb)

        # pi is a first-class citizen
        self.pi = 0.0
        self.param = 0.0
        self.param1 = 0.0
        self.pi1 = 0.0  # used in MH to store old state

        self.path = None  # set of nodes from root to this node
        self.ht = 0.0

        if parent is None:
            self._conc = conc
            self.pi = 1.0
            self.param = 1.0

        else:
            self.pi = rand(1) * parent.pi
            parent.pi = parent.pi - self.pi
            self.param = self.pi

        # the initialisation here should be set to a SegmentList
        self.varphiR = SegmentList([Segment(0, 1)])
        self.piR = SegmentList([Segment(0, 1)])
        self.epsilon = ""
        # tssb node
        self.tNode = None
Example #4
 def _read(q, pstart, pend):
     try:
         # don't go beyond the requested limits
         pstart = float(max(start, pstart))
         pend = float(min(end, pend))
         # if resampling TimeSeries, pad by 8 seconds inside cache limits
         if cls not in (StateVector, StateVectorDict) and resample:
             cstart = float(max(cspan[0], pstart - 8))
             subcache = cache.sieve(segment=Segment(cstart, pend))
             out = cls.read(subcache,
                            channel,
                            format=format,
                            start=cstart,
                            end=pend,
                            resample=None,
                            **kwargs)
             out = out.resample(resample)
             q.put(out.crop(pstart, pend))
         else:
             subcache = cache.sieve(segment=Segment(pstart, pend))
             q.put(
                 cls.read(subcache,
                          channel,
                          format=format,
                          start=pstart,
                          end=pend,
                          resample=resample,
                          **kwargs))
     except Exception as e:
         q.put(e)
Example #5
def test_get_frame_segments(find):
    assert segments.get_frame_segments("X", "X1_R", 0, 100) == SegmentList(
        [Segment(0, 10), Segment(20, 30)])
    assert segments.get_frame_segments(
        "X",
        "X1_R",
        25,
        100,
    ) == SegmentList([Segment(25, 30)])
Example #6
    def test_query_dqsegdb_multi(self):
        segs = SegmentList([Segment(0, 2), Segment(8, 10)])
        result = query_dqsegdb(self.TEST_CLASS.query_dqsegdb, QUERY_FLAGS[0],
                               segs)
        RESULT = QUERY_RESULTC[QUERY_FLAGS[0]]

        assert isinstance(result, self.TEST_CLASS)
        utils.assert_segmentlist_equal(result.known, RESULT.known & segs)
        utils.assert_segmentlist_equal(result.active, RESULT.active & segs)
Example #7
 def test_query_dqsegdb_multi(self):
     querymid = int(QUERY_START + (QUERY_END - QUERY_START) / 2.)
     segs = SegmentList(
         [Segment(QUERY_START, querymid),
          Segment(querymid, QUERY_END)])
     flag = QUERY_FLAGS[0]
     result = self._query(DataQualityFlag.query, flag, segs, url=QUERY_URL)
     self.assertEqual(result.known, QUERY_RESULT[flag].known)
     self.assertEqual(result.active, QUERY_RESULT[flag].active)
Example #8
def test_segments_from_sngl_burst():
    tab = io_ligolw.sngl_burst_from_times([1, 4, 7, 10], channel='test')
    segs = io_ligolw.segments_from_sngl_burst(tab, 1)
    assert_segmentlist_equal(
        segs['test'].active,
        SegmentList([
            Segment(0, 2),
            Segment(3, 5),
            Segment(6, 8),
            Segment(9, 11),
        ]))
Example #9
 def test_query_dqsegdb_multi(self):
     querymid = int(QUERY_START + (QUERY_END - QUERY_START) / 2.)
     segs = SegmentList(
         [Segment(QUERY_START, querymid),
          Segment(querymid, QUERY_END)])
     flag = self._query(DataQualityFlag.query_dqsegdb,
                        QUERY_FLAG,
                        segs,
                        url=QUERY_URL)
     self.assertEqual(flag.known, QUERY_KNOWN)
     self.assertEqual(flag.active, QUERY_ACTIVE)
Example #10
 def test_get_triggers(self):
     # test that trigfind raises a warning if the channel-level directory
     # doesn't exist
     with pytest.warns(UserWarning):
         out = triggers.get_triggers('X1:DOES_NOT_EXIST', 'omicron',
                                     SegmentList([Segment(0, 100)]))
     # check output type and columns
     self.assertIsInstance(out, numpy.ndarray)
     for col in ['time', 'frequency', 'snr']:
         self.assertIn(col, out.dtype.fields)
     # test that unknown ETG raises KeyError
     self.assertRaises(KeyError, triggers.get_triggers,
                       'X1:DOES_NOT_EXIST', 'fake-etg',
                       SegmentList([Segment(0, 100)]))
Example #11
def find_all_coincidences(triggers, channel, snrs, windows):
    """Find the number of coincs between each auxiliary channel and the primary

    Parameters
    ----------
    primary : `numpy.ndarray`
        an array of times for the primary channel
    auxiliary : `numpy.recarray`
        an array of triggers for a set of auxiliary channels
    snrs : `list` of `float`
        the SNR thresholds to use
    window : `list` of `float`
        the time windows to use
    """
    triggers.sort(order='time')
    windows = sorted(windows, reverse=True)
    snrs = sorted(snrs)
    coincs = dict((p, {}) for p in itertools.product(windows, snrs))

    for i, x in enumerate(triggers):
        if x['channel'] != channel:
            continue
        t = x['time']
        channels = dict((key, set()) for key in coincs)
        j = i - 1
        segs = [Segment(t-dt/2., t+dt/2.) for dt in windows]

        # define coincidence test
        def add_if_coinc(event):
            if event['channel'] == channel:
                return
            # materialise the matches so the emptiness check works on Python 3
            in_seg = [s for s in segs if s[0] <= event['time'] <= s[1]]
            if not in_seg:  # no triggers in window
                return
            for k, w in enumerate(in_seg):
                for snr in filter(lambda s: event['snr'] >= s, snrs):
                    channels[(windows[k], snr)].add(event['channel'])
            return 1

        # search left half-window
        while j >= 0:
            if not add_if_coinc(triggers[j]):
                break
            j -= 1
        j = i + 1
        # search right half-window
        while j < triggers.shape[0]:
            if not add_if_coinc(triggers[j]):
                break
            j += 1

        # count 'em up
        for p, cset in channels.items():
            for c in cset:
                try:
                    coincs[p][c] += 1
                except KeyError:
                    coincs[p][c] = 1

    return coincs
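A hedged usage sketch for the function above, with a hypothetical structured trigger array (the field names match what the code reads: time, channel, snr):

import numpy

# three triggers: one primary, one auxiliary within the windows, one far away
triggers = numpy.array(
    [(100.0, 'X1:PRIMARY', 10.0),
     (100.2, 'X1:AUX-A', 6.0),
     (107.0, 'X1:AUX-B', 12.0)],
    dtype=[('time', 'f8'), ('channel', 'U16'), ('snr', 'f8')])
coincs = find_all_coincidences(triggers, 'X1:PRIMARY',
                               snrs=[5, 8], windows=[0.5, 1.0])
# coincs[(1.0, 5)] == {'X1:AUX-A': 1}; the SNR-6 trigger at 100.2
# falls inside both windows but only clears the threshold of 5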
Example #12
def segments_from_array(array):
    """Convert a 2-dimensional `numpy.ndarray` to a `SegmentList`
    """
    out = SegmentList()
    for row in array:
        out.append(Segment(*row))
    return out
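A quick sketch converting a hypothetical 2-column array:

import numpy

array = numpy.array([[0., 10.], [20., 30.]])
segs = segments_from_array(array)
# segs == SegmentList([Segment(0.0, 10.0), Segment(20.0, 30.0)])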
Example #13
def divide_segmentlist(start, end, bins=4096, write=True, **kwargs):
    ''' Divide the given period into a segment list

    Parameters
    ----------
    start : `int`
        GPS start time of the given period
    end : `int`
        GPS end time of the given period
    bins : `int`, optional
        Width of each bin, in seconds. Default value is 4096 (= 2**12).
    write : `bool`, optional
        If `True` (default), write the segment list to
        ``./segmentlist/total.txt``.

    Returns
    -------
    segmentlist : `gwpy.segments.SegmentList`
        The divided segment list
    '''
    if ((end - start) % bins) != 0:
        raise ValueError('The given period is not divisible by the bin size!')

    _start = range(start, end, bins)
    _end = range(start + bins, end + bins, bins)
    segmentlist = SegmentList([Segment(s, e) for s, e in zip(_start, _end)])
    log.debug(segmentlist[0])
    log.debug(segmentlist[-1])
    if write:
        segmentlist.write('./segmentlist/total.txt')
    return segmentlist
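An illustrative call (assuming the module's `log` handler is configured; write=False keeps it from writing ./segmentlist/total.txt):

segs = divide_segmentlist(0, 16384, bins=4096, write=False)
# four segments: [0, 4096), [4096, 8192), [8192, 12288), [12288, 16384)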
Example #14
def find_kw(channel, start, end, base=None):
    """Find KW trigger XML files
    """
    span = Segment(to_gps(start), to_gps(end))
    channel = get_channel(channel)
    ifo = channel.ifo
    if base is None and channel.name.split(':', 1)[-1] == 'GDS-CALIB_STRAIN':
        tag = '%s-KW_HOFT' % ifo[0].upper()
        base = '/gds-%s/dmt/triggers/%s' % (ifo.lower(), tag)
    elif base is None:
        tag = '%s-KW_TRIGGERS' % ifo[0].upper()
        base = '/gds-%s/dmt/triggers/%s' % (ifo.lower(), tag)
    gps5 = int('%.5s' % start)
    end5 = int('%.5s' % end)
    out = Cache()
    append = out.append
    while gps5 <= end5:
        trigglob = os.path.join(
            base, '%s-%d' % (tag, gps5), '%s-*-*.xml' % tag)
        found = glob.glob(trigglob)
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
        gps5 += 1
    out.sort(key=lambda e: e.path)
    vprint("    Found %d files for %s (KW)\n"
           % (len(out), channel.ndsname))
    return out
Example #15
def find_dmt_omega(channel, start, end, base=None):
    """Find DMT-Omega trigger XML files
    """
    span = Segment(to_gps(start), to_gps(end))
    channel = get_channel(channel)
    ifo = channel.ifo
    if base is None and channel.name.split(':', 1)[-1] == 'GDS-CALIB_STRAIN':
        base = '/gds-%s/dmt/triggers/%s-HOFT_Omega' % (
            ifo.lower(), ifo[0].upper())
    elif base is None:
        raise NotImplementedError("This method doesn't know how to locate DMT "
                                  "Omega trigger files for %r" % str(channel))
    gps5 = int('%.5s' % start)
    end5 = int('%.5s' % end)
    out = Cache()
    append = out.append
    while gps5 <= end5:
        trigglob = os.path.join(
            base, str(gps5),
            '%s-%s_%s_%s_OmegaC-*-*.xml' % (
                ifo, channel.system, channel.subsystem, channel.signal))
        found = glob.glob(trigglob)
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
        gps5 += 1
    out.sort(key=lambda e: e.path)
    vprint("    Found %d files for %s (DMT-Omega)\n"
           % (len(out), channel.ndsname))
    return out
Example #16
 def fetch(self,
           config=GWSummConfigParser(),
           segdb_error='raise',
           datafind_error='raise',
           **kwargs):
     """Finalise this state by fetching its defining segments,
     either from global memory, or from the segment database
     """
     # check we haven't done this before
     if self.ready:
         return self
     # fetch data
     if self.definition:
         match = re.search('(%s)' % '|'.join(MATHOPS.keys()),
                           self.definition)
     else:
         match = None
     if self.filename:
         self._read_segments(self.filename)
     elif match:
         channel, thresh = self.definition.split(match.groups()[0])
         channel = channel.rstrip()
         thresh = float(thresh.strip())
         self._fetch_data(channel,
                          thresh,
                          match.groups()[0],
                          config=config,
                          datafind_error=datafind_error,
                          **kwargs)
     # fetch segments
     elif self.definition:
         self._fetch_segments(config=config,
                              segdb_error=segdb_error,
                              **kwargs)
     # fetch null
     else:
         start = config.getfloat(DEFAULTSECT, 'gps-start-time')
         end = config.getfloat(DEFAULTSECT, 'gps-end-time')
         self.known = [(start, end)]
         self.active = self.known
     # restrict to given hours
     if self.hours:
         segs_ = SegmentList()
         # get start day
          d = Time(float(self.start), format='gps', scale='utc').datetime
          d = d.replace(hour=0, minute=0, second=0, microsecond=0)
         end_ = Time(float(self.end), format='gps', scale='utc').datetime
         while d < end_:
             # get GPS of day
             t = to_gps(d)
             # for each [start, end) hour pair, build a segment
             for h0, h1 in self.hours:
                 segs_.append(Segment(t + h0 * 3600, t + h1 * 3600))
              # increment to the next day
             d += datetime.timedelta(1)
         self.known &= segs_
         self.active &= segs_
     # FIXME
     self.ready = True
     return self
Example #17
def find_best_frames(ifo, frametype, start, end, **kwargs):
    """Find frames for the given type, replacing with a better type if needed
    """
    # find cache for this frametype
    cache = find_frames(ifo, frametype, start, end, **kwargs)

    # check for gaps in current cache
    span = SegmentList([Segment(start, end)])
    gaps = span - cache_segments(cache)

    # if gaps and using aggregated h(t), check short files
    if abs(gaps) and frametype in SHORT_HOFT_TYPES:
        f2 = SHORT_HOFT_TYPES[frametype]
        vprint("    Gaps discovered in aggregated h(t) type "
               "%s, checking %s\n" % (frametype, f2))
        kwargs['gaps'] = 'ignore'
        cache.extend(
            filter(lambda e: file_segment(e) in gaps,
                   find_frames(ifo, f2, start, end, **kwargs)))
        new = int(abs(gaps - cache_segments(cache)))
        if new:
            vprint("    %ss extra coverage with frametype %s\n" % (new, f2))
        else:
            vprint("    No extra coverage with frametype %s\n" % f2)

    return cache, frametype
Example #18
def check_flag(flag, gpstime, duration, pad):
    """Check that a state flag is active during an entire analysis segment

    Parameters
    ----------
    flag : `str`
        state flag to check
    gpstime : `float`
        GPS time of required data
    duration : `float`
        duration (in seconds) of required data
    pad : `float`
        amount of extra data to read in at the start and end for filtering

    Returns
    -------
    check : `bool`
        Boolean switch to pass (`True`) or fail (`False`) depending on whether
        the given flag is active
    """
    # set GPS start and end time
    start = gpstime - duration / 2. - pad
    end = gpstime + duration / 2. + pad
    seg = Segment(start, end)
    # query for state segments
    active = DataQualityFlag.query(flag,
                                   start,
                                   end,
                                   url=DEFAULT_SEGMENT_SERVER).active
    # check that state flag is active during the entire analysis
    if (not active.intersects_segment(seg)) or (abs(active[0]) < abs(seg)):
        return False
    return True
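A hedged example call (the flag name and GPS time are placeholders, and this runs a live query against DEFAULT_SEGMENT_SERVER):

ok = check_flag('H1:DMT-ANALYSIS_READY:1', gpstime=1187008882, duration=32, pad=4)
# True only if the flag is active across [gpstime - 20, gpstime + 20)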
Example #19
def img_qtransform(data, To=2, frange=(10, 2048), qrange=(4, 64), vmin=0, qsplit=False, dT=2.0):
	"""
	this funciton performs the q-transform on the strain data and returns it as an rgb image
	inputs:
		data - TimeSeries - the strain data to be transformed
		frange - tuple - frequency range of the q-transform
		qrange - tuple - q factor range of the q-transform
		vmin - scalar - the value range for the colormap to cover. if None the full range will be covered.
			if vmax='auto' the range will be set automatically according to the q-transform values
		qsplit - if True split the qtransform to separate images for better resolution
	output:
		qimg - ndarray - array with the q-transform image
	"""

	# qt = data.q_transform(frange=frange, qrange=qrange, whiten=True, tres=0.002) # compute the q-transform with the built in function of gwpy
	# qt = qt[int(To/2.0 / qt.dt.value):-int(To/2.0 / qt.dt.value)] # crop the q-transform to remove half the overlap time at each end
	# vmax = qt.max()
	# print(vmax.value)
	vmax = 25.5 # I think this is the value used in gravityspy (if I understand correctly)
	if qsplit:
		qt = data.q_transform(frange=frange, qrange=qrange, whiten=True, tres=0.002) # compute the q-transform with the built in function of gwpy
		qt = qt[int(To/2.0 / qt.dt.value):-int(To/2.0 / qt.dt.value)] # crop the q-transform to remove half the overlap time at each end
		qimg = qimg_split(qt, dT=dT, vmin=vmin, vmax=vmax)
	else:
		t_center = data.times[int(len(data.times)/2)].value
		outseg = Segment(t_center - dT/2, t_center + dT/2)
		qt = data.q_transform(frange=frange, qrange=qrange, whiten=True, tres=0.002, gps=t_center, search=0.5, fres=0.5, outseg=outseg)
		qt = qt.crop(t_center - dT/2, t_center + dT/2)
		qimg = qimg_draw(qt, vmin, vmax)
	return qimg
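Hypothetical usage, assuming `strain` is a gwpy TimeSeries centred on an event with at least To seconds of padding on either side:

qimg = img_qtransform(strain, To=2, frange=(10, 2048), qrange=(4, 64), dT=2.0)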
Example #20
File: flag.py Project: tjma12/vet
def run(args, config):
    """Execute the flag study
    """
    if (args.label == 'Vetoes' and len(args.flag) == 1
            and not os.path.isfile(args.flag[0])):
        args.label = args.flag[0]

    vprint("\n-------------------------------------------------\n")
    vprint("Processing %s\n" % args.label)

    span = Segment(args.gps_start_time, args.gps_end_time)

    # format analysis state
    if args.analysis_flag:
        state = SummaryState(args.analysis_flag,
                             definition=args.analysis_flag,
                             known=[span])
        state.fetch(config=config)
    else:
        state = generate_all_state(*span)

    tab = FlagTab(args.label,
                  args.gps_start_time,
                  args.gps_end_time,
                  args.flag,
                  states=[state],
                  metrics=args.metrics,
                  channel=args.channel,
                  etg=args.trigger_format,
                  intersection=args.intersection)
    tab.index = 'index.html'
    tab.process(config=config)
    tab.write_html(ifo='VET')
Example #21
def use_segmentlist(f, arg1, segments, *args, **kwargs):
    """Decorator a method to convert incoming segments into a `SegmentList`
    """
    if isinstance(segments, DataQualityFlag):
        segments = segments.active
    elif not isinstance(segments, segmentlist):
        segments = SegmentList([Segment(*x) for x in segments])
    return f(arg1, segments, *args, **kwargs)
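Given its (f, arg1, segments, ...) signature this helper is shaped for the `decorator` package, but it can also be exercised directly; a sketch with a hypothetical target function:

def count_segments(name, segments):
    return len(segments)

n = use_segmentlist(count_segments, 'X1', [(0, 1), (3, 4)])
# count_segments receives SegmentList([Segment(0, 1), Segment(3, 4)]), so n == 2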
Example #22
def segmentlist_from_tree(tree, coalesce=False):
    """Read a `~ligo.segments.segmentlist` from a 'segments' `ROOT.Tree`
    """
    segs = SegmentList()
    for i in range(tree.GetEntries()):
        tree.GetEntry(i)
        segs.append(Segment(tree.start, tree.end))
    return segs
Example #23
 def test_prepend(self):
     a = self.read()
     a.crop(968654552, 968654552.5)
     b = self.read()
     b.crop(968654552.5, 968654553, copy=True)
     b.prepend(a)
     for key in b:
         self.assertEqual(b[key].span, Segment(968654552, 968654553))
Example #24
 def test_append(self):
     a = self.read()
     a.crop(968654552, 968654552.5, copy=True)
     b = self.read()
     b.crop(968654552.5, 968654553)
     a.append(b)
     for key in a:
         self.assertEqual(a[key].span, Segment(968654552, 968654553))
Example #25
 def make_cache():
     segs = SegmentList()
     cache = Cache()
     for seg in [(0, 1), (1, 2), (4, 5)]:
         d = seg[1] - seg[0]
         f = 'A-B-%d-%d.tmp' % (seg[0], d)
         cache.append(CacheEntry.from_T050017(f))
         segs.append(Segment(*seg))
     return cache, segs
Example #26
def test_get_triggers():
    # test that trigfind raises a warning if the channel-level directory
    # doesn't exist
    with pytest.warns(UserWarning):
        out = triggers.get_triggers('X1:DOES_NOT_EXIST', 'omicron',
                                    SegmentList([Segment(0, 100)]))
    # check output type and columns
    assert isinstance(out, Table)
    for col in ['time', 'frequency', 'snr']:
        assert col in out.dtype.names
Example #27
 def make_cache():
     segs = SegmentList()
     cache = Cache()
     for seg in [(0, 1), (1, 2), (4, 5)]:
         d = seg[1] - seg[0]
         _, f = tempfile.mkstemp(prefix='A-',
                                 suffix='-%d-%d.tmp' % (seg[0], d))
         cache.append(CacheEntry.from_T050017(f))
         segs.append(Segment(*seg))
     return cache, segs
Example #28
    def removeLeft(self, boundary):
        slRm = SegmentList([Segment(self._minU, boundary)])
        self._supportiveRanges = self._supportiveRanges - slRm
        self._supportiveRanges.coalesce()

        if 0 < len(self._supportiveRanges):
            self._minU = self._supportiveRanges[0][0]
            self._cumLens = self._get_cumulative_lens()
        else:
            self._minU = self._maxU
            self._cumLens = np.array([])
Example #29
def Seg_Split(start, end, frame):
    """Return a list of time segments that combine to form the period between
    start and end, with information gaps excluded.
    Arguments:
    start -- All times in all output segments are at or after this time
    end -- All times in all output segments are at or before this time
    frame -- String such as 'L1_R', determined by the desired observatory
             and type of frame.
    
    Returns:
    segs -- a list of tuples, each representing a usable time segment."""
    empties = []
    obsruns = List_Runs(start, end)
    runends = open('Obsrun_Endtimes.txt', 'r').readlines()
    if len(obsruns) == 0:
        return []
    if len(obsruns) > 1:
        for i in range(len(obsruns) - 1):
            empties.append(
                Segment(int(runends[obsruns[i] - 1][16:26]),
                        int(runends[obsruns[i + 1] - 1][1:11])))
    for run in obsruns:
        segstrings = open('Gaps/O{}-{}_Gaps.txt'.format(str(run), frame),
                          'r').readlines()
        for i in range(len(segstrings)):
            segstrings[i] = Segment(int(segstrings[i][1:11]),
                                    int(segstrings[i][16:26]))
        empties = empties + segstrings
    starts = [start] + [seg[1] for seg in empties]
    ends = [end] + [seg[0] for seg in empties]
    bookends = starts + ends
    bookends.sort()
    if bookends[-1] == end:
        bookends = bookends[bookends.index(start):]
    else:
        bookends = bookends[bookends.index(start):bookends.index(end) + 1]
    if bookends[1] in starts:
        bookends = bookends[1:]
    if bookends[-2] in ends:
        bookends = bookends[:-1]
    return Pair_Up(bookends, 0)
Example #30
    def extent(self):
        """The enclosing segment during which data have been fetched

        .. warning::

           This `extent` does not guarantee that all data in the middle
           have been fetched; gaps may be present depending on which
           segments were used

        :type: `~gwpy.segments.Segment`
        """
        return Segment(*self.segments.extent())
Example #31
def read_cache(cache, channel, start=None, end=None, resample=None,
               gap=None, pad=None, nproc=1, format=None, **kwargs):
    """Read a `TimeSeries` from a cache of data files using
    multiprocessing.

    The inner workings are agnostic of data type, but this function can
    only handle a single data type at a time.

    Parameters
    ----------
    cache : :class:`glue.lal.Cache`, `str`
        cache of GWF frame files, or path to a LAL-format cache file
        on disk
    channel : :class:`~gwpy.detector.channel.Channel`, `str`
        data channel to read from frames
    start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
        start GPS time of desired data
    end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional
        end GPS time of desired data
    resample : `float`, optional
        rate (samples per second) to resample
    format : `str`, optional
        name of data file format, e.g. ``gwf`` or ``hdf``.
    nproc : `int`, default: ``1``
        maximum number of independent frame reading processes, default
        is set to single-process file reading.
    gap : `str`, optional
        how to handle gaps in the cache, one of

        - 'ignore': do nothing, let the underlying reader method handle it
        - 'warn': do nothing except print a warning to the screen
        - 'raise': raise an exception upon finding a gap (default)
        - 'pad': insert a value to fill the gaps

    pad : `float`, optional
        value with which to fill gaps in the source data, only used if
        gap is not given, or `gap='pad'` is given

    Notes
    -----
    The number of independent processes spawned by this function is
    ``min(nproc, len(cache))``.

    Returns
    -------
    data : :class:`~gwpy.timeseries.TimeSeries`
        a new `TimeSeries` containing the data read from disk
    """
    from gwpy.segments import (Segment, SegmentList)

    cls = kwargs.pop('target', TimeSeries)
    # open cache from file if given
    if isinstance(cache, (unicode, str, file)):
        cache = open_cache(cache)

    # fudge empty cache
    if len(cache) == 0:
        return cls([], channel=channel, epoch=start)

    # use cache to get start end times
    cache.sort(key=lambda ce: ce.segment[0])
    if start is None:
        start = cache[0].segment[0]
    if end is None:
        end = cache[-1].segment[1]

    # get span
    span = Segment(start, end)
    if cls not in (StateVector, StateVectorDict) and resample:
        cache = cache.sieve(segment=span.protract(8))
    else:
        cache = cache.sieve(segment=span)
    cspan = Segment(cache[0].segment[0], cache[-1].segment[1])

    # check for gaps
    if gap is None and pad is not None:
        gap = 'pad'
    elif gap is None:
        gap = 'raise'
    segs = cache_segments(cache, on_missing='ignore') & SegmentList([span])
    if (len(segs) != 1 and gap.lower() == 'ignore') or gap.lower() == 'pad':
        pass
    elif len(segs) != 1:
        gaps = SegmentList([cspan]) - segs
        msg = ("The cache given to %s.read has gaps in it in the "
               "following segments:\n    %s"
               % (cls.__name__, '\n    '.join(map(str, gaps))))
        if gap.lower() == 'warn':
            warnings.warn(msg)
        else:
            raise ValueError(msg)
        segs = type(segs)([span])

    # if reading a small number of channels, try to use lalframe, it's faster
    if format is None and (
            isinstance(channel, str) or (isinstance(channel, (list, tuple)) and
            len(channel) <= MAX_LALFRAME_CHANNELS)):
        try:
            from lalframe import frread
        except ImportError:
            format = 'gwf'
        else:
            kwargs.pop('type', None)
            format = 'lalframe'
    # otherwise use the file extension as the format
    elif format is None:
        format = os.path.splitext(cache[0].path)[1][1:]

    # -- process multiple cache segments --------
    # this entry point loops this method for each segment

    if len(segs) > 1:
        out = None
        for seg in segs:
            new = read_cache(cache, channel, start=seg[0], end=seg[1],
                             resample=resample, nproc=nproc, format=format,
                             target=cls, **kwargs)
            if out is None:
                out = new.copy()
            else:
                out.append(new, gap='pad', pad=pad)
        return out

    # -- process single cache segment

    # force one frame per process minimum
    nproc = min(nproc, len(cache))

    # single-process
    if nproc <= 1:
        return cls.read(cache, channel, format=format, start=start, end=end,
                        resample=resample, **kwargs)

    # define how to read each frame
    def _read(q, pstart, pend):
        try:
            # don't go beyond the requested limits
            pstart = float(max(start, pstart))
            pend = float(min(end, pend))
            # if resampling TimeSeries, pad by 8 seconds inside cache limits
            if cls not in (StateVector, StateVectorDict) and resample:
                cstart = float(max(cspan[0], pstart - 8))
                subcache = cache.sieve(segment=Segment(cstart, pend))
                out = cls.read(subcache, channel, format=format, start=cstart,
                               end=pend, resample=None, **kwargs)
                out = out.resample(resample)
                q.put(out.crop(pstart, pend))
            else:
                subcache = cache.sieve(segment=Segment(pstart, pend))
                q.put(cls.read(subcache, channel, format=format, start=pstart,
                               end=pend, resample=resample, **kwargs))
        except Exception as e:
            q.put(e)

    # separate cache into parts
    fperproc = int(ceil(len(cache) / nproc))
    subcaches = [Cache(cache[i:i+fperproc]) for
                 i in range(0, len(cache), fperproc)]
    subsegments = SegmentList([Segment(c[0].segment[0], c[-1].segment[1])
                               for c in subcaches])

    # start all processes
    queue = ProcessQueue(nproc)
    proclist = []
    for subseg in subsegments:
        process = Process(target=_read, args=(queue, subseg[0], subseg[1]))
        process.daemon = True
        proclist.append(process)
        process.start()

    # get data and block
    data = [queue.get() for p in proclist]
    for process in proclist:
        process.join()
    for result in data:
        if isinstance(result, Exception):
            raise result

    # format and return
    if issubclass(cls, dict):
        try:
            data.sort(key=lambda tsd: tsd.values()[0].epoch.gps)
        except IndexError:
            pass
        out = cls()
        while len(data):
            tsd = data.pop(0)
            out.append(tsd)
            del tsd
        return out
    else:
        if cls in (TimeSeries, TimeSeriesDict):
            out = TimeSeriesList(*data)
        else:
            out = StateVectorList(*data)
        out.sort(key=lambda ts: ts.epoch.gps)
        ts = out.join(gap=gap)
        return ts
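A hedged invocation sketch (cache path, channel name, and GPS times are placeholders; note this function assumes Python 2-era dependencies such as `unicode` and `glue.lal.Cache`):

data = read_cache('frames.lcf', 'L1:GDS-CALIB_STRAIN',
                  start=1126259446, end=1126259478,
                  nproc=4, gap='pad', pad=0.0)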