Example #1
    def test_parstack(self):
        for i in range(100):
            narrays = random.randint(1, 5)
            arrays = [
                num.random.random(random.randint(5, 10))
                for j in range(narrays)
            ]
            offsets = num.random.randint(-5, 6, size=narrays).astype(num.int32)
            nshifts = random.randint(1, 10)
            shifts = num.random.randint(-5, 6,
                                        size=(nshifts,
                                              narrays)).astype(num.int32)
            weights = num.random.random((nshifts, narrays))

            for method in (0, 1):
                for nparallel in range(1, 5):
                    r1, o1 = parstack(arrays,
                                      offsets,
                                      shifts,
                                      weights,
                                      method,
                                      impl='openmp',
                                      nparallel=nparallel)

                    r2, o2 = parstack(arrays,
                                      offsets,
                                      shifts,
                                      weights,
                                      method,
                                      impl='numpy')

                    assert o1 == o2
                    assert numeq(r1, r2, 1e-9)
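
Note: the examples in this listing exercise pyrocko's parstack module. For orientation, here is a minimal sketch of the call pattern they all share; it assumes pyrocko is installed, and the argument meanings are inferred from the tests themselves rather than from official documentation.

import numpy as num
from pyrocko.parstack import parstack

arrays = [num.ones(8), num.ones(8)]           # one 1-D trace per entry
offsets = num.zeros(2, dtype=num.int32)       # start sample of each trace
shifts = num.zeros((3, 2), dtype=num.int32)   # one row of shifts per grid point
weights = num.ones((3, 2))                    # same shape as shifts

# method=0 returns the full weighted stack per shift row plus its sample offset
result, ioff = parstack(arrays, offsets, shifts, weights, 0, impl='numpy')
print(result.shape, ioff)   # -> (3, 8) and 0 here, since all shifts are zero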
Example #2
    def test_limited(self):
        arrays = [
            num.array([0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0], dtype=float),
            num.array([0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0], dtype=float),
            num.array([0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0], dtype=float)
        ]

        offsets = num.array([0, 0, 0], dtype=num.int32)
        shifts = -num.array([[8, 7, 6], [7, 6, 5], [6, 5, 4]], dtype=num.int32)

        weights = num.ones((3, 3), dtype=float)

        mat, ioff = parstack(arrays, offsets, shifts, weights, 0)

        ioff_total, nsamples_total = get_offset_and_length(
            arrays, offsets, shifts)

        mat0, ioff = parstack(arrays, offsets, shifts, weights, 0)

        neach = 3
        for ioff in range(0, nsamples_total, 3):
            mat, ioff_check = parstack(arrays,
                                       offsets,
                                       shifts,
                                       weights,
                                       0,
                                       offsetout=ioff_total + ioff,
                                       lengthout=neach)

            assert ioff_total + ioff == ioff_check

            num.testing.assert_almost_equal(mat0[:, ioff:ioff + neach], mat)
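
What method=0 computes can be stated as a plain superposition: each input array j is laid onto a common sample axis starting at offsets[j] + shifts[k, j] and accumulated with weight weights[k, j]. The following pure-NumPy reference is a sketch of that reading, reconstructed from the tests above rather than from pyrocko's sources.

import numpy as num

def parstack_reference(arrays, offsets, shifts, weights):
    # starts[k, j]: absolute sample index at which array j begins in shift row k
    nshifts, narrays = shifts.shape
    starts = offsets[num.newaxis, :] + shifts
    ioff = int(starts.min())
    iend = max(int(starts[k, j]) + arrays[j].size
               for k in range(nshifts) for j in range(narrays))
    result = num.zeros((nshifts, iend - ioff))
    for k in range(nshifts):
        for j in range(narrays):
            i0 = int(starts[k, j]) - ioff
            result[k, i0:i0 + arrays[j].size] += weights[k, j] * arrays[j]
    return result, ioff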
Example #3
    def test_parstack_cumulative(self):
        for i in range(10):
            narrays = random.randint(1, 5)
            arrays = [
                num.random.random(random.randint(5, 10))
                for j in range(narrays)
            ]
            offsets = num.random.randint(-5, 6, size=narrays).astype(num.int32)
            nshifts = random.randint(1, 10)
            shifts = num.random.randint(
                -5, 6, size=(nshifts, narrays)).astype(num.int32)
            weights = num.random.random((nshifts, narrays))

            for method in (0,):
                for nparallel in range(1, 4):
                    result, offset = parstack(
                        arrays, offsets, shifts, weights, method,
                        result=None,
                        nparallel=nparallel,
                        impl='openmp')

                    result1 = result.copy()
                    for k in range(5):
                        result, offset = parstack(
                            arrays, offsets, shifts, weights, method,
                            result=result,
                            nparallel=nparallel,
                            impl='openmp')

                        assert numeq(result, result1*(k+2), 1e-9)
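
The contract checked here: passing the previous output back in through result= accumulates the new stack onto it instead of allocating a fresh array. A minimal sketch, again assuming pyrocko is installed:

import numpy as num
from pyrocko.parstack import parstack

arrays = [num.arange(4, dtype=float)]
offsets = num.zeros(1, dtype=num.int32)
shifts = num.zeros((2, 1), dtype=num.int32)
weights = num.ones((2, 1))

acc, _ = parstack(arrays, offsets, shifts, weights, 0, impl='openmp')
first = acc.copy()
acc, _ = parstack(arrays, offsets, shifts, weights, 0,
                  result=acc, impl='openmp')
# acc now equals 2 * first, mirroring the assertion in the test above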
Example #4
    def test_parstack(self):
        for i in range(100):
            narrays = random.randint(1, 5)
            arrays = [
                num.random.random(random.randint(5, 10))
                for j in range(narrays)
            ]
            offsets = num.random.randint(-5, 6, size=narrays).astype(num.int32)
            nshifts = random.randint(1, 10)
            shifts = num.random.randint(
                -5, 6, size=(nshifts, narrays)).astype(num.int32)
            weights = num.random.random((nshifts, narrays))

            for method in (0, 1):
                for nparallel in range(1, 5):
                    r1, o1 = parstack(
                        arrays, offsets, shifts, weights, method,
                        impl='openmp',
                        nparallel=nparallel)

                    r2, o2 = parstack(
                        arrays, offsets, shifts, weights, method, impl='numpy')

                    assert o1 == o2
                    assert numeq(r1, r2, 1e-9)
Example #6
    def test_limited(self):
        arrays = [
            num.array([0, 0, 0, 0, 0, 0, 0, 1, 0, -1, 0], dtype=float),
            num.array([0, 0, 0, 0, 0, 0, 1, 0, -1, 0, 0], dtype=float),
            num.array([0, 0, 0, 0, 0, 1, 0, -1, 0, 0, 0], dtype=float)]

        offsets = num.array([0, 0, 0], dtype=num.int32)
        shifts = -num.array([
            [8, 7, 6],
            [7, 6, 5],
            [6, 5, 4]], dtype=num.int32)

        weights = num.ones((3, 3), dtype=float)

        mat, ioff = parstack(arrays, offsets, shifts, weights, 0)

        ioff_total, nsamples_total = get_offset_and_length(
            arrays, offsets, shifts)

        mat0, ioff = parstack(arrays, offsets, shifts, weights, 0)

        neach = 3
        for ioff in range(0, nsamples_total, 3):
            mat, ioff_check = parstack(
                arrays, offsets, shifts, weights, 0,
                offsetout=ioff_total + ioff,
                lengthout=neach)

            assert ioff_total + ioff == ioff_check

            num.testing.assert_almost_equal(
                mat0[:, ioff:ioff+neach], mat)
Example #7
    def benchmark(self):

        for nsamples in (10, 100, 1000, 10000):
            nrepeats = max(10, 1000 // nsamples)

            narrays = 20
            arrays = []
            for iarray in range(narrays):
                arrays.append(num.arange(nsamples, dtype=float))

            offsets = num.arange(narrays, dtype=num.int32)

            nshifts = 100
            shifts = num.zeros((nshifts, narrays), dtype=num.int32)
            weights = num.ones((nshifts, narrays))

            confs = [('numpy', 1)]
            for nparallel in range(1, multiprocessing.cpu_count() + 1):
                confs.append(('openmp', nparallel))

            for (impl, nparallel) in confs:
                t0 = time.time()
                for j in range(nrepeats):
                    r, o = parstack(
                        arrays, offsets, shifts, weights, 0,
                        impl=impl, nparallel=nparallel)

                t1 = time.time()

                t = t1-t0
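                # score: stacked samples processed per second, in units of 1e9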
                score = nsamples * narrays * nshifts * nrepeats / t / 1e9
                print('%s, %i, %i, %g' % (impl, nparallel, nsamples, score))
Example #9
def abeam(traces, delays, tmin, tmax):
    narrays = len(traces)

    # for tr in traces:
    #     tr.lowpass(4, 0.1)
    # trace.snuffle(traces)

    tr_tmins = np.array([tr.tmin for tr in traces], dtype=float)
    t0 = tr_tmins[0]
    deltat = traces[0].deltat

    offsets = np.round((tr_tmins - t0) / deltat).astype(np.int32)
    offsets = np.concatenate([offsets, offsets])
    
    nshifts = delays.shape[0]
    
    shifts = np.zeros((nshifts, narrays*2), dtype=np.int32)
    
    rshifts = delays / deltat
    fshifts = shifts[:, :narrays] = np.floor(rshifts)
    cshifts = shifts[:, narrays:] = np.ceil(rshifts)
    
    assert shifts.shape[1] == narrays*2

    arrays = [tr.get_ydata().astype(float) for tr in (traces + traces)]
    weights = np.zeros(shifts.shape, dtype=float)
    weights[:, :narrays] = 1.0 - (rshifts - fshifts)
    weights[:, narrays:] = (1.0 - (cshifts - rshifts)) * (cshifts - fshifts)
    
    p, _ = parstack(arrays, offsets, shifts, weights, 1)

    p = p/p.max()
    return p
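
The doubled arrays in abeam implement linear interpolation of fractional sample shifts: each real-valued shift r is split into floor(r) and ceil(r), the two integer-shifted copies of every trace are blended with complementary weights, and the factor (cshifts - fshifts) zeroes the ceiling copy whenever r is already integral, so nothing is counted twice. A self-contained check of that weighting:

import numpy as np

r = 2.3
f, c = np.floor(r), np.ceil(r)
w_floor = 1.0 - (r - f)             # 0.7: weight of the floor-shifted copy
w_ceil = (1.0 - (c - r)) * (c - f)  # 0.3: weight of the ceil-shifted copy; 0.0 when r is integral
assert abs(w_floor + w_ceil - 1.0) < 1e-12  # the two weights always sum to one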
Example #10
    def test_parstack_limited(self):
        for i in range(10):
            narrays = random.randint(1, 5)
            arrays = [
                num.random.random(random.randint(5, 10))
                for j in range(narrays)
            ]
            offsets = num.random.randint(-5, 6, size=narrays).astype(num.int32)
            nshifts = random.randint(1, 10)
            shifts = num.random.randint(-5, 6,
                                        size=(nshifts,
                                              narrays)).astype(num.int32)
            weights = num.random.random((nshifts, narrays))

            for nparallel in range(1, 5):
                r1, o1 = parstack(arrays,
                                  offsets,
                                  shifts,
                                  weights,
                                  0,
                                  nparallel=nparallel,
                                  impl='openmp')

                for impl in ['openmp', 'numpy']:
                    r2, o2 = parstack(arrays,
                                      offsets,
                                      shifts,
                                      weights,
                                      0,
                                      lengthout=r1.shape[1],
                                      offsetout=o1,
                                      nparallel=nparallel,
                                      impl=impl)

                    assert o1 == o2
                    num.testing.assert_almost_equal(r1, r2, decimal=9)

                    n = r1.shape[1]
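                    # window shifted left by k: r1's samples appear k columns later in r3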
                    for k in range(n):
                        r3, o3 = parstack(arrays,
                                          offsets,
                                          shifts,
                                          weights,
                                          0,
                                          lengthout=n,
                                          offsetout=o1 - k,
                                          nparallel=nparallel,
                                          impl=impl)

                        assert o3 == o1 - k
                        num.testing.assert_almost_equal(r1[:, :n - k],
                                                        r3[:, k:],
                                                        decimal=9)

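                    # window shifted right by k: r3 starts k samples into r1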
                    for k in range(n):
                        r3, o3 = parstack(arrays,
                                          offsets,
                                          shifts,
                                          weights,
                                          0,
                                          lengthout=n,
                                          offsetout=o1 + k,
                                          nparallel=nparallel,
                                          impl=impl)

                        assert o3 == o1 + k
                        num.testing.assert_almost_equal(r1[:, k:],
                                                        r3[:, :n - k],
                                                        decimal=9)

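                    # window shortened to n - k samples: r3 equals the head of r1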
                    for k in range(n):
                        r3, o3 = parstack(arrays,
                                          offsets,
                                          shifts,
                                          weights,
                                          0,
                                          lengthout=n - k,
                                          offsetout=o1,
                                          nparallel=nparallel,
                                          impl=impl)

                        assert o3 == o1
                        num.testing.assert_almost_equal(r1[:, :n - k],
                                                        r3[:, :],
                                                        decimal=9)
Example #11
    def _off_test_synthetic(self):

        from pyrocko import gf

        km = 1000.
        nstations = 10
        edepth = 5 * km
        store_id = 'crust2_d0'

        swin = 2.
        lwin = 9. * swin
        ks = 1.0
        kl = 1.0
        kd = 3.0

        engine = gf.get_engine()
        snorths = (num.random.random(nstations) - 1.0) * 50 * km
        seasts = (num.random.random(nstations) - 1.0) * 50 * km
        targets = []
        for istation, (snorth, seast) in enumerate(zip(snorths, seasts)):
            targets.append(
                gf.Target(quantity='displacement',
                          codes=('', 's%03i' % istation, '', 'Z'),
                          north_shift=float(snorth),
                          east_shift=float(seast),
                          store_id=store_id,
                          interpolation='multilinear'))

        source = gf.DCSource(north_shift=50 * km,
                             east_shift=50 * km,
                             depth=edepth)

        store = engine.get_store(store_id)

        response = engine.process(source, targets)
        trs = []

        station_traces = defaultdict(list)
        station_targets = defaultdict(list)
        for source, target, tr in response.iter_results():
            tp = store.t('any_P', source, target)
            t = tp - 5 * tr.deltat + num.arange(11) * tr.deltat
            if False:
                gauss = trace.Trace(tmin=t[0],
                                    deltat=tr.deltat,
                                    ydata=num.exp(-((t - tp)**2) /
                                                  ((2 * tr.deltat)**2)))

                tr.ydata[:] = 0.0
                tr.add(gauss)

            trs.append(tr)
            station_traces[target.codes[:3]].append(tr)
            station_targets[target.codes[:3]].append(target)

        station_stalta_traces = {}
        for nsl, traces in station_traces.items():
            etr = None
            for tr in traces:
                sqr_tr = tr.copy(data=False)
                sqr_tr.ydata = tr.ydata**2
                if etr is None:
                    etr = sqr_tr
                else:
                    etr += sqr_tr

            autopick.recursive_stalta(swin, lwin, ks, kl, kd, etr)
            etr.set_codes(channel='C')

            station_stalta_traces[nsl] = etr

        trace.snuffle(trs + list(station_stalta_traces.values()))
        deltat = trs[0].deltat

        nnorth = 50
        neast = 50

        size = 200 * km

        north = num.linspace(-size, size, nnorth)
        north2 = num.repeat(north, neast)
        east = num.linspace(-size, size, neast)
        east2 = num.tile(east, nnorth)
        depth = 5 * km

        def tcal(target, i):
            try:
                return store.t(
                    'any_P',
                    gf.Location(north_shift=north2[i],
                                east_shift=east2[i],
                                depth=depth), target)

            except gf.OutOfBounds:
                return 0.0

        nsls = sorted(station_stalta_traces.keys())

        tts = num.fromiter((tcal(station_targets[nsl][0], i)
                            for i in range(nnorth * neast) for nsl in nsls),
                           dtype=float)

        arrays = [
            station_stalta_traces[nsl].ydata.astype(float) for nsl in nsls
        ]
        offsets = num.array(
            [int(round(station_stalta_traces[nsl].tmin / deltat))
             for nsl in nsls], dtype=num.int32)
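        # sign convention: travel times enter as negative shifts, so energy
        # from a matching trial source location aligns at a common sample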
        shifts = -num.array([int(round(tt / deltat)) for tt in tts],
                            dtype=num.int32).reshape(nnorth * neast, nstations)
        weights = num.ones((nnorth * neast, nstations))

        print(shifts[25 * neast + 25] * deltat)

        print(offsets.dtype, shifts.dtype, weights.dtype)

        print('stack start')
        mat, ioff = parstack(arrays, offsets, shifts, weights, 1)
        print('stack stop')

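        # method=1 reduces each stacked trace to a single value per shift row
        # (one per grid node), hence the reshape onto the nnorth x neast grid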
        mat = num.reshape(mat, (nnorth, neast))

        from matplotlib import pyplot as plt

        fig = plt.figure()

        axes = fig.add_subplot(1, 1, 1, aspect=1.0)

        axes.contourf(east / km, north / km, mat)

        axes.plot(
            g(targets, 'east_shift') / km,
            g(targets, 'north_shift') / km, '^')
        axes.plot(source.east_shift / km, source.north_shift / km, 'o')
        plt.show()
Example #12
    def call(self):

        self.cleanup()
        figs = []
        azi_theo = None
        method = {'stack': 0,
                  'correlate': 2}[self.method]

        bazis = num.arange(0., 360.+self.delta_bazi, self.delta_bazi)
        slownesses = num.arange(self.slowness_min/km,
                                self.slowness_max/km,
                                self.slowness_delta/km)
        n_bazis = len(bazis)
        n_slow = len(slownesses)

        viewer = self.get_viewer()
        event = viewer.get_active_event()

        stations = self.get_stations()
        stations_dict = dict(zip([viewer.station_key(s) for s in stations],
                                 stations))

        traces_pile = self.get_pile()
        deltats = list(traces_pile.deltats.keys())
        if len(deltats) > 1:
            self.fail('sampling rates differ in dataset')
        else:
            deltat_cf = deltats[0]

        tinc_use = self.get_tinc_use(precision=deltat_cf)

        if self.ntaper:
            taper = num.hanning(int(self.ntaper))
        else:
            taper = None

        frames = None
        t1 = time.time()

        # make sure that only visible stations are used
        use_stations = stations
        center_station = get_center_station(use_stations, select_closest=True)
        print('Center station: ', center_station)
        shift_table = get_shifts(
            stations=use_stations,
            center_station=center_station,
            bazis=bazis,
            slownesses=slownesses)

        shifts = num.round(shift_table / deltat_cf).astype(num.int32)

        # padding from maximum shift of traces:
        npad = num.max(num.abs(shifts))
        tpad = npad * deltat_cf

        # additional padding for cross over fading
        npad_fade = 0
        tpad_fade = npad_fade * deltat_cf

        npad += npad_fade
        tpad += tpad_fade

        frames = None
        tinc_add = tinc_use or 0

        def trace_selector(x):
            return util.match_nslc('*.*.*.%s' % self.want_channel, x.nslc_id)

        for traces in self.chopper_selected_traces(
                tinc=tinc_use, tpad=tpad, fallback=True,
                want_incomplete=False, trace_selector=trace_selector):

            if len(traces) == 0:
                self.fail('No traces matched')
                continue

            # should be correct
            t_min = traces[0].tmin
            t_max = traces[0].tmax

            use_stations = []
            for tr in traces:
                try:
                    use_stations.append(stations_dict[viewer.station_key(tr)])
                except KeyError:
                    self.fail('no trace %s' % ('.'.join(tr.nslc_id)))

            shift_table = get_shifts(
                stations=use_stations,
                center_station=center_station,
                bazis=bazis,
                slownesses=slownesses)

            shifts = num.round(shift_table / deltat_cf).astype(num.int32)

            wmin = traces[0].tmin
            wmax = wmin + tinc_add

            iwmin = int(round((wmin-wmin) / deltat_cf))
            iwmax = int(round((wmax-wmin) / deltat_cf))
            lengthout = iwmax - iwmin
            arrays = num.zeros((len(traces), lengthout + npad*2))

            for itr, tr in enumerate(traces):
                tr = tr.copy()
                if viewer.highpass:
                    tr.highpass(4, viewer.highpass, demean=True)
                else:
                    tr.ydata = num.asarray(
                        tr.ydata, dtype=float) - num.mean(tr.ydata)
                if viewer.lowpass:
                    tr.lowpass(4, viewer.lowpass)

                arrays[itr] = tr.get_ydata()

            # if viewer.highpass:
            #     arrays = highpass_array(
            #            arrays, deltat_cf, 4, viewer.highpass)
            # if viewer.lowpass:
            #     arrays = lowpass_array(arrays, deltat_cf, 4, viewer.lowpass)

            _arrays = []
            for itr, tr in enumerate(traces):
                if taper is not None:
                    ydata = fftconvolve(arrays[itr], taper, mode='same')
                else:
                    ydata = arrays[itr]
                _arrays.append(num.asarray(ydata, dtype=num.float64))
            arrays = _arrays

            offsets = num.array(
                [int(round((tr.tmin-wmin) / deltat_cf)) for tr in traces],
                dtype=num.int32)

            ngridpoints = len(bazis)*len(slownesses)
            weights = num.ones((ngridpoints, len(traces)))

            frames, ioff = parstack.parstack(
                arrays, offsets, shifts, weights, method,
                offsetout=iwmin,
                lengthout=lengthout,
                result=frames,
                impl='openmp')

            # theoretical bazi
            if event is not None:
                azi_theo = get_theoretical_backazimuth(
                    event, use_stations, center_station)
                print('theoretical azimuth %s degrees' % (azi_theo))

            print('processing time: %s seconds' % (time.time()-t1))

            if frames is None:
                self.fail('Could not process data!')
                return

            frames_reshaped = frames.reshape((n_bazis, n_slow, lengthout))
            times = num.linspace(t_min-tpad_fade, t_max+tpad_fade, lengthout)
            max_powers = num.max(frames, axis=0)

            # power maxima in blocks
            i_max_blocked = search_max_block(
                n_maxsearch=int(npad*self.search_factor), data=max_powers)

            max_powers += (num.min(max_powers)*-1)
            max_powers /= num.max(max_powers)
            max_powers *= max_powers
            weights = max_powers[i_max_blocked]
            block_max_times = times[i_max_blocked]

            _argmax = num.argmax(frames, axis=0)
            imax_bazi_all, imax_slow_all = num.unravel_index(
                _argmax, shape=(n_bazis, n_slow))

            local_max_bazi = bazis[imax_bazi_all][i_max_blocked]
            local_max_slow = slownesses[imax_slow_all][i_max_blocked]*km

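            # smooth in Cartesian slowness components (k_north, k_east) rather
            # than in raw back-azimuth, which would wrap at the 0/360 boundary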
            k_north = num.sin(local_max_bazi * d2r) * local_max_slow
            k_east = num.cos(local_max_bazi * d2r) * local_max_slow

            smooth = 4e7

            spline_north = UnivariateSpline(
                block_max_times, k_north, w=weights,
                s=smooth
            )

            spline_east = UnivariateSpline(
                block_max_times, k_east, w=weights,
                s=smooth,
            )

            k_north_fit = spline_north(times)
            k_east_fit = spline_east(times)

            bazi_fitted = num.arctan2(k_east_fit, k_north_fit) / d2r
            bazi_fitted -= 90.
            bazi_fitted *= -1.
            bazi_fitted[bazi_fitted < 0.] += 360.

            spline_slow = UnivariateSpline(
                block_max_times,
                local_max_slow,
                w=weights,
            )

            slow_fitted = spline_slow(times)
            i_bazi_fitted = value_to_index(
                bazi_fitted, 0., 360., self.delta_bazi)

            i_slow_fitted = value_to_index(
                slow_fitted, self.slowness_min, self.slowness_max,
                self.slowness_delta)

            i_shift = num.ravel_multi_index(
                num.vstack((i_bazi_fitted, i_slow_fitted)),
                (n_bazis, n_slow),
            )

            stack_trace = num.zeros(lengthout)
            i_base = num.arange(lengthout, dtype=int) + npad
            for itr, tr in enumerate(traces):
                isorting = num.clip(
                    i_base-shifts[i_shift, itr], npad, lengthout+npad)
                stack_trace += tr.ydata[isorting]

            beam_tr = trace.Trace(
                tmin=t_min+tpad, ydata=stack_trace, deltat=deltat_cf)

            self.add_trace(beam_tr)

            if self.want_all:

                # ---------------------------------------------------------
                # maxima search
                # ---------------------------------------------------------
                fig1 = self.new_figure('Max Power')
                nsubplots = 1
                ax = fig1.add_subplot(nsubplots, 1, 1)
                ax.plot(num.max(frames, axis=0))
                # --------------------------------------------------------------
                # coherence maps
                # --------------------------------------------------------------

                max_time = num.amax(frames, axis=0)
                imax_time = num.argmax(max_time)
                best_frame = num.amax(frames, axis=1)
                imax_bazi_slow = num.argmax(best_frame)
                imax_bazi, imax_slow = num.unravel_index(
                    num.argmax(best_frame),
                    shape=(n_bazis, n_slow))

                fig2 = self.new_figure('Slowness')
                data = frames_reshaped[imax_bazi, :, :]
                data_max = num.amax(frames_reshaped, axis=0)

                ax = fig2.add_subplot(211)
                ax.set_title('Global maximum slice')
                ax.set_ylabel('slowness [s/km]')
                ax.plot(times[imax_time], slownesses[imax_slow]*km, 'b.')
                ax.pcolormesh(times, slownesses*km, data)

                ax = fig2.add_subplot(212, sharex=ax, sharey=ax)
                ax.set_ylabel('slowness [s/km]')
                ax.pcolormesh(times, slownesses*km, data_max)
                ax.set_title('Maximum')

                # highlight block maxima
                ax.plot(block_max_times, local_max_slow, 'wo')

                ax.plot(times, num.clip(
                    slow_fitted, self.slowness_min, self.slowness_max)
                )

                fig3 = self.new_figure('Back-Azimuth')
                data = frames_reshaped[:, imax_slow, :]
                data_max = num.amax(frames_reshaped, axis=1)

                ax = fig3.add_subplot(211, sharex=ax)
                ax.set_title('Global maximum slice')
                ax.set_ylabel('back-azimuth')
                ax.pcolormesh(times, bazis, data)
                ax.plot(times[imax_time], bazis[imax_bazi], 'b.')

                ax = fig3.add_subplot(212, sharex=ax, sharey=ax)
                ax.set_ylabel('back-azimuth')
                ax.set_title('Maximum')
                ax.pcolormesh(times, bazis, data_max)

                # highlight block maxima
                ax.plot(block_max_times, local_max_bazi, 'wo')
                ax.plot(times, num.clip(bazi_fitted, 0, 360.))

                # xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
                # ax.xaxis.set_major_formatter(xfmt)
                # fig.autofmt_xdate()
                # fig.subplots_adjust(hspace=0)

                semblance = best_frame.reshape((n_bazis, n_slow))

                fig4 = self.new_figure('Max')
                theta, r = num.meshgrid(bazis, slownesses)
                theta *= (num.pi/180.)

                ax = fig4.add_subplot(111, projection='polar')
                m = ax.pcolormesh(theta.T, r.T*km, to_db(semblance))

                ax.plot(bazis[imax_bazi]*d2r, slownesses[imax_slow]*km, 'o')

                bazi_max = bazis[imax_bazi]*d2r
                slow_max = slownesses[imax_slow]*km
                ax.plot(bazi_max, slow_max, 'b.')
                ax.text(0.5, 0.01, 'Maximum at %s degrees, %s s/km' %
                        (num.round(bazi_max, 1), slow_max),
                        transform=fig4.transFigure,
                        horizontalalignment='center',
                        verticalalignment='bottom')

                if azi_theo:
                    ax.arrow(azi_theo/180.*num.pi, num.min(slownesses), 0,
                             num.max(slownesses), alpha=0.5, width=0.015,
                             edgecolor='black', facecolor='green', lw=2,
                             zorder=5)

                self.adjust_polar_axis(ax)
                fig4.colorbar(m)

                # ---------------------------------------------------------
                # CF and beam forming
                # ---------------------------------------------------------
                fig5 = self.new_figure('Beam')
                nsubplots = 4
                nsubplots += self.want_pws

                ax_raw = fig5.add_subplot(nsubplots, 1, 1)
                ax_shifted = fig5.add_subplot(nsubplots, 1, 2)
                ax_beam = fig5.add_subplot(nsubplots, 1, 3)
                ax_beam_new = fig5.add_subplot(nsubplots, 1, 4)

                axkwargs = dict(alpha=0.3, linewidth=0.3, color='grey')

                ybeam = num.zeros(lengthout)
                ybeam_weighted = num.zeros(lengthout)
                for i, (shift, array) in enumerate(zip(shifts.T, arrays)):
                    ax_raw.plot(times, array[npad: -npad], **axkwargs)
                    ishift = shift[imax_bazi_slow]
                    ax_shifted.plot(
                        times, array[npad-ishift: -npad-ishift], **axkwargs)

                    ydata = traces[i].get_ydata()[npad-ishift: -npad-ishift]
                    ybeam += ydata

                    # calculate phase weighting
                    if self.want_pws:
                        ph_inst = instantaneous_phase(ydata)
                        ybeam_weighted += num.abs(num.exp(ph_inst))**4

                ax_beam_new.plot(stack_trace)
                ax_beam_new.set_title('continuous mode')
                # ax_beam.plot(times, ybeam, color='black')
                ax_beam.plot(ybeam, color='black')
                ax_raw.set_title('Characteristic Function')
                ax_shifted.set_title('Shifted CF')
                ax_beam.set_title('Linear Stack')

                if self.want_pws:
                    ax_playground = fig5.add_subplot(nsubplots, 1, 5)
                    ax_playground.plot(ybeam*ybeam_weighted/len(arrays))
                    ax_playground.set_title('Phase Weighted Stack')

                # -----------------------------------------------------------
                # polar movie:
                # -----------------------------------------------------------
                fig6 = self.new_figure('Beam')
                self.polar_movie(
                    fig=fig6,
                    frames=frames,
                    times=times,
                    theta=theta.T,
                    r=r.T*km,
                    nth_frame=2,
                    n_bazis=n_bazis,
                    n_slow=n_slow,
                )

                self.draw_figures()

                self.irun += 1
Example #13
    def test_parstack_limited(self):
        for i in range(10):
            narrays = random.randint(1, 5)
            arrays = [
                num.random.random(random.randint(5, 10))
                for j in range(narrays)
            ]
            offsets = num.random.randint(-5, 6, size=narrays).astype(num.int32)
            nshifts = random.randint(1, 10)
            shifts = num.random.randint(
                -5, 6, size=(nshifts, narrays)).astype(num.int32)
            weights = num.random.random((nshifts, narrays))

            for nparallel in range(1, 5):
                r1, o1 = parstack(
                    arrays, offsets, shifts, weights, 0,
                    nparallel=nparallel,
                    impl='openmp')

                for impl in ['openmp', 'numpy']:
                    r2, o2 = parstack(
                        arrays, offsets, shifts, weights, 0,
                        lengthout=r1.shape[1],
                        offsetout=o1,
                        nparallel=nparallel,
                        impl=impl)

                    assert o1 == o2
                    assert numeq(r1, r2, 1e-9)

                    n = r1.shape[1]
                    for k in range(n):
                        r3, o3 = parstack(
                            arrays, offsets, shifts, weights, 0,
                            lengthout=n,
                            offsetout=o1-k,
                            nparallel=nparallel,
                            impl=impl)

                        assert o3 == o1-k
                        assert numeq(r1[:, :n-k], r3[:, k:], 1e-9)

                    for k in range(n):
                        r3, o3 = parstack(
                            arrays, offsets, shifts, weights, 0,
                            lengthout=n,
                            offsetout=o1+k,
                            nparallel=nparallel,
                            impl=impl)

                        assert o3 == o1+k
                        assert numeq(r1[:, k:], r3[:, :n-k], 1e-9)

                    for k in range(n):
                        r3, o3 = parstack(
                            arrays, offsets, shifts, weights, 0,
                            lengthout=n-k,
                            offsetout=o1,
                            nparallel=nparallel,
                            impl=impl)

                        assert o3 == o1
                        assert numeq(r1[:, :n-k], r3[:, :], 1e-9)
Example #14
    def off_test_synthetic(self):

        from pyrocko import gf

        km = 1000.
        nstations = 10
        edepth = 5*km
        store_id = 'crust2_d0'

        swin = 2.
        lwin = 9.*swin
        ks = 1.0
        kl = 1.0
        kd = 3.0

        engine = gf.get_engine()
        snorths = (num.random.random(nstations)-1.0) * 50*km
        seasts = (num.random.random(nstations)-1.0) * 50*km
        targets = []
        for istation, (snorth, seast) in enumerate(zip(snorths, seasts)):
            targets.append(
                gf.Target(
                    quantity='displacement',
                    codes=('', 's%03i' % istation, '', 'Z'),
                    north_shift=float(snorth),
                    east_shift=float(seast),
                    store_id=store_id,
                    interpolation='multilinear'))

        source = gf.DCSource(
            north_shift=50*km,
            east_shift=50*km,
            depth=edepth)

        store = engine.get_store(store_id)

        response = engine.process(source, targets)
        trs = []

        station_traces = defaultdict(list)
        station_targets = defaultdict(list)
        for source, target, tr in response.iter_results():
            tp = store.t('any_P', source, target)
            t = tp - 5 * tr.deltat + num.arange(11) * tr.deltat
            if False:
                gauss = trace.Trace(
                    tmin=t[0],
                    deltat=tr.deltat,
                    ydata=num.exp(-((t-tp)**2)/((2*tr.deltat)**2)))

                tr.ydata[:] = 0.0
                tr.add(gauss)

            trs.append(tr)
            station_traces[target.codes[:3]].append(tr)
            station_targets[target.codes[:3]].append(target)

        station_stalta_traces = {}
        for nsl, traces in station_traces.items():
            etr = None
            for tr in traces:
                sqr_tr = tr.copy(data=False)
                sqr_tr.ydata = tr.ydata**2
                if etr is None:
                    etr = sqr_tr
                else:
                    etr += sqr_tr

            autopick.recursive_stalta(swin, lwin, ks, kl, kd, etr)
            etr.set_codes(channel='C')

            station_stalta_traces[nsl] = etr

        trace.snuffle(trs + list(station_stalta_traces.values()))
        deltat = trs[0].deltat

        nnorth = 50
        neast = 50

        size = 400*km

        north = num.linspace(-size/2., size/2., nnorth)
        north2 = num.repeat(north, neast)
        east = num.linspace(-size/2., size/2., neast)
        east2 = num.tile(east, nnorth)
        depth = 5*km

        def tcal(target, i):
            try:
                return store.t(
                    'any_P',
                    gf.Location(
                        north_shift=north2[i],
                        east_shift=east2[i],
                        depth=depth),
                    target)

            except gf.OutOfBounds:
                return 0.0

        nsls = sorted(station_stalta_traces.keys())

        tts = num.fromiter((tcal(station_targets[nsl][0], i)
                           for i in range(nnorth*neast)
                           for nsl in nsls), dtype=float)

        arrays = [
            station_stalta_traces[nsl].ydata.astype(float) for nsl in nsls]
        offsets = num.array(
            [int(round(station_stalta_traces[nsl].tmin / deltat))
             for nsl in nsls], dtype=num.int32)
        shifts = -num.array(
            [int(round(tt / deltat))
             for tt in tts], dtype=num.int32).reshape(nnorth*neast, nstations)
        weights = num.ones((nnorth*neast, nstations))

        print(shifts[25*neast + 25] * deltat)

        print(offsets.dtype, shifts.dtype, weights.dtype)

        print('stack start')
        mat, ioff = parstack(arrays, offsets, shifts, weights, 1)
        print('stack stop')

        mat = num.reshape(mat, (nnorth, neast))

        from matplotlib import pyplot as plt

        fig = plt.figure()

        axes = fig.add_subplot(1, 1, 1, aspect=1.0)

        axes.contourf(east/km, north/km, mat)

        axes.plot(
            g(targets, 'east_shift')/km,
            g(targets, 'north_shift')/km, '^')
        axes.plot(source.east_shift/km, source.north_shift/km, 'o')
        plt.show()
Example #15
def search(config,
           override_tmin=None,
           override_tmax=None,
           show_detections=False,
           show_movie=False,
           show_window_traces=False,
           force=False,
           stop_after_first=False,
           nparallel=6,
           save_imax=False,
           bark=False):

    fp = config.expand_path

    run_path = fp(config.run_path)

    # if op.exists(run_path):
    #     if force:
    #         shutil.rmtree(run_path)
    #     else:
    #         raise common.LassieError(
    #             'run directory already exists: %s' %
    #             run_path)

    util.ensuredir(run_path)

    write_config(config, op.join(run_path, 'config.yaml'))

    ifm_path_template = config.get_ifm_path_template()
    detections_path = config.get_detections_path()
    events_path = config.get_events_path()
    figures_path_template = config.get_figures_path_template()

    config.setup_image_function_contributions()
    ifcs = config.image_function_contributions

    grid = config.get_grid()
    receivers = config.get_receivers()

    norm_map = gridmod.geometrical_normalization(grid, receivers)

    data_paths = fp(config.data_paths)
    for data_path in data_paths:
        if not op.exists(data_path):
            pass

    p = pile.make_pile(data_paths, fileformat='detect')
    if p.is_empty():
        raise common.LassieError('no usable waveforms found')

    for ifc in ifcs:
        ifc.prescan(p)

    shift_tables = []
    tshift_minmaxs = []
    for ifc in ifcs:
        shift_tables.append(ifc.get_table(grid, receivers))
        tshift_minmaxs.append(num.nanmin(shift_tables[-1]))
        tshift_minmaxs.append(num.nanmax(shift_tables[-1]))

    fsmooth_min = min(ifc.get_fsmooth() for ifc in ifcs)

    tshift_min = min(tshift_minmaxs)
    tshift_max = max(tshift_minmaxs)

    if config.detector_tpeaksearch is not None:
        tpeaksearch = config.detector_tpeaksearch
    else:
        tpeaksearch = (tshift_max - tshift_min) + 1.0 / fsmooth_min

    tpad = max(ifc.get_tpad() for ifc in ifcs) + \
        (tshift_max - tshift_min) + tpeaksearch

    tinc = (tshift_max - tshift_min) * 10. + 3.0 * tpad
    tavail = p.tmax - p.tmin
    tinc = min(tinc, tavail - 2.0 * tpad)

    if tinc <= 0:
        raise common.LassieError('available waveforms too short \n'
                                 'required: %g s\n'
                                 'available: %g s\n' % (2. * tpad, tavail))

    blacklist = set(tuple(s.split('.')) for s in config.blacklist)
    whitelist = set(tuple(s.split('.')) for s in config.whitelist)

    distances = grid.distances(receivers)
    distances_to_grid = num.min(distances, axis=0)

    distance_min = num.min(distances)
    distance_max = num.max(distances)

    station_index = dict(
        (rec.codes, i) for (i, rec) in enumerate(receivers)
        if rec.codes not in blacklist and (
            not whitelist or rec.codes in whitelist) and (
                config.distance_max is None
                or distances_to_grid[i] <= config.distance_max))

    check_data_consistency(p, config)

    deltat_cf = max(p.deltats.keys())
    assert deltat_cf > 0.0

    while True:
        if not all(ifc.deltat_cf_is_available(deltat_cf * 2) for ifc in ifcs):
            break

        deltat_cf *= 2
    logger.info('CF lassie sampling interval (rate): %g s (%g Hz)' %
                (deltat_cf, 1.0 / deltat_cf))

    ngridpoints = grid.size()

    logger.info('number of grid points: %i' % ngridpoints)
    logger.info('minimum source-receiver distance: %g m' % distance_min)
    logger.info('maximum source-receiver distance: %g m' % distance_max)
    logger.info('minimum travel-time: %g s' % tshift_min)
    logger.info('maximum travel-time: %g s' % tshift_max)

    idetection = 0

    tmin = override_tmin or config.tmin or p.tmin + tpad
    tmax = override_tmax or config.tmax or p.tmax - tpad

    events = config.get_events()
    twindows = []
    if events is not None:
        for ev in events:
            if tmin <= ev.time <= tmax:
                twindows.append(
                    (ev.time + tshift_min - (tshift_max - tshift_min) *
                     config.event_time_window_factor,
                     ev.time + tshift_min + (tshift_max - tshift_min) *
                     config.event_time_window_factor))

    else:
        twindows.append((tmin, tmax))

    for iwindow_group, (tmin_win, tmax_win) in enumerate(twindows):

        nwin = int(math.ceil((tmax_win - tmin_win) / tinc))

        logger.info('start processing time window group %i/%i: %s - %s' %
                    (iwindow_group + 1, len(twindows),
                     util.time_to_str(tmin_win), util.time_to_str(tmax_win)))

        logger.info('number of time windows: %i' % nwin)
        logger.info('time window length: %g s' % (tinc + 2.0 * tpad))
        logger.info('time window payload: %g s' % tinc)
        logger.info('time window padding: 2 x %g s' % tpad)
        logger.info('time window overlap: %g%%' % (100.0 * 2.0 * tpad /
                                                   (tinc + 2.0 * tpad)))

        iwin = -1

        for trs in p.chopper(
                tmin=tmin_win,
                tmax=tmax_win,
                tinc=tinc,
                tpad=tpad,
                want_incomplete=config.fill_incomplete_with_zeros,
                trace_selector=lambda tr: tr.nslc_id[:3] in station_index):
            iwin += 1
            trs_ok = []
            for tr in trs:
                if tr.ydata.size == 0:
                    logger.warning('skipping empty trace: %s.%s.%s.%s' %
                                   tr.nslc_id)

                    continue

                if not num.all(num.isfinite(tr.ydata)):
                    logger.warning('skipping trace because of invalid values: '
                                   '%s.%s.%s.%s' % tr.nslc_id)

                    continue

                trs_ok.append(tr)

            trs = trs_ok

            if not trs:
                continue

            logger.info('processing time window %i/%i: %s - %s' %
                        (iwin + 1, nwin, util.time_to_str(
                            trs[0].wmin), util.time_to_str(trs[0].wmax)))

            wmin = trs[0].wmin
            wmax = trs[0].wmax

            if config.fill_incomplete_with_zeros:
                trs = zero_fill(trs, wmin - tpad, wmax + tpad)

            t0 = math.floor(wmin / deltat_cf) * deltat_cf
            iwmin = int(round((wmin - tpeaksearch - t0) / deltat_cf))
            iwmax = int(round((wmax + tpeaksearch - t0) / deltat_cf))
            lengthout = iwmax - iwmin + 1

            pdata = []
            trs_debug = []
            parstack_params = []
            for iifc, ifc in enumerate(ifcs):
                dataset = ifc.preprocess(trs, wmin - tpeaksearch,
                                         wmax + tpeaksearch,
                                         tshift_max - tshift_min, deltat_cf)
                if not dataset:
                    continue

                nstations_selected = len(dataset)

                nsls_selected, trs_selected = zip(*dataset)

                for tr in trs_selected:
                    tr.meta = {'tabu': True}

                trs_debug.extend(trs + list(trs_selected))

                istations_selected = num.array(
                    [station_index[nsl] for nsl in nsls_selected],
                    dtype=int)
                arrays = [tr.ydata.astype(float) for tr in trs_selected]

                offsets = num.array(
                    [int(round((tr.tmin - t0) / deltat_cf))
                     for tr in trs_selected], dtype=num.int32)

                w = ifc.get_weights(nsls_selected)

                weights = num.ones((ngridpoints, nstations_selected))
                weights *= w[num.newaxis, :]
                weights *= ifc.weight

                shift_table = shift_tables[iifc]

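                # grid points with undefined travel times are masked out below:
                # zero weight and a harmless placeholder shift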
                ok = num.isfinite(shift_table[:, istations_selected])
                bad = num.logical_not(ok)

                shifts = -num.round(shift_table[:, istations_selected] /
                                    deltat_cf).astype(num.int32)

                weights[bad] = 0.0
                shifts[bad] = num.max(shifts[ok])

                pdata.append((list(trs_selected), shift_table, ifc))
                parstack_params.append((arrays, offsets, shifts, weights))

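            # stack in one pass, or in blocks of stacking_blocksize samples to
            # bound the memory held by the grid-by-time frames array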
            if config.stacking_blocksize is not None:
                ipstep = config.stacking_blocksize
                frames = None
            else:
                ipstep = lengthout
                frames = num.zeros((ngridpoints, lengthout))

            twall_start = time.time()
            frame_maxs = num.zeros(lengthout)
            frame_argmaxs = num.zeros(lengthout, dtype=int)
            ipmin = iwmin
            while ipmin < iwmin + lengthout:
                ipsize = min(ipstep, iwmin + lengthout - ipmin)
                if ipstep == lengthout:
                    frames_p = frames
                else:
                    frames_p = num.zeros((ngridpoints, ipsize))

                for (arrays, offsets, shifts, weights) in parstack_params:
                    frames_p, _ = parstack(arrays,
                                           offsets,
                                           shifts,
                                           weights,
                                           0,
                                           offsetout=ipmin,
                                           lengthout=ipsize,
                                           result=frames_p,
                                           nparallel=nparallel,
                                           impl='openmp')

                if config.sharpness_normalization:
                    frame_p_maxs = frames_p.max(axis=0)
                    frame_p_means = num.abs(frames_p).mean(axis=0)
                    frames_p *= (frame_p_maxs / frame_p_means)[num.newaxis, :]
                    frames_p *= norm_map[:, num.newaxis]

                if config.ifc_count_normalization:
                    frames_p *= 1.0 / len(ifcs)

                frame_maxs[ipmin-iwmin:ipmin-iwmin+ipsize] = \
                    frames_p.max(axis=0)
                frame_argmaxs[ipmin-iwmin:ipmin-iwmin+ipsize] = \
                    pargmax(frames_p)

                ipmin += ipstep
                del frames_p

            twall_end = time.time()

            logger.info('wallclock time for stacking: %g s' %
                        (twall_end - twall_start))

            tmin_frames = t0 + iwmin * deltat_cf

            tr_stackmax = trace.Trace('',
                                      'SMAX',
                                      '',
                                      '',
                                      tmin=tmin_frames,
                                      deltat=deltat_cf,
                                      ydata=frame_maxs)

            tr_stackmax.meta = {'tabu': True}

            trs_debug.append(tr_stackmax)

            if show_window_traces:
                trace.snuffle(trs_debug)

            ydata_window = tr_stackmax.chop(wmin, wmax,
                                            inplace=False).get_ydata()

            logger.info('CF stats: min %g, max %g, median %g' %
                        (num.min(ydata_window), num.max(ydata_window),
                         num.median(ydata_window)))
            # 17 is the maximum number of seiger stations; 4 is a mean noise
            # baseline per missing station.
            detector_threshold_seiger = config.detector_threshold - (
                (17 - nstations_selected) * 4)
            if nstations_selected != 17:
                logger.info(
                    'Station outage detected! Number of operable stations: '
                    '%s, threshold now: %s'
                    % (nstations_selected, detector_threshold_seiger))

            tpeaks, apeaks = list(
                zip(*[(tpeak, apeak) for (tpeak, apeak) in zip(
                    *tr_stackmax.peaks(detector_threshold_seiger, tpeaksearch))
                      if wmin <= tpeak and tpeak < wmax])) or ([], [])

            tr_stackmax_indx = tr_stackmax.copy(data=False)
            tr_stackmax_indx.set_ydata(frame_argmaxs.astype(num.int32))
            tr_stackmax_indx.set_location('i')

            for (tpeak, apeak) in zip(tpeaks, apeaks):

                iframe = int(round((tpeak - tmin_frames) / deltat_cf))
                imax = frame_argmaxs[iframe]

                latpeak, lonpeak, xpeak, ypeak, zpeak = \
                    grid.index_to_location(imax)

                idetection += 1

                detection = Detection(id='%06i' % idetection,
                                      time=tpeak,
                                      location=geo.Point(lat=float(latpeak),
                                                         lon=float(lonpeak),
                                                         x=float(xpeak),
                                                         y=float(ypeak),
                                                         z=float(zpeak)),
                                      ifm=float(apeak))

                if bark:
                    common.bark()

                logger.info('detection found: %s' % str(detection))

                f = open(detections_path, 'a')
                f.write(
                    '%06i %s %g %g %g %g %g %g\n' %
                    (idetection,
                     util.time_to_str(tpeak, format='%Y-%m-%d %H:%M:%S.6FRAC'),
                     apeak, latpeak, lonpeak, xpeak, ypeak, zpeak))

                f.close()

                ev = detection.get_event()
                f = open(events_path, 'a')
                model.dump_events([ev], stream=f)
                f.close()

                if show_detections or config.save_figures:
                    fmin = min(ifc.fmin for ifc in ifcs)
                    fmax = min(ifc.fmax for ifc in ifcs)

                    fn = figures_path_template % {
                        'id': util.tts(t0).replace(" ", "T"),
                        'format': 'png'
                    }

                    util.ensuredirs(fn)

                    if frames is not None:
                        frames_p = frames
                        tmin_frames_p = tmin_frames
                        iframe_p = iframe

                    else:
                        iframe_min = max(
                            0, int(round(iframe - tpeaksearch / deltat_cf)))
                        iframe_max = min(
                            lengthout - 1,
                            int(round(iframe + tpeaksearch / deltat_cf)))

                        ipsize = iframe_max - iframe_min + 1
                        frames_p = num.zeros((ngridpoints, ipsize))
                        tmin_frames_p = tmin_frames + iframe_min * deltat_cf
                        iframe_p = iframe - iframe_min

                        for (arrays, offsets, shifts, weights) \
                                in parstack_params:

                            frames_p, _ = parstack(
                                arrays, offsets, shifts, weights, 0,
                                offsetout=iwmin + iframe_min,
                                lengthout=ipsize,
                                result=frames_p,
                                nparallel=nparallel,
                                impl='openmp')

                        if config.sharpness_normalization:
                            frame_p_maxs = frames_p.max(axis=0)
                            frame_p_means = num.abs(frames_p).mean(axis=0)
                            frames_p *= (frame_p_maxs /
                                         frame_p_means)[num.newaxis, :]
                            frames_p *= norm_map[:, num.newaxis]

                        if config.ifc_count_normalization:
                            frames_p *= 1.0 / len(ifcs)
                    try:
                        plot.plot_detection(grid,
                                            receivers,
                                            frames_p,
                                            tmin_frames_p,
                                            deltat_cf,
                                            imax,
                                            iframe_p,
                                            xpeak,
                                            ypeak,
                                            zpeak,
                                            tr_stackmax,
                                            tpeaks,
                                            apeaks,
                                            detector_threshold_seiger,
                                            wmin,
                                            wmax,
                                            pdata,
                                            trs,
                                            fmin,
                                            fmax,
                                            idetection,
                                            tpeaksearch,
                                            movie=show_movie,
                                            show=show_detections,
                                            save_filename=fn,
                                            event=ev)
                    except Exception as e:
                        logger.warning(
                            'plotting of detection failed: %s' % e)

                    del frames_p

                if stop_after_first:
                    return

            tr_stackmax.chop(wmin, wmax)
            tr_stackmax_indx.chop(wmin, wmax)
            if save_imax:
                io.save([tr_stackmax, tr_stackmax_indx], ifm_path_template)

            del frames
        logger.info('end processing time window group: %s - %s' %
                    (util.time_to_str(tmin_win), util.time_to_str(tmax_win)))
    cat = Catalog()
    files = glob("%s/../figures/*qml*" % run_path)
    files.sort(key=os.path.getmtime)
    for path in files:
        for event in read_events(path):
            cat.append(event)
    cat.write("%s/../all_events_stacking.qml" % run_path, format="QUAKEML")
Example #16
    def call(self):

        self.cleanup()
        figs = []
        azi_theo = None
        method = {'stack': 0, 'correlate': 2}[self.method]

        bazis = num.arange(0., 360. + self.delta_bazi, self.delta_bazi)
        slownesses = num.arange(self.slowness_min / km, self.slowness_max / km,
                                self.slowness_delta / km)
        n_bazis = len(bazis)
        n_slow = len(slownesses)

        viewer = self.get_viewer()
        event = viewer.get_active_event()

        stations = self.get_stations()
        stations_dict = dict(
            zip([viewer.station_key(s) for s in stations], stations))

        traces_pile = self.get_pile()
        deltats = list(traces_pile.deltats.keys())
        if len(deltats) > 1:
            self.fail('sampling rates differ in dataset')
        else:
            deltat_cf = deltats[0]

        tinc_use = self.get_tinc_use(precision=deltat_cf)

        if self.ntaper:
            taper = num.hanning(int(self.ntaper))
        else:
            taper = None

        frames = None
        t1 = time.time()

        # make sure that only visible stations are used
        use_stations = stations
        center_station = get_center_station(use_stations, select_closest=True)
        print('Center station: ', center_station)
        shift_table = get_shifts(stations=use_stations,
                                 center_station=center_station,
                                 bazis=bazis,
                                 slownesses=slownesses)

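        # convert shift times [s] to integer sample shifts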
        shifts = num.round(shift_table / deltat_cf).astype(num.int32)

        # padding from maximum shift of traces:
        npad = num.max(num.abs(shifts))
        tpad = npad * deltat_cf

        # additional padding for cross over fading
        npad_fade = 0
        tpad_fade = npad_fade * deltat_cf

        npad += npad_fade
        tpad += tpad_fade

        tinc_add = tinc_use or 0

        def trace_selector(x):
            return util.match_nslc('*.*.*.%s' % self.want_channel, x.nslc_id)

        for traces in self.chopper_selected_traces(
                tinc=tinc_use,
                tpad=tpad,
                fallback=True,
                want_incomplete=False,
                trace_selector=trace_selector):

            if len(traces) == 0:
                self.fail('No traces matched')

            # assumes all traces share the chopped window limits
            t_min = traces[0].tmin
            t_max = traces[0].tmax

            use_stations = []
            for tr in traces:
                try:
                    use_stations.append(stations_dict[viewer.station_key(tr)])
                except KeyError:
                    self.fail('no trace %s' % ('.'.join(tr.nslc_id)))

            shift_table = get_shifts(stations=use_stations,
                                     center_station=center_station,
                                     bazis=bazis,
                                     slownesses=slownesses)

            shifts = num.round(shift_table / deltat_cf).astype(num.int32)

            wmin = traces[0].tmin
            wmax = wmin + tinc_add

            iwmin = int(round((wmin - wmin) / deltat_cf))
            iwmax = int(round((wmax - wmin) / deltat_cf))
            lengthout = iwmax - iwmin
            arrays = num.zeros((len(traces), lengthout + npad * 2))

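            # demean and filter each trace according to the viewer settings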
            for itr, tr in enumerate(traces):
                tr = tr.copy()
                if viewer.highpass:
                    tr.highpass(4, viewer.highpass, demean=True)
                else:
                    tr.ydata = num.asarray(
                        tr.ydata, dtype=num.float64) - num.mean(tr.ydata)
                if viewer.lowpass:
                    tr.lowpass(4, viewer.lowpass)

                arrays[itr] = tr.get_ydata()

            # if viewer.highpass:
            #     arrays = highpass_array(
            #            arrays, deltat_cf, 4, viewer.highpass)
            # if viewer.lowpass:
            #     arrays = lowpass_array(arrays, deltat_cf, 4, viewer.lowpass)

            _arrays = []
            for itr, tr in enumerate(traces):
                if taper is not None:
                    ydata = fftconvolve(arrays[itr], taper, mode='same')
                else:
                    ydata = arrays[itr]
                _arrays.append(num.asarray(ydata, dtype=num.float64))
            arrays = _arrays

            offsets = num.array(
                [int(round((tr.tmin - wmin) / deltat_cf)) for tr in traces],
                dtype=num.int32)

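            # one stacking row per (back-azimuth, slowness) grid point,
            # with uniform weights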
            ngridpoints = len(bazis) * len(slownesses)
            weights = num.ones((ngridpoints, len(traces)))

            frames, ioff = parstack.parstack(arrays,
                                             offsets,
                                             shifts,
                                             weights,
                                             method,
                                             offsetout=iwmin,
                                             lengthout=lengthout,
                                             result=frames,
                                             impl='openmp')

            # theoretical bazi
            if event is not None:
                azi_theo = get_theoretical_backazimuth(event, use_stations,
                                                       center_station)
                print('theoretical azimuth %s degrees' % (azi_theo))

            print('processing time: %s seconds' % (time.time() - t1))

            if frames is None:
                self.fail('Could not process data!')
                return

            frames_reshaped = frames.reshape((n_bazis, n_slow, lengthout))
            times = num.linspace(t_min - tpad_fade, t_max + tpad_fade,
                                 lengthout)
            max_powers = num.max(frames, axis=0)

            # power maxima in blocks
            i_max_blocked = search_max_block(
                n_maxsearch=int(npad * self.search_factor),
                data=max_powers)

            # shift to non-negative, normalize to [0, 1] and square to
            # emphasize strong maxima
            max_powers -= num.min(max_powers)
            max_powers /= num.max(max_powers)
            max_powers *= max_powers
            weights = max_powers[i_max_blocked]
            block_max_times = times[i_max_blocked]

            _argmax = num.argmax(frames, axis=0)
            imax_bazi_all, imax_slow_all = num.unravel_index(
                _argmax, (n_bazis, n_slow))

            local_max_bazi = bazis[imax_bazi_all][i_max_blocked]
            local_max_slow = slownesses[imax_slow_all][i_max_blocked] * km

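            # smooth the block maxima in Cartesian slowness components to
            # avoid the 0/360 degree wrap of the back-azimuth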
            k_north = num.sin(local_max_bazi * d2r) * local_max_slow
            k_east = num.cos(local_max_bazi * d2r) * local_max_slow

            smooth = 4e7

            spline_north = UnivariateSpline(block_max_times,
                                            k_north,
                                            w=weights,
                                            s=smooth)

            spline_east = UnivariateSpline(
                block_max_times,
                k_east,
                w=weights,
                s=smooth,
            )

            k_north_fit = spline_north(times)
            k_east_fit = spline_east(times)

            bazi_fitted = num.arctan2(k_east_fit, k_north_fit) / d2r
            bazi_fitted -= 90.
            bazi_fitted *= -1.
            bazi_fitted[num.where(bazi_fitted < 0.)] += 360.

            spline_slow = UnivariateSpline(
                block_max_times,
                local_max_slow,
                w=weights,
            )

            slow_fitted = spline_slow(times)
            i_bazi_fitted = value_to_index(bazi_fitted, 0., 360.,
                                           self.delta_bazi)

            i_slow_fitted = value_to_index(slow_fitted, self.slowness_min,
                                           self.slowness_max,
                                           self.slowness_delta)

            i_shift = num.ravel_multi_index(
                num.vstack((i_bazi_fitted, i_slow_fitted)),
                (n_bazis, n_slow),
            )

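            # time-variable beam: shift each trace by the fitted
            # (back-azimuth, slowness) grid point of every output sample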
            stack_trace = num.zeros(lengthout)
            i_base = num.arange(lengthout, dtype=num.int64) + npad
            for itr, tr in enumerate(traces):
                isorting = num.clip(i_base - shifts[i_shift, itr], npad,
                                    lengthout + npad)
                stack_trace += tr.ydata[isorting]

            beam_tr = trace.Trace(tmin=t_min + tpad,
                                  ydata=stack_trace,
                                  deltat=deltat_cf)

            self.add_trace(beam_tr)

            if self.want_all:

                # ---------------------------------------------------------
                # maxima search
                # ---------------------------------------------------------
                fig1 = self.new_figure('Max Power')
                nsubplots = 1
                ax = fig1.add_subplot(nsubplots, 1, 1)
                ax.plot(num.max(frames, axis=0))
                # --------------------------------------------------------------
                # coherence maps
                # --------------------------------------------------------------

                max_time = num.amax(frames, axis=0)
                imax_time = num.argmax(max_time)
                best_frame = num.amax(frames, axis=1)
                imax_bazi_slow = num.argmax(best_frame)
                imax_bazi, imax_slow = num.unravel_index(
                    num.argmax(best_frame), (n_bazis, n_slow))

                fig2 = self.new_figure('Slowness')
                data = frames_reshaped[imax_bazi, :, :]
                data_max = num.amax(frames_reshaped, axis=0)

                ax = fig2.add_subplot(211)
                ax.set_title('Global maximum slice')
                ax.set_ylabel('slowness [s/km]')
                ax.plot(times[imax_time], slownesses[imax_slow] * km, 'b.')
                ax.pcolormesh(times, slownesses * km, data)

                ax = fig2.add_subplot(212, sharex=ax, sharey=ax)
                ax.set_ylabel('slowness [s/km]')
                ax.pcolormesh(times, slownesses * km, data_max)
                ax.set_title('Maximum')

                # highlight block maxima
                ax.plot(block_max_times, local_max_slow, 'wo')

                ax.plot(
                    times,
                    num.clip(slow_fitted, self.slowness_min,
                             self.slowness_max))

                fig3 = self.new_figure('Back-Azimuth')
                data = frames_reshaped[:, imax_slow, :]
                data_max = num.amax(frames_reshaped, axis=1)

                ax = fig3.add_subplot(211, sharex=ax)
                ax.set_title('Global maximum slice')
                ax.set_ylabel('back-azimuth')
                ax.pcolormesh(times, bazis, data)
                ax.plot(times[imax_time], bazis[imax_bazi], 'b.')

                ax = fig3.add_subplot(212, sharex=ax, sharey=ax)
                ax.set_ylabel('back-azimuth')
                ax.set_title('Maximum')
                ax.pcolormesh(times, bazis, data_max)

                # highlight block maxima
                ax.plot(block_max_times, local_max_bazi, 'wo')
                ax.plot(times, num.clip(bazi_fitted, 0, 360.))

                # xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
                # ax.xaxis.set_major_formatter(xfmt)
                # fig.autofmt_xdate()
                # fig.subplots_adjust(hspace=0)

                semblance = best_frame.reshape((n_bazis, n_slow))

                fig4 = self.new_figure('Max')
                theta, r = num.meshgrid(bazis, slownesses)
                theta *= (num.pi / 180.)

                ax = fig4.add_subplot(111, projection='polar')
                m = ax.pcolormesh(theta.T, r.T * km, to_db(semblance))

                ax.plot(bazis[imax_bazi] * d2r, slownesses[imax_slow] * km,
                        'o')

                bazi_max = bazis[imax_bazi] * d2r
                slow_max = slownesses[imax_slow] * km
                ax.plot(bazi_max, slow_max, 'b.')
                ax.text(0.5,
                        0.01,
                        'Maximum at %s degrees, %s s/km' %
                        (num.round(bazi_max, 1), slow_max),
                        transform=fig4.transFigure,
                        horizontalalignment='center',
                        verticalalignment='bottom')

                if azi_theo is not None:
                    ax.arrow(azi_theo / 180. * num.pi,
                             num.min(slownesses),
                             0,
                             num.max(slownesses),
                             alpha=0.5,
                             width=0.015,
                             edgecolor='black',
                             facecolor='green',
                             lw=2,
                             zorder=5)

                self.adjust_polar_axis(ax)
                fig4.colorbar(m)

                # ---------------------------------------------------------
                # CF and beam forming
                # ---------------------------------------------------------
                fig5 = self.new_figure('Beam')
                nsubplots = 4
                nsubplots += self.want_pws

                ax_raw = fig5.add_subplot(nsubplots, 1, 1)
                ax_shifted = fig5.add_subplot(nsubplots, 1, 2)
                ax_beam = fig5.add_subplot(nsubplots, 1, 3)
                ax_beam_new = fig5.add_subplot(nsubplots, 1, 4)

                axkwargs = dict(alpha=0.3, linewidth=0.3, color='grey')

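                # linear beam at the global maximum, optionally with an
                # additional phase-weighting term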
                ybeam = num.zeros(lengthout)
                ybeam_weighted = num.zeros(lengthout)
                for i, (shift, array) in enumerate(zip(shifts.T, arrays)):
                    ax_raw.plot(times, array[npad:-npad], **axkwargs)
                    ishift = shift[imax_bazi_slow]
                    ax_shifted.plot(times, array[npad - ishift:-npad - ishift],
                                    **axkwargs)

                    ydata = traces[i].get_ydata()[npad - ishift:-npad - ishift]
                    ybeam += ydata

                    # calculate phase weighting
                    if self.want_pws:
                        ph_inst = instantaneous_phase(ydata)
                        ybeam_weighted += num.abs(num.exp(ph_inst))**4

                ax_beam_new.plot(stack_trace)
                ax_beam_new.set_title('continuous mode')
                # ax_beam.plot(times, ybeam, color='black')
                ax_beam.plot(ybeam, color='black')
                ax_raw.set_title('Characteristic Function')
                ax_shifted.set_title('Shifted CF')
                ax_beam.set_title('Linear Stack')

                if self.want_pws:
                    ax_playground = fig5.add_subplot(nsubplots, 1, 5)
                    ax_playground.plot(ybeam * ybeam_weighted / len(arrays))
                    ax_playground.set_title('Phase Weighted Stack')

                # -----------------------------------------------------------
                # polar movie:
                # -----------------------------------------------------------
                fig6 = self.new_figure('Polar Movie')
                self.polar_movie(
                    fig=fig6,
                    frames=frames,
                    times=times,
                    theta=theta.T,
                    r=r.T * km,
                    nth_frame=2,
                    n_bazis=n_bazis,
                    n_slow=n_slow,
                )

                self.draw_figures()

                self.irun += 1
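
For reference, a minimal self-contained sketch of the delay-and-sum grid
search performed above: synthetic spike traces are delayed according to an
assumed plane wave, a shift table over a (back-azimuth, slowness) grid is
built, and parstack recovers the wave parameters. The station geometry and
all parameter values are illustrative assumptions; only the parstack call
mirrors the usage in the example.

import numpy as num
from pyrocko.parstack import parstack

deltat = 0.01     # sampling interval [s]
nsamples = 1000
d2r = num.pi / 180.

# assumed station offsets relative to the array center [m]
station_xy = num.array([[0., 0.], [500., 0.], [0., 500.], [-400., 300.]])
nstations = station_xy.shape[0]

# "true" plane wave: back-azimuth 60 deg, slowness 0.2 s/km (0.2e-3 s/m)
bazi_true, slow_true = 60., 0.2e-3
direction = num.array([num.sin(bazi_true * d2r), num.cos(bazi_true * d2r)])
delays_true = -slow_true * num.dot(station_xy, direction)

# synthetic traces: one spike per station, delayed by the plane wave
arrays = []
for delay in delays_true:
    y = num.zeros(nsamples)
    y[nsamples // 2 + int(round(delay / deltat))] = 1.0
    arrays.append(y)

offsets = num.zeros(nstations, dtype=num.int32)

# integer sample shift table over the (back-azimuth, slowness) search grid
bazis = num.arange(0., 360., 10.)
slownesses = num.linspace(0.1e-3, 0.3e-3, 5)
shifts = num.zeros((bazis.size * slownesses.size, nstations),
                   dtype=num.int32)
i = 0
for bazi in bazis:
    d = num.array([num.sin(bazi * d2r), num.cos(bazi * d2r)])
    for slow in slownesses:
        # a matching plane wave aligns all spikes after these shifts
        shifts[i, :] = num.round(
            slow * num.dot(station_xy, d) / deltat).astype(num.int32)
        i += 1

weights = num.ones((shifts.shape[0], nstations))
frames, ioff = parstack(arrays, offsets, shifts, weights, 0, impl='numpy')

# the grid point with the largest stacked amplitude recovers the wave
ibazi, islow = num.unravel_index(
    num.argmax(frames.max(axis=1)), (bazis.size, slownesses.size))
print('best back-azimuth: %g deg, slowness: %g s/km'
      % (bazis[ibazi], slownesses[islow] * 1000.))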