Example #1
def download_data(staloc, Tstart, Tend, filt, nattempts, waittime, ncpu, icpu):
    """
    """
    nstations = int(ceil(len(staloc) / ncpu))
    ibegin = icpu * nstations
    iend = min((icpu + 1) * nstations, len(staloc))

    for ir in range(ibegin, iend):
        station = staloc['station'][ir]
        network = staloc['network'][ir]
        channels = staloc['channels'][ir]
        location = staloc['location'][ir]
        server = staloc['server'][ir]
        time_on = staloc['time_on'][ir]
        time_off = staloc['time_off'][ir]
        dt = staloc['dt'][ir]

        # File to write error messages
        namedir = 'error'
        if not os.path.exists(namedir):
            os.makedirs(namedir)
        errorfile = 'error/' + station + '.txt'

        # Check whether there are data for this period of time
        year_on = int(time_on[0:4])
        month_on = int(time_on[5:7])
        day_on = int(time_on[8:10])
        year_off = int(time_off[0:4])
        month_off = int(time_off[5:7])
        day_off = int(time_off[8:10])
        if ((Tstart > UTCDateTime(year=year_on, month=month_on, day=day_on)) \
            and (Tend < UTCDateTime(year=year_off, month=month_off, day=day_off))):

            # First case: we can get the data from IRIS
            if (server == 'IRIS'):
                (D, orientation) = get_from_IRIS(station, network, channels, \
                    location, Tstart, Tend, filt, dt, nattempts, waittime, \
                    errorfile)
            # Second case: we get the data from NCEDC
            elif (server == 'NCEDC'):
                (D, orientation) = get_from_NCEDC(station, network, channels, \
                    location, Tstart, Tend, filt, dt, nattempts, waittime, \
                    errorfile)
            else:
                raise ValueError(
                    'You can only download data from IRIS and NCEDC')

            # Store the data into temporary files
            namedir = 'tmp'
            if not os.path.exists(namedir):
                os.makedirs(namedir)
            if isinstance(D, obspy.core.stream.Stream):
                mychannels = channels.split(',')
                for channel in mychannels:
                    stream = D.select(channel=channel)
                    if isinstance(stream, obspy.core.stream.Stream):
                        if len(stream) > 0:
                            stream.write('tmp/' + station + '_' + channel + \
                                '.mseed', format='MSEED')
                            namefile = 'tmp/' + station + '_' + channel + '.pkl'
                            pickle.dump(orientation, open(namefile, 'wb'))
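
A minimal usage sketch for download_data, assuming the module-level imports (obspy, pandas, os, pickle) and the get_from_IRIS/get_from_NCEDC helpers are available. The station file name, its column layout, and the parameter values below are placeholders, not taken from the original code; the sketch shows how ncpu and icpu partition the station list across processes.

import pandas as pd
from multiprocessing import Pool
from obspy import UTCDateTime

# Hypothetical station file with the columns download_data reads
staloc = pd.read_csv('stations.txt', sep=r'\s{1,}', header=None, \
    engine='python')
staloc.columns = ['station', 'network', 'channels', 'location', \
    'server', 'latitude', 'longitude', 'time_on', 'time_off', 'dt']

Tstart = UTCDateTime(2008, 4, 1)
Tend = UTCDateTime(2008, 4, 8)
ncpu = 4

def run(icpu):
    # Each process downloads its own slice of the station list
    download_data(staloc, Tstart, Tend, (1.5, 9.0), nattempts=3, \
        waittime=10.0, ncpu=ncpu, icpu=icpu)

if __name__ == '__main__':
    with Pool(ncpu) as pool:
        pool.map(run, range(ncpu))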
Example #2
def get_cc_window(filename, TDUR, filt, dt, nattempts, waittime, \
        method='RMS', envelope=True):
    """
    This function finds the time arrival of each template waveform
    for each station

    Input:
        type filename = string
        filename = Name of the template
        type TDUR = float
        TDUR = Time to add before and after the time window for tapering
        type filt = tuple of floats
        filt = Lower and upper frequencies of the filter
        type dt = float
        dt = Time step for resampling
        type nattempts = integer
        nattempts = Number of times we try to download data
        type waittime = positive float
        waittime = Time to wait between two attempts at downloading
        type method = string
        method = Normalization method for linear stack (RMS or Max)
        type envelope = boolean
        envelope = Do we compute the max CC on the signal or the envelope?
    Output:
        None
    """
    # Get the names of the stations which have a waveform for this LFE family
    file = open('../data/Plourde_2015/detections/' + filename + \
        '_detect5_cull.txt')
    first_line = file.readline().strip()
    staNames = first_line.split()
    file.close()

    # Get the time of LFE detections
    LFEtime = np.loadtxt('../data/Plourde_2015/detections/' + filename + \
        '_detect5_cull.txt', \
        dtype={'names': ('unknown', 'day', 'hour', 'second', 'threshold'), \
             'formats': (float, '|S6', int, float, float)}, \
        skiprows=2)

    # Get the network, channels, and location of the stations
    staloc = pd.read_csv('../data/Plourde_2015/station_locations.txt', \
        sep=r'\s{1,}', header=None, engine='python')
    staloc.columns = ['station', 'network', 'channels', 'location', \
        'server', 'latitude', 'longitude']

    # File to write error messages
    errorfile = 'error/' + filename + '.txt'

    # Initialize lists
    maxEW = []
    maxNS = []
    maxUD = []
    timeEW = []
    timeNS = []
    timeUD = []
    stations = []

    # Loop over stations
    for station in staNames:
        # Create streams
        EW = Stream()
        NS = Stream()
        UD = Stream()
        # Get station metadata for downloading
        for ir in range(0, len(staloc)):
            if (station == staloc['station'][ir]):
                network = staloc['network'][ir]
                channels = staloc['channels'][ir]
                location = staloc['location'][ir]
                server = staloc['server'][ir]
        # Loop on LFEs
        for i in range(0, np.shape(LFEtime)[0]):
            YMD = LFEtime[i][1]
            myYear = 2000 + int(YMD[0:2])
            myMonth = int(YMD[2:4])
            myDay = int(YMD[4:6])
            myHour = LFEtime[i][2] - 1
            myMinute = int(LFEtime[i][3] / 60.0)
            mySecond = int(LFEtime[i][3] - 60.0 * myMinute)
            myMicrosecond = int(1000000.0 * \
                (LFEtime[i][3] - 60.0 * myMinute - mySecond))
            Tori = UTCDateTime(year=myYear, month=myMonth, day=myDay, \
                hour=myHour, minute=myMinute, second=mySecond, \
                microsecond=myMicrosecond)
            Tstart = Tori - TDUR
            Tend = Tori + 60.0 + TDUR
            # First case: we can get the data from IRIS
            if (station[0:2] == 'ME' or station == 'B039'):
                (D, orientation) = get_from_IRIS(station, network, channels, \
                    location, Tstart, Tend, filt, dt, nattempts, waittime, \
                    errorfile)
            # Second case: we get the data from NCEDC
            else:
                (D, orientation) = get_from_NCEDC(station, network, channels, \
                    location, Tstart, Tend, filt, dt, nattempts, waittime, \
                    errorfile)
            if isinstance(D, obspy.core.stream.Stream):
                # Add to stream
                if (station == 'B039'):
                    EW.append(D.select(channel='EH1').slice(Tori, \
                        Tori + 60.0)[0])
                    NS.append(D.select(channel='EH2').slice(Tori, \
                        Tori + 60.0)[0])
                    UD.append(D.select(channel='EHZ').slice(Tori, \
                        Tori + 60.0)[0])
                else:
                    EW.append(D.select(component='E').slice(Tori, \
                        Tori + 60.0)[0])
                    NS.append(D.select(component='N').slice(Tori, \
                        Tori + 60.0)[0])
                    UD.append(D.select(component='Z').slice(Tori, \
                        Tori + 60.0)[0])
            else:
                print('Failed to download data')
        # Stack
        if (len(EW) > 0):
            # Stack waveforms
            EWstack = linstack([EW], normalize=True, method=method)
            NSstack = linstack([NS], normalize=True, method=method)
            UDstack = linstack([UD], normalize=True, method=method)
            if envelope:
                EWstack[0].data = obspy.signal.filter.envelope( \
                    EWstack[0].data)
                NSstack[0].data = obspy.signal.filter.envelope( \
                    NSstack[0].data)
                UDstack[0].data = obspy.signal.filter.envelope( \
                    UDstack[0].data)
            maxEW.append(np.max(np.abs(EWstack[0].data) / np.sqrt(np.mean( \
                np.square(EWstack[0].data)))))
            maxNS.append(np.max(np.abs(NSstack[0].data) / np.sqrt(np.mean( \
                np.square(NSstack[0].data)))))
            maxUD.append(np.max(np.abs(UDstack[0].data) / np.sqrt(np.mean( \
                np.square(UDstack[0].data)))))
            timeEW.append(np.argmax(np.abs(EWstack[0].data)) * \
                EWstack[0].stats.delta)
            timeNS.append(np.argmax(np.abs(NSstack[0].data)) * \
                NSstack[0].stats.delta)
            timeUD.append(np.argmax(np.abs(UDstack[0].data)) * \
                UDstack[0].stats.delta)
            stations.append(station)

    # Save time arrivals into file
    output = 'timearrival/' + filename + '.pkl'
    pickle.dump([stations, maxEW, maxNS, maxUD, timeEW, timeNS, timeUD], \
        open(output, 'wb'))
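
A minimal call sketch for get_cc_window; the template name is hypothetical and the parameter values are placeholders. The function reads the detection file from ../data/Plourde_2015/detections/ and writes its result under timearrival/.

import pickle

# Hypothetical template name and placeholder parameter values
get_cc_window('templatename', TDUR=10.0, filt=(1.5, 9.0), dt=0.05, \
    nattempts=3, waittime=10.0, method='RMS', envelope=True)

# Read back the arrival times written by the function
(stations, maxEW, maxNS, maxUD, timeEW, timeNS, timeUD) = pickle.load( \
    open('timearrival/templatename.pkl', 'rb'))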
Example #3
def find_LFEs(family_file, station_file, template_dir, tbegin, tend, \
    TDUR, duration, filt, freq0, dt, nattempts, waittime, type_threshold='MAD', \
    threshold=0.0075):
    """
    
    """
    # Get the network, channels, and location of the stations
    staloc = pd.read_csv(station_file, sep=r'\s{1,}', header=None, engine='python')
    staloc.columns = ['station', 'network', 'channels', 'location', \
        'server', 'latitude', 'longitude', 'time_on', 'time_off']

    # Begin and end time of analysis
    t1 = UTCDateTime(year=tbegin[0], month=tbegin[1], \
        day=tbegin[2], hour=tbegin[3], minute=tbegin[4], \
        second=tbegin[5])
    t2 = UTCDateTime(year=tend[0], month=tend[1], \
        day=tend[2], hour=tend[3], minute=tend[4], \
        second=tend[5])
    
    # Number of hours of data to analyze
    nhour = int(ceil((t2 - t1) / 3600.0))

    # Begin and end time of downloading
    Tstart = t1 - TDUR
    Tend = t2 + duration + TDUR

    # Temporary directory to store the data
    namedir = 'tmp'
    if not os.path.exists(namedir):
        os.makedirs(namedir)

    # Download the data from the stations
    for ir in range(0, len(staloc)):
        station = staloc['station'][ir]
        network = staloc['network'][ir]
        channels = staloc['channels'][ir]
        location = staloc['location'][ir]
        server = staloc['server'][ir]
        time_on = staloc['time_on'][ir]
        time_off = staloc['time_off'][ir]

        # File to write error messages
        namedir = 'error'
        if not os.path.exists(namedir):
            os.makedirs(namedir)
        errorfile = 'error/' + station + '.txt'

        # Check whether there are data for this period of time
        year_on = int(time_on[0:4])
        month_on = int(time_on[5:7])
        day_on = int(time_on[8:10])
        year_off = int(time_off[0:4])
        month_off = int(time_off[5:7])
        day_off = int(time_off[8:10])
        if ((Tstart > UTCDateTime(year=year_on, month=month_on, day=day_on)) \
           and (Tend < UTCDateTime(year=year_off, month=month_off, day=day_off))):

            # First case: we can get the data from IRIS
            if (server == 'IRIS'):
                (D, orientation) = get_from_IRIS(station, network, channels, \
                    location, Tstart, Tend, filt, dt, nattempts, waittime, \
                    errorfile)
            # Second case: we get the data from NCEDC
            elif (server == 'NCEDC'):
                (D, orientation) = get_from_NCEDC(station, network, channels, \
                    location, Tstart, Tend, filt, dt, nattempts, waittime, \
                    errorfile)
            else:
                raise ValueError('You can only download data from IRIS and NCEDC')

            # Store the data into temporary files
            if isinstance(D, obspy.core.stream.Stream):
                D.write('tmp/' + station + '.mseed', format='MSEED')
                namefile = 'tmp/' + station + '.pkl'
                pickle.dump(orientation, open(namefile, 'wb'))

    # Loop on families
    families = pd.read_csv(family_file, sep=r'\s{1,}', header=None, engine='python')
    families.columns = ['family', 'stations']
    for i in range(0, len(families)):

        # Create directory to store the LFEs times
        namedir = 'LFEs/' + families['family'].iloc[i]
        if not os.path.exists(namedir):
            os.makedirs(namedir)

        # File to write error messages
        namedir = 'error'
        if not os.path.exists(namedir):
            os.makedirs(namedir)
        errorfile = 'error/' + families['family'].iloc[i] + '.txt'

        # Create dataframe to store LFE times
        df = pd.DataFrame(columns=['year', 'month', 'day', 'hour', \
        'minute', 'second', 'cc', 'nchannel'])

        # Read the templates
        stations = families['stations'].iloc[i].split(',')
        templates = Stream()
        for station in stations:
            data = pickle.load(open(template_dir + '/' + families['family'].iloc[i] + \
            '/' + station + '.pkl', 'rb'))
            if (len(data) == 3):
                EW = data[0]
                NS = data[1]
                UD = data[2]
                EW.stats.station = station
                NS.stats.station = station
                EW.stats.channel = 'E'
                NS.stats.channel = 'N'
                templates.append(EW)
                templates.append(NS)
            else:
                UD = data[0]
            UD.stats.station = station
            UD.stats.channel = 'Z'
            templates.append(UD)                       

        # Loop on hours of data
        for hour in range(0, nhour):
            nchannel = 0
            Tstart = t1 + hour * 3600.0
            Tend = t1 + (hour + 1) * 3600.0 + duration
            delta = Tend - Tstart
            ndata = int(delta / dt) + 1

            # Get the data
            data = []
            for station in stations:
                try:
                    D = read('tmp/' + station + '.mseed')
                    D = D.slice(Tstart, Tend)
                    namefile = 'tmp/' + station + '.pkl'
                    orientation = pickle.load(open(namefile, 'rb'))

                    # Get station metadata for reading response file
                    for ir in range(0, len(staloc)):
                        if (station == staloc['station'][ir]):
                            network = staloc['network'][ir]
                            channels = staloc['channels'][ir]
                            location = staloc['location'][ir]
                            server = staloc['server'][ir]

                    # Orientation of template
                    # Date chosen: April 1st 2008
                    mychannels = channels.split(',')
                    mylocation = location
                    if (mylocation == '--'):
                        mylocation = ''
                    response = '../data/response/' + network + '_' + station + '.xml'
                    inventory = read_inventory(response, format='STATIONXML')
                    reference = []
                    for channel in mychannels:
                        angle = inventory.get_orientation(network + '.' + \
                            station + '.' + mylocation + '.' + channel, \
                            UTCDateTime(2008, 4, 1, 0, 0, 0))
                        reference.append(angle)

                    # Append data to stream
                    if isinstance(D, obspy.core.stream.Stream):
                        stationdata = fill_data(D, orientation, station, channels, reference)
                        if (len(stationdata) > 0):
                            for stream in stationdata:
                                data.append(stream)
                except Exception:
                    # Write the error message to the error file
                    message = 'No data available for station {} '.format( \
                        station) + 'at time {}/{}/{} - {}:{}:{}\n'.format( \
                        Tstart.year, Tstart.month, Tstart.day, Tstart.hour, \
                        Tstart.minute, Tstart.second)
                    with open(errorfile, 'a') as file:
                        file.write(message)

            # Loop on channels
            for channel in range(0, len(data)):
                subdata = data[channel]
                # Check whether we have a complete one-hour-long recording
                if (len(subdata) == 1):
                    if (len(subdata[0].data) == ndata):
                        # Get the template
                        station = subdata[0].stats.station
                        component = subdata[0].stats.channel
                        template = templates.select(station=station, \
                            component=component)[0]
                        # Cross correlation
                        cctemp = correlate.optimized(template, subdata[0])
                        if (nchannel > 0):
                            cc = np.vstack((cc, cctemp))
                        else:
                            cc = cctemp
                        nchannel = nchannel + 1
    
            if nchannel > 0:
                # Compute average cross-correlation across channels
                meancc = np.mean(cc, axis=0)
                if (type_threshold == 'MAD'):
                    MAD = np.median(np.abs(meancc - np.mean(meancc)))
                    index = np.where(meancc >= threshold * MAD)
                elif (type_threshold == 'Threshold'):
                    index = np.where(meancc >= threshold)
                else:
                    raise ValueError('Type of threshold must be MAD or Threshold')
                times = np.arange(0.0, np.shape(meancc)[0] * dt, dt)

                # Get LFE times
                if np.shape(index)[1] > 0:
                    (time, cc) = clean_LFEs(index, times, meancc, dt, freq0)

                    # Add LFE times to dataframe
                    i0 = len(df.index)
                    for j in range(0, len(time)):
                        timeLFE = Tstart + time[j]
                        df.loc[i0 + j] = [int(timeLFE.year), int(timeLFE.month), \
                            int(timeLFE.day), int(timeLFE.hour), \
                            int(timeLFE.minute), timeLFE.second + \
                            timeLFE.microsecond / 1000000.0, cc[j], nchannel]

        # Add to pandas dataframe and save
        namefile = 'LFEs/' + families['family'].iloc[i] + '/catalog.pkl'
        if os.path.exists(namefile):
            df_all = pickle.load(open(namefile, 'rb'))
            df_all = pd.concat([df_all, df], ignore_index=True)
        else:
            df_all = df    
        df_all = df_all.astype(dtype={'year':'int32', 'month':'int32', \
            'day':'int32', 'hour':'int32', 'minute':'int32', \
            'second':'float', 'cc':'float', 'nchannel':'int32'})
        pickle.dump(df_all, open(namefile, 'wb'))
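
A minimal call sketch for this version of find_LFEs. The file names and parameter values below are placeholders chosen for illustration, not values from the original code.

# Analyze one day of data for all families listed in families.txt
find_LFEs(family_file='families.txt', station_file='stations.txt', \
    template_dir='templates', tbegin=(2008, 4, 1, 0, 0, 0), \
    tend=(2008, 4, 2, 0, 0, 0), TDUR=10.0, duration=60.0, \
    filt=(1.5, 9.0), freq0=1.0, dt=0.05, nattempts=3, waittime=10.0, \
    type_threshold='MAD', threshold=8.0)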
Example #4
def find_LFEs(filename, stations, tbegin, tend, TDUR, filt, \
        freq0, nattempts, waittime, draw=False, type_threshold='MAD', \
        threshold=0.0075):
    """
    Find LFEs with the temporary stations from FAME
    using the templates from Plourde et al. (2015)

    Input:
        type filename = string
        filename = Name of the template
        type stations = list of strings
        stations = Name of the stations used for the matched-filter algorithm
        type tbegin = tuple of 6 integers
        tbegin = Time when we begin looking for LFEs
        type tend = tuple of 6 integers
        tend = Time when we stop looking for LFEs
        type TDUR = float
        TDUR = Time to add before and after the time window for tapering
        type filt = tuple of floats
        filt = Lower and upper frequencies of the filter
        type freq0 = float
        freq0 = Maximum frequency rate of LFE occurrence
        type nattempts = integer
        nattempts = Number of times we try to download data
        type waittime = positive float
        waittime = Time to wait between two attempts at downloading
        type draw = boolean
        draw = Do we draw a figure of the cross-correlation?
        type type_threshold = string
        type_threshold = 'MAD' or 'Threshold'
        type threshold = float
        threshold = Cross correlation value must be higher than that
    Output:
        None
    """

    # Get the network, channels, and location of the stations
    staloc = pd.read_csv('../data/Ducellier/stations_permanent.txt', \
        sep=r'\s{1,}', header=None, engine='python')
    staloc.columns = ['station', 'network', 'channels', 'location', \
        'server', 'latitude', 'longitude', 'time_on', 'time_off']

    # Create directory to store the LFEs times
    namedir = 'LFEs/' + filename
    if not os.path.exists(namedir):
        os.makedirs(namedir)

    # File to write error messages
    namedir = 'error'
    if not os.path.exists(namedir):
        os.makedirs(namedir)
    errorfile = 'error/' + filename + '.txt'

    # Read the templates
    templates = Stream()
    for station in stations:
        data = pickle.load(open('templates_new/' + filename + \
            '/' + station + '.pkl', 'rb'))
        if (len(data) == 3):
            EW = data[0]
            NS = data[1]
            UD = data[2]
            EW.stats.station = station
            NS.stats.station = station
            EW.stats.channel = 'E'
            NS.stats.channel = 'N'
            templates.append(EW)
            templates.append(NS)
        else:
            UD = data[0]
        UD.stats.station = station
        UD.stats.channel = 'Z'
        templates.append(UD)

    # Begin and end time of analysis
    t1 = UTCDateTime(year=tbegin[0], month=tbegin[1], \
        day=tbegin[2], hour=tbegin[3], minute=tbegin[4], \
        second=tbegin[5])
    t2 = UTCDateTime(year=tend[0], month=tend[1], \
        day=tend[2], hour=tend[3], minute=tend[4], \
        second=tend[5])

    # Read the data
    data = []
    for station in stations:
        # Get station metadata for downloading
        for ir in range(0, len(staloc)):
            if (station == staloc['station'][ir]):
                network = staloc['network'][ir]
                channels = staloc['channels'][ir]
                location = staloc['location'][ir]
                server = staloc['server'][ir]

        # Duration of template
        template = templates.select(station=station, component='Z')[0]
        dt = template.stats.delta
        nt = template.stats.npts
        duration = (nt - 1) * dt   
        Tstart = t1 - TDUR
        Tend = t2 + duration + TDUR
        delta = t2 + duration - t1
        ndata = int(delta / dt) + 1

        # Orientation of template
        # Date chosen: April 1st 2008
        mychannels = channels.split(',')
        mylocation = location
        if (mylocation == '--'):
            mylocation = ''
        response = '../data/response/' + network + '_' + station + '.xml'
        inventory = read_inventory(response, format='STATIONXML')
        reference = []
        for channel in mychannels:
            angle = inventory.get_orientation(network + '.' + \
                station + '.' + mylocation + '.' + channel, \
                UTCDateTime(2012, 1, 1, 0, 0, 0))
            reference.append(angle)

        # First case: we can get the data from IRIS
        if (server == 'IRIS'):
            (D, orientation) = get_from_IRIS(station, network, channels, \
                location, Tstart, Tend, filt, dt, nattempts, waittime, \
                errorfile)
        # Second case: we get the data from NCEDC
        elif (server == 'NCEDC'):
            (D, orientation) = get_from_NCEDC(station, network, channels, \
                location, Tstart, Tend, filt, dt, nattempts, waittime, \
                errorfile)
        else:
            raise ValueError('You can only download data from IRIS and NCEDC')

        # Append data to stream
        if isinstance(D, obspy.core.stream.Stream):
            stationdata = fill_data(D, orientation, station, channels, reference)
            if (len(stationdata) > 0):
                for stream in stationdata:
                    data.append(stream)

    # Number of hours of data to analyze
    nhour = int(ceil((t2 - t1) / 3600.0))

    # Create dataframe to store LFE times
    df = pd.DataFrame(columns=['year', 'month', 'day', 'hour', \
        'minute', 'second', 'cc', 'nchannel'])

    # Loop on hours of data
    for hour in range(0, nhour):
        nchannel = 0
        Tstart = t1 + hour * 3600.0
        Tend = t1 + (hour + 1) * 3600.0 + duration
        delta = Tend - Tstart
        ndata = int(delta / dt) + 1

        # Loop on channels
        for channel in range(0, len(data)):
            # Cut the data
            subdata = data[channel]
            subdata = subdata.slice(Tstart, Tend)
            # Check whether we have a complete one-hour-long recording
            if (len(subdata) == 1):
                if (len(subdata[0].data) == ndata):
                    # Get the template
                    station = subdata[0].stats.station
                    component = subdata[0].stats.channel
                    template = templates.select(station=station, \
                        component=component)[0]
                    # Cross correlation
                    cctemp = correlate.optimized(template, subdata[0])
                    if (nchannel > 0):
                        cc = np.vstack((cc, cctemp))
                    else:
                        cc = cctemp
                    nchannel = nchannel + 1
    
        if nchannel > 0:
            # Compute average cross-correlation across channels
            meancc = np.mean(cc, axis=0)
            if (type_threshold == 'MAD'):
                MAD = np.median(np.abs(meancc - np.mean(meancc)))
                index = np.where(meancc >= threshold * MAD)
            elif (type_threshold == 'Threshold'):
                index = np.where(meancc >= threshold)
            else:
                raise ValueError('Type of threshold must be MAD or Threshold')
            times = np.arange(0.0, np.shape(meancc)[0] * dt, dt)

            # Get LFE times
            if np.shape(index)[1] > 0:
                (time, cc) = clean_LFEs(index, times, meancc, dt, freq0)

                # Add LFE times to dataframe
                i0 = len(df.index)
                for i in range(0, len(time)):
                    timeLFE = Tstart + time[i]
                    df.loc[i0 + i] = [int(timeLFE.year), int(timeLFE.month), \
                        int(timeLFE.day), int(timeLFE.hour), \
                        int(timeLFE.minute), timeLFE.second + \
                        timeLFE.microsecond / 1000000.0, cc[i], nchannel]

            # Draw figure
            if draw:
                params = {'xtick.labelsize':16,
                          'ytick.labelsize':16}
                pylab.rcParams.update(params) 
                plt.figure(1, figsize=(20, 8))
                if np.shape(index)[1] > 0:
                    for i in range(0, len(time)):
                        plt.axvline(time[i], linewidth=2, color='grey')
                plt.plot(np.arange(0.0, np.shape(meancc)[0] * dt, \
                    dt), meancc, color='black')
                if (type_threshold == 'MAD'):
                    plt.axhline(threshold * MAD, linewidth=2, color='red', \
                        label = '{:6.2f} * MAD'.format(threshold))
                elif (type_threshold == 'Threshold'):
                    plt.axhline(threshold, linewidth=2, color='red', \
                        label = 'Threshold = {:8.4f}'.format(threshold))
                else:
                    raise ValueError( \
                        'Type of threshold must be MAD or Threshold')
                plt.xlim(0.0, (np.shape(meancc)[0] - 1) * dt)
                plt.xlabel('Time (s)', fontsize=24)
                plt.ylabel('Cross-correlation', fontsize=24)
                plt.title('Average cross-correlation across stations', \
                    fontsize=30)
                plt.legend(loc=2, fontsize=24)
                plt.savefig('LFEs/' + filename + '/' + \
                    '{:04d}{:02d}{:02d}_{:02d}{:02d}{:02d}'.format( \
                    Tstart.year, Tstart.month, Tstart.day, Tstart.hour, \
                    Tstart.minute, Tstart.second) + '.png', format='png')
                plt.close(1)

    # Add to pandas dataframe and save
    namefile = 'LFEs/' + filename + '/catalog.pkl'
    if os.path.exists(namefile):
        df_all = pickle.load(open(namefile, 'rb'))
        df_all = pd.concat([df_all, df], ignore_index=True)
    else:
        df_all = df    
    df_all = df_all.astype(dtype={'year':'int32', 'month':'int32', \
        'day':'int32', 'hour':'int32', 'minute':'int32', \
        'second':'float', 'cc':'float', 'nchannel':'int32'})
    pickle.dump(df_all, open(namefile, 'wb'))
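
In the MAD branch above, the detection threshold is threshold * MAD, where MAD is the median absolute deviation of the averaged cross-correlation about its mean. A self-contained sketch with synthetic numbers (illustrative values only, not taken from the original code):

import numpy as np

rng = np.random.default_rng(0)
meancc = 0.01 * rng.standard_normal(72000)   # one hour of cc values at dt = 0.05 s
meancc[36000] = 0.2                          # plant one synthetic detection

# Median absolute deviation about the mean, as in the code above
MAD = np.median(np.abs(meancc - np.mean(meancc)))
threshold = 8.0
index = np.where(meancc >= threshold * MAD)
print(index[0] * 0.05)   # candidate detection times in seconds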
Example #5
def get_waveform(filename, TDUR, filt, nattempts, waittime, method='RMS'):
    """
    This function computes the waveforms for a given template and compares
    them to the waveforms from Plourde et al. (2015)

    Input:
        type filename = string
        filename = Name of the template
        type TDUR = float
        TDUR = Time to add before and after the time window for tapering
        type filt = tuple of floats
        filt = Lower and upper frequencies of the filter
        type nattempts = integer
        nattempts = Number of times we try to download data
        type waittime = positive float
        waittime = Time to wait between two attempts at downloading
        type method = string
        method = Normalization method for linear stack (RMS or Max)
    Output:
        None
    """
    # Get the names of the stations which have a waveform for this LFE family
    file = open('../data/Plourde_2015/detections/' + filename + \
        '_detect5_cull.txt')
    first_line = file.readline().strip()
    staNames = first_line.split()
    file.close()

    # Get the time of LFE detections
    LFEtime = np.loadtxt('../data/Plourde_2015/detections/' + filename + \
        '_detect5_cull.txt', \
        dtype={'names': ('unknown', 'day', 'hour', 'second', 'threshold'), \
             'formats': (float, '|S6', int, float, float)}, \
        skiprows=2)

    # Get the waveforms from the catalog of Plourde et al. (2015)
    data = loadmat('../data/Plourde_2015/waveforms/' + filename + '.mat')
    ndt = data['ndt'][0][0]
    ordlst = data['ordlst']
    uk = data['uk']
    ns = len(ordlst)

    # Get the network, channels, and location of the stations
    staloc = pd.read_csv('../data/Plourde_2015/station_locations.txt', \
        sep=r'\s{1,}', header=None, engine='python')
    staloc.columns = ['station', 'network', 'channels', 'location', \
        'server', 'latitude', 'longitude']

    # Create directory to store the waveforms
    namedir = 'waveforms/' + filename
    if not os.path.exists(namedir):
        os.makedirs(namedir)

    # File to write error messages
    errorfile = 'error/' + filename + '.txt'

    # Loop over stations
    for station in staNames:
        # Create streams
        EW = Stream()
        NS = Stream()
        UD = Stream()
        # Get station metadata for downloading
        for ir in range(0, len(staloc)):
            if (station == staloc['station'][ir]):
                network = staloc['network'][ir]
                channels = staloc['channels'][ir]
                location = staloc['location'][ir]
                server = staloc['server'][ir]
        # Loop on LFEs
        for i in range(0, np.shape(LFEtime)[0]):
            YMD = LFEtime[i][1]
            myYear = 2000 + int(YMD[0:2])
            myMonth = int(YMD[2:4])
            myDay = int(YMD[4:6])
            myHour = LFEtime[i][2] - 1
            myMinute = int(LFEtime[i][3] / 60.0)
            mySecond = int(LFEtime[i][3] - 60.0 * myMinute)
            myMicrosecond = int(1000000.0 * \
                (LFEtime[i][3] - 60.0 * myMinute - mySecond))
            Tori = UTCDateTime(year=myYear, month=myMonth, day=myDay, \
                hour=myHour, minute=myMinute, second=mySecond, \
                microsecond=myMicrosecond)
            Tstart = Tori - TDUR
            Tend = Tori + 60.0 + TDUR
            # First case: we can get the data from IRIS
            if (server == 'IRIS'):
                (D, orientation) = get_from_IRIS(station, network, channels, \
                    location, Tstart, Tend, filt, ndt, nattempts, waittime, \
                    errorfile)
            # Second case: we get the data from NCEDC
            elif (server == 'NCEDC'):
                (D, orientation) = get_from_NCEDC(station, network, channels, \
                    location, Tstart, Tend, filt, ndt, nattempts, waittime, \
                    errorfile)
            else:
                raise ValueError( \
                    'You can only download data from IRIS and NCEDC')
            if isinstance(D, obspy.core.stream.Stream):
                # Add to stream
                if (channels == 'EH1,EH2,EHZ'):
                    EW.append(D.select(channel='EH1').slice(Tori, \
                        Tori + 60.0)[0])
                    NS.append(D.select(channel='EH2').slice(Tori, \
                        Tori + 60.0)[0])
                    UD.append(D.select(channel='EHZ').slice(Tori, \
                        Tori + 60.0)[0])
                else:
                    EW.append(D.select(component='E').slice(Tori, \
                        Tori + 60.0)[0])
                    NS.append(D.select(component='N').slice(Tori, \
                        Tori + 60.0)[0])
                    UD.append(D.select(component='Z').slice(Tori, \
                        Tori + 60.0)[0])
            else:
                print('Failed to download data')
        # Stack and plot
        if (len(EW) > 0 and len(NS) > 0 and len(UD) > 0):
            # Stack waveforms
            EWstack = linstack([EW], normalize=True, method=method)
            NSstack = linstack([NS], normalize=True, method=method)
            UDstack = linstack([UD], normalize=True, method=method)
            # First figure
            # Comparison with the waveforms from Plourde et al. (2015)
            plt.figure(1, figsize=(20, 15))
            # Pad the station name with spaces to 4 characters
            station4 = station.ljust(4)
            index = np.argwhere(ordlst == station4)[0][0]
            # EW component
            ax1 = plt.subplot(311)
            dt = EWstack[0].stats.delta
            nt = EWstack[0].stats.npts
            t = dt * np.arange(0, nt)
            norm = np.max(np.abs(EWstack[0].data))
            plt.plot(t, EWstack[0].data / norm, 'r', label='Stack')
            t0 = ndt * np.arange(0, np.shape(uk)[1])
            norm = np.max(np.abs(uk[ns + index, :]))
            plt.plot(t0, uk[ns + index, :] / norm, 'k', label='Waveform')
            plt.xlim(0.0, 60.0)
            plt.title('East component', fontsize=16)
            plt.xlabel('Time (s)', fontsize=16)
            plt.ylabel('Velocity (m/s)', fontsize=16)
            plt.legend(loc=1)
            # NS component
            ax2 = plt.subplot(312)
            dt = NSstack[0].stats.delta
            nt = NSstack[0].stats.npts
            t = dt * np.arange(0, nt)
            norm = np.max(np.abs(NSstack[0].data))
            plt.plot(t, NSstack[0].data / norm, 'r', label='Stack')
            t0 = ndt * np.arange(0, np.shape(uk)[1])
            norm = np.max(np.abs(uk[index, :]))
            plt.plot(t0, uk[index, :] / norm, 'k', label='Waveform')
            plt.xlim(0.0, 60.0)
            plt.title('North component', fontsize=16)
            plt.xlabel('Time (s)', fontsize=16)
            plt.ylabel('Velocity (m/s)', fontsize=16)
            plt.legend(loc=1)
            # UD component
            ax3 = plt.subplot(313)
            dt = UDstack[0].stats.delta
            nt = UDstack[0].stats.npts
            t = dt * np.arange(0, nt)
            norm = np.max(np.abs(UDstack[0].data))
            plt.plot(t, UDstack[0].data / norm, 'r', label='Stack')
            t0 = ndt * np.arange(0, np.shape(uk)[1])
            norm = np.max(np.abs(uk[2 * ns + index, :]))
            plt.plot(t0, uk[2 * ns + index, :] / norm, 'k', label='Waveform')
            plt.xlim(0.0, 60.0)
            plt.title('Vertical component', fontsize=16)
            plt.xlabel('Time (s)', fontsize=16)
            plt.ylabel('Velocity (m/s)', fontsize=16)
            plt.legend(loc=1)
            # End and save figure
            plt.suptitle(station, fontsize=24)
            plt.savefig(namedir + '/' + station + '_compare.eps', format='eps')
            ax1.clear()
            ax2.clear()
            ax3.clear()
            plt.close(1)
            # Second figure
            # Look at all the waveforms and the stack
            plt.figure(2, figsize=(30, 15))
            # EW component
            ax1 = plt.subplot(131)
            for i in range(0, len(EW)):
                dt = EW[i].stats.delta
                nt = EW[i].stats.npts
                t = dt * np.arange(0, nt)
                datanorm = EW[i].data / np.max(np.abs(EW[i].data))
                plt.plot(t, (2.0 * i + 1) + datanorm, 'k-')
            datanorm = EWstack[0].data / np.max(np.abs(EWstack[0].data))
            plt.plot(t, -2.0 + datanorm, 'r-')
            plt.xlim(0.0, 60.0)
            plt.ylim(-3.0, 2.0 * len(EW))
            plt.title('East component', fontsize=24)
            plt.xlabel('Time (s)', fontsize=24)
            plt.ylabel('Velocity (m/s)', fontsize=24)
            ax1.set_yticklabels([])
            ax1.tick_params(labelsize=20)
            # NS component
            ax2 = plt.subplot(132)
            for i in range(0, len(NS)):
                dt = NS[i].stats.delta
                nt = NS[i].stats.npts
                t = dt * np.arange(0, nt)
                datanorm = NS[i].data / np.max(np.abs(NS[i].data))
                plt.plot(t, (2.0 * i + 1) + datanorm, 'k-')
            datanorm = NSstack[0].data / np.max(np.abs(NSstack[0].data))
            plt.plot(t, -2.0 + datanorm, 'r-')
            plt.xlim(0.0, 60.0)
            plt.ylim(-3.0, 2.0 * len(NS))
            plt.title('North component', fontsize=24)
            plt.xlabel('Time (s)', fontsize=24)
            plt.ylabel('Velocity (m/s)', fontsize=24)
            ax2.set_yticklabels([])
            ax2.tick_params(labelsize=20)
            # UD component
            ax3 = plt.subplot(133)
            for i in range(0, len(UD)):
                dt = UD[i].stats.delta
                nt = UD[i].stats.npts
                t = dt * np.arange(0, nt)
                datanorm = UD[i].data / np.max(np.abs(UD[i].data))
                plt.plot(t, (2.0 * i + 1) + datanorm, 'k-')
            datanorm = UDstack[0].data / np.max(np.abs(UDstack[0].data))
            plt.plot(t, -2.0 + datanorm, 'r-')
            plt.xlim(0.0, 60.0)
            plt.ylim(-3.0, 2.0 * len(UD))
            plt.title('Vertical component', fontsize=24)
            plt.xlabel('Time (s)', fontsize=24)
            plt.ylabel('Velocity (m/s)', fontsize=24)
            ax3.set_yticklabels([])
            ax3.tick_params(labelsize=20)
            # End and save figure
            plt.suptitle(station, fontsize=24)
            plt.savefig(namedir + '/' + station + '_stack.eps', format='eps')
            ax1.clear()
            ax2.clear()
            ax3.clear()
            plt.close(2)
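
A minimal call sketch for get_waveform; the template name is hypothetical and the parameter values are placeholders. The comparison and stack figures are saved under waveforms/<filename>/.

# Hypothetical template name and placeholder parameter values
get_waveform('templatename', TDUR=10.0, filt=(1.5, 9.0), nattempts=3, \
    waittime=10.0, method='RMS')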
Example #6
def compute_new_templates(family, latitude, longitude, catalog, filename, \
    directory, max_dist, max_LFEs, \
    TDUR, filt, dt, nattempts, waittime, method='RMS'):
    """
    This function takes only the best LFEs from an LFE catalog,
    downloads every one-minute time window where there is an LFE recorded,
    and stacks the signal over all the LFEs to get the template

    Input:
        type family = string
        family = Name of the LFE family
        type latitude = float
        latitude = Latitude of the LFE family
        type longitude = float
        longitude = Longitude of the LFE family
        type catalog = string
        catalog = Name of the catalog containing the LFEs
        type filename = string
        filename = Name of the file containing the LFE times
        type directory = string
        directory = Directory where we save the templates
        type max_dist = float
        max_dist = Maximum distance (in km) between the station and the
            LFE family for the station to be kept
        type max_LFEs = integer
        max_LFEs = Maximum number of LFEs to stack
        type TDUR = float
        TDUR = Time to add before and after the time window for tapering
        type filt = tuple of floats
        filt = Lower and upper frequencies of the filter
        type dt = float
        dt = Time step for resampling
        type nattempts = integer
        nattempts = Number of times we try to download data
        type waittime = positive float
        waittime = Time to wait between two attempts at downloading
        type method = string
        method = Normalization method for linear stack (RMS or Max)
    Output:
        None
    """
    # Get the time of LFE detections
    namefile = '../data/' + catalog + '/catalogs/' + family + '/' + filename
    LFEtime = pickle.load(open(namefile, 'rb'))
    LFEsort = LFEtime.sort_values(by=['cc'], ascending=False)

    # Get the network, channels, and location of the stations
    stations_BK = filter_stations('BK')
    stations_NC = filter_stations('NC')
    stations_PB = filter_stations('PB')
    stations = pd.concat([stations_BK, stations_NC, stations_PB],
                         ignore_index=True)

    # Create directory to store the waveforms
    namedir = directory + '/' + family
    if not os.path.exists(namedir):
        os.makedirs(namedir)

    # File to write error messages
    errorfile = 'error/' + family + '.txt'

    # Keep only stations close to LFE family
    a = 6378.136
    e = 0.006694470
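    # Conversion factors from degrees of longitude (dx) and degrees of
    # latitude (dy) to kilometers at the latitude of the LFE family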
    dx = (pi / 180.0) * a * cos(latitude * pi / 180.0) / sqrt(1.0 - e * e * \
        sin(latitude * pi / 180.0) * sin(latitude * pi / 180.0))
    dy = (3.6 * pi / 648.0) * a * (1.0 - e * e) / ((1.0 - e * e * sin(latitude * \
        pi / 180.0) * sin(latitude * pi / 180.0)) ** 1.5)
    x = dx * (stations['Lon'] - longitude)
    y = dy * (stations['Lat'] - latitude)
    stations['distance'] = np.sqrt(np.power(x, 2.0) + np.power(y, 2.0))
    mask = stations['distance'] <= max_dist
    stations = stations.loc[mask]

    # Remove stations with location code '2'
    mask = stations[stations['Lo'] == '2'].index
    stations.drop(mask, inplace=True)

    # Loop over stations
    for ir in range(0, len(stations)):
        # Get station metadata for downloading
        network = stations['Net'].iloc[ir]
        station = stations['Stat'].iloc[ir]
        channels = stations['Cha'].iloc[ir]
        location = stations['Lo'].iloc[ir]
        starttime = stations['Start time'].iloc[ir]
        endtime = stations['End time'].iloc[ir]
        if (network == 'PB'):
            server = 'IRIS'
        else:
            server = 'NCEDC'

        # Filter LFEs for the period where the station was recording
        df = pd.DataFrame({
            'year': LFEsort['year'],
            'month': LFEsort['month'],
            'day': LFEsort['day']
        })
        date = pd.to_datetime(df)
        mask = (date >= starttime) & (date <= endtime)
        LFEsub = LFEsort.loc[mask]

        # Download instrument response
        if (server == 'IRIS'):
            get_responses.get_from_IRIS(station, network)
        elif (server == 'NCEDC'):
            get_responses.get_from_NCEDC(station, network)
        else:
            raise ValueError('You can only download data from IRIS and NCEDC')
        # Create streams
        cha_list = channels.split(',')
        streams = []
        for channel in cha_list:
            streams.append(Stream())
        # Create dictionary of channel orientations
        reference = dict.fromkeys(cha_list)
        # Initialization
        complete = False
        index = 0
        # Loop on LFEs
        while (index < len(LFEsub)) and not complete:
            mySecond = int(floor(LFEsub['second'].iloc[index]))
            myMicrosecond = int(1000000.0 * \
                (LFEsub['second'].iloc[index] - floor(LFEsub['second'].iloc[index])))
            Tori = UTCDateTime(year=LFEsub['year'].iloc[index], \
                month=LFEsub['month'].iloc[index], day=LFEsub['day'].iloc[index], \
                hour=LFEsub['hour'].iloc[index], minute=LFEsub['minute'].iloc[index], \
                second=mySecond, microsecond=myMicrosecond)
            Tstart = Tori - TDUR
            Tend = Tori + 60.0 + TDUR
            # First case: we can get the data from IRIS
            if (server == 'IRIS'):
                (D, orientation) = get_data.get_from_IRIS(station, network, channels, location, \
                    Tstart, Tend, filt, dt, nattempts, waittime, errorfile)
            # Second case: we get the data from NCEDC
            elif (server == 'NCEDC'):
                (D, orientation) = get_data.get_from_NCEDC(station, network, channels, location, \
                        Tstart, Tend, filt, dt, nattempts, waittime, errorfile)
            else:
                raise ValueError(
                    'You can only download data from IRIS and NCEDC')
            if isinstance(D, obspy.core.stream.Stream):
                # Fill dictionary of channel orientations
                for channel in cha_list:
                    if reference[channel] is None:
                        mylocation = location
                        if (mylocation == '--'):
                            mylocation = ''
                        response = '../data/response/' + network + '_' + station + '.xml'
                        inventory = read_inventory(response,
                                                   format='STATIONXML')
                        angle = inventory.get_orientation(network + '.' + \
                            station + '.' + mylocation + '.' + channel, Tori)
                        reference[channel] = angle
                # Rotation of components
                D = rotate_stream(D, orientation, reference)
                # Add to stream
                for (i, channel) in enumerate(cha_list):
                    if len(streams[i]) < max_LFEs:
                        Dselect = D.select(channel=channel).slice(
                            Tori, Tori + 60.0)
                        if len(Dselect) == 1:
                            if Dselect[0].stats.npts == int(60.0 / dt + 1):
                                streams[i].append(Dselect[0])
            else:
                print('Failed to download data')
            # Update
            index = index + 1
            complete = True
            for (channel, stream) in zip(cha_list, streams):
                if len(stream) < max_LFEs:
                    complete = False
        # Stack
        for (channel, stream) in zip(cha_list, streams):
            if (len(stream) > 0):
                stack = linstack([stream], normalize=True, method=method)
                savename = namedir + '/' + station + '_' + channel + '.pkl'
                pickle.dump([stack[0], reference[channel]],
                            open(savename, 'wb'))
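
A minimal call sketch for compute_new_templates. The family name, coordinates, catalog and file names, and parameter values below are placeholders chosen for illustration.

# Build templates for one hypothetical family from its LFE catalog
compute_new_templates(family='familyname', latitude=40.3, longitude=-123.0, \
    catalog='catalogname', filename='catalog.pkl', directory='templates', \
    max_dist=100.0, max_LFEs=150, TDUR=10.0, filt=(1.5, 9.0), dt=0.05, \
    nattempts=3, waittime=10.0, method='RMS')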
Example #7
def compute_templates(filename, TDUR, filt, ratios, dt, ncor, window, \
        winlength, nattempts, waittime, method='RMS'):
    """
    This function computes the waveform for each LFE, cross-correlates
    them with the stack, and keeps only the best to get the final template
    that will be used to find LFEs

    Input:
        type filename = string
        filename = Name of the template
        type TDUR = float
        TDUR = Time to add before and after the time window for tapering
        type filt = tuple of floats
        filt = Lower and upper frequencies of the filter
        type ratios = list of floats
        ratios = Percentage of LFEs to be kept for the final template
        type dt = float
        dt = Time step for resampling
        type ncor = integer
        ncor = Number of points for the cross correlation
        type window = boolean
        window = Do we do the cross correlation on the whole seismogram
                 or a selected time window?
        type winlength = float
        winlength = Length of the window to do the cross correlation
        type nattempts = integer
        nattempts = Number of times we try to download data
        type waittime = positive float
        waittime = Time to wait between two attempts at downloading
        type method = string
        method = Normalization method for linear stack (RMS or Max)
    Output:
        None
    """
    # To transform latitude and longitude into kilometers
    a = 6378.136
    e = 0.006694470
    lat0 = 41.0
    lon0 = -123.0
    dx = (pi / 180.0) * a * cos(lat0 * pi / 180.0) / sqrt(1.0 - e * e * \
        sin(lat0 * pi / 180.0) * sin(lat0 * pi / 180.0))
    dy = (3.6 * pi / 648.0) * a * (1.0 - e * e) / ((1.0 - e * e * sin(lat0 * \
        pi / 180.0) * sin(lat0 * pi / 180.0)) ** 1.5)

    # Get the names of the stations which have a waveform for this LFE family
    file = open('../data/Plourde_2015/detections/' + filename + \
        '_detect5_cull.txt')
    first_line = file.readline().strip()
    staNames = first_line.split()
    file.close()

    # Get the time of LFE detections
    LFEtime = np.loadtxt('../data/Plourde_2015/detections/' + filename + \
        '_detect5_cull.txt', \
        dtype={'names': ('unknown', 'day', 'hour', 'second', 'threshold'), \
             'formats': (float, '|S6', int, float, float)}, \
        skiprows=2)

    # Get the network, channels, and location of the stations
    staloc = pd.read_csv('../data/Plourde_2015/station_locations.txt', \
        sep=r'\s{1,}', header=None, engine='python')
    staloc.columns = ['station', 'network', 'channels', 'location', \
        'server', 'latitude', 'longitude']

    # Get the location of the source of the LFE
    LFEloc = np.loadtxt('../data/Plourde_2015/templates_list.txt', \
        dtype={'names': ('name', 'family', 'lat', 'lon', 'depth', 'eH', \
        'eZ', 'nb'), \
             'formats': ('S13', 'S3', float, float, float, \
        float, float, int)}, \
        skiprows=1)
    for ie in range(0, len(LFEloc)):
        if (filename == LFEloc[ie][0].decode('utf-8')):
            lats = LFEloc[ie][2]
            lons = LFEloc[ie][3]
            xs = dx * (lons - lon0)
            ys = dy * (lats - lat0)

    # Create directory to store the waveforms
    namedir = 'templates/' + filename
    if not os.path.exists(namedir):
        os.makedirs(namedir)

    # Read origin time and station slowness files
    origintime = pickle.load(open('timearrival/origintime.pkl', 'rb'))
    slowness = pickle.load(open('timearrival/slowness.pkl', 'rb'))

    # File to write error messages
    errorfile = 'error/' + filename + '.txt'

    # Loop over stations
    for station in staNames:
        # Create streams
        EW = Stream()
        NS = Stream()
        UD = Stream()
        # Get station metadata for downloading
        for ir in range(0, len(staloc)):
            if (station == staloc['station'][ir]):
                network = staloc['network'][ir]
                channels = staloc['channels'][ir]
                location = staloc['location'][ir]
                server = staloc['server'][ir]
                # Compute source-receiver distance
                latitude = staloc['latitude'][ir]
                longitude = staloc['longitude'][ir]
                xr = dx * (longitude - lon0)
                yr = dy * (latitude - lat0)
                distance = sqrt((xr - xs)**2.0 + (yr - ys)**2.0)
        # Loop on LFEs
        for i in range(0, np.shape(LFEtime)[0]):
            YMD = LFEtime[i][1]
            myYear = 2000 + int(YMD[0:2])
            myMonth = int(YMD[2:4])
            myDay = int(YMD[4:6])
            myHour = LFEtime[i][2] - 1
            myMinute = int(LFEtime[i][3] / 60.0)
            mySecond = int(LFEtime[i][3] - 60.0 * myMinute)
            myMicrosecond = int(1000000.0 * \
                (LFEtime[i][3] - 60.0 * myMinute - mySecond))
            Tori = UTCDateTime(year=myYear, month=myMonth, day=myDay, \
                hour=myHour, minute=myMinute, second=mySecond, \
                microsecond=myMicrosecond)
            Tstart = Tori - TDUR
            Tend = Tori + 60.0 + TDUR
            # First case: we can get the data from IRIS
            if (server == 'IRIS'):
                (D, orientation) = get_from_IRIS(station, network, channels, \
                    location, Tstart, Tend, filt, dt, nattempts, waittime, \
                    errorfile)
            # Second case: we get the data from NCEDC
            elif (server == 'NCEDC'):
                (D, orientation) = get_from_NCEDC(station, network, channels, \
                    location, Tstart, Tend, filt, dt, nattempts, waittime, \
                    errorfile)
            else:
                raise ValueError( \
                    'You can only download data from IRIS and NCEDC')
            if isinstance(D, obspy.core.stream.Stream):
                # Add to stream
                if (channels == 'EH1,EH2,EHZ'):
                    EW.append(D.select(channel='EH1').slice(Tori, \
                        Tori + 60.0)[0])
                    NS.append(D.select(channel='EH2').slice(Tori, \
                        Tori + 60.0)[0])
                    UD.append(D.select(channel='EHZ').slice(Tori, \
                        Tori + 60.0)[0])
                else:
                    EW.append(D.select(component='E').slice(Tori, \
                        Tori + 60.0)[0])
                    NS.append(D.select(component='N').slice(Tori, \
                        Tori + 60.0)[0])
                    UD.append(D.select(component='Z').slice(Tori, \
                        Tori + 60.0)[0])
            else:
                print('Failed to download data')
        # Stack
        if (len(EW) > 0 and len(NS) > 0 and len(UD) > 0):
            # Stack waveforms
            EWstack = linstack([EW], normalize=True, method=method)
            NSstack = linstack([NS], normalize=True, method=method)
            UDstack = linstack([UD], normalize=True, method=method)
            # Initializations
            maxCC = np.zeros(len(EW))
            cc0EW = np.zeros(len(EW))
            cc0NS = np.zeros(len(EW))
            cc0UD = np.zeros(len(EW))
            if window:
                # Get time arrival
                arrivaltime = origintime[filename] + \
                    slowness[station] * distance
                Tmin = arrivaltime - winlength / 2.0
                Tmax = arrivaltime + winlength / 2.0
                if Tmin < 0.0:
                    Tmin = 0.0
                if Tmax > EWstack[0].stats.delta * (EWstack[0].stats.npts - 1):
                    Tmax = EWstack[0].stats.delta * (EWstack[0].stats.npts - 1)
                ibegin = int(Tmin / EWstack[0].stats.delta)
                iend = int(Tmax / EWstack[0].stats.delta) + 1
                # Cross correlation
                for i in range(0, len(EW)):
                    ccEW = correlate(EWstack[0].data[ibegin : iend], \
                        EW[i].data[ibegin : iend], ncor)
                    ccNS = correlate(NSstack[0].data[ibegin : iend], \
                        NS[i].data[ibegin : iend], ncor)
                    ccUD = correlate(UDstack[0].data[ibegin : iend], \
                        UD[i].data[ibegin : iend], ncor)
                    maxCC[i] = np.max(ccEW) + np.max(ccNS) + np.max(ccUD)
                    cc0EW[i] = ccEW[ncor]
                    cc0NS[i] = ccNS[ncor]
                    cc0UD[i] = ccUD[ncor]
            else:
                # Cross correlation
                for i in range(0, len(EW)):
                    ccEW = correlate(EWstack[0].data, EW[i].data, ncor)
                    ccNS = correlate(NSstack[0].data, NS[i].data, ncor)
                    ccUD = correlate(UDstack[0].data, UD[i].data, ncor)
                    maxCC[i] = np.max(ccEW) + np.max(ccNS) + np.max(ccUD)
                    cc0EW[i] = ccEW[ncor]
                    cc0NS[i] = ccNS[ncor]
                    cc0UD[i] = ccUD[ncor]
            # Sort cross correlations
            index = np.flip(np.argsort(maxCC), axis=0)
            EWbest = Stream()
            NSbest = Stream()
            UDbest = Stream()
            # Compute stack of best LFEs
            for j in range(0, len(ratios)):
                nLFE = int(ratios[j] * len(EW) / 100.0)
                EWselect = Stream()
                NSselect = Stream()
                UDselect = Stream()
                for i in range(0, nLFE):
                    EWselect.append(EW[index[i]])
                    NSselect.append(NS[index[i]])
                    UDselect.append(UD[index[i]])
                # Stack best LFEs
                EWbest.append(linstack([EWselect], normalize=True, \
                    method=method)[0])
                NSbest.append(linstack([NSselect], normalize=True, \
                    method=method)[0])
                UDbest.append(linstack([UDselect], normalize=True, \
                    method=method)[0])
            # Plot figure
            plt.figure(1, figsize=(20, 15))
            params = {'xtick.labelsize': 16, 'ytick.labelsize': 16}
            pylab.rcParams.update(params)
            colors = cm.rainbow(np.linspace(0, 1, len(ratios)))
            # East - West component
            ax1 = plt.subplot(311)
            dt = EWstack[0].stats.delta
            nt = EWstack[0].stats.npts
            t = dt * np.arange(0, nt)
            for j in range(0, len(ratios)):
                if (method == 'RMS'):
                    norm = EWbest[j].data / np.sqrt(np.mean(np.square( \
                        EWbest[j].data)))
                elif (method == 'MAD'):
                    norm = EWbest[j].data / np.median(np.abs(EWbest[j].data - \
                        np.median(EWbest[j].data)))
                else:
                    raise ValueError('Method must be RMS or MAD')
                norm = np.nan_to_num(norm)
                plt.plot(t, norm, color = colors[j], \
                    label = str(int(ratios[j])) + '%')
            if (method == 'RMS'):
                norm = EWstack[0].data / np.sqrt(np.mean(np.square( \
                    EWstack[0].data)))
            elif (method == 'MAD'):
                norm = EWstack[0].data / np.median(np.abs(EWstack[0].data - \
                    np.median(EWstack[0].data)))
            else:
                raise ValueError('Method must be RMS or MAD')
            norm = np.nan_to_num(norm)
            plt.plot(t, norm, 'k', label='All')
            if window:
                plt.axvline(Tmin, linewidth=2, color='grey')
                plt.axvline(Tmax, linewidth=2, color='grey')
            plt.xlim([np.min(t), np.max(t)])
            plt.title('East - West component', fontsize=24)
            plt.xlabel('Time (s)', fontsize=24)
            plt.legend(loc=1)
            # North - South component
            ax2 = plt.subplot(312)
            dt = NSstack[0].stats.delta
            nt = NSstack[0].stats.npts
            t = dt * np.arange(0, nt)
            for j in range(0, len(ratios)):
                if (method == 'RMS'):
                    norm = NSbest[j].data / np.sqrt(np.mean(np.square( \
                        NSbest[j].data)))
                elif (method == 'MAD'):
                    norm = NSbest[j].data / np.median(np.abs(NSbest[j].data - \
                        np.median(NSbest[j].data)))
                else:
                    raise ValueError('Method must be RMS or MAD')
                norm = np.nan_to_num(norm)
                plt.plot(t, norm, color = colors[j], \
                    label = str(int(ratios[j])) + '%')
            if (method == 'RMS'):
                norm = NSstack[0].data / np.sqrt(np.mean(np.square( \
                    NSstack[0].data)))
            elif (method == 'MAD'):
                norm = NSstack[0].data / np.median(np.abs(NSstack[0].data - \
                    np.median(NSstack[0].data)))
            else:
                raise ValueError('Method must be RMS or MAD')
            norm = np.nan_to_num(norm)
            plt.plot(t, norm, 'k', label='All')
            if window:
                plt.axvline(Tmin, linewidth=2, color='grey')
                plt.axvline(Tmax, linewidth=2, color='grey')
            plt.xlim([np.min(t), np.max(t)])
            plt.title('North - South component', fontsize=24)
            plt.xlabel('Time (s)', fontsize=24)
            plt.legend(loc=1)
            # Vertical component
            ax3 = plt.subplot(313)
            dt = UDstack[0].stats.delta
            nt = UDstack[0].stats.npts
            t = dt * np.arange(0, nt)
            for j in range(0, len(ratios)):
                if (method == 'RMS'):
                    norm = UDbest[j].data / np.sqrt(np.mean(np.square( \
                        UDbest[j].data)))
                elif (method == 'MAD'):
                    norm = UDbest[j].data / np.median(np.abs(UDbest[j].data - \
                        np.median(UDbest[j].data)))
                else:
                    raise ValueError('Method must be RMS or MAD')
                norm = np.nan_to_num(norm)
                plt.plot(t, norm, color = colors[j], \
                    label = str(int(ratios[j])) + '%')
            if (method == 'RMS'):
                norm = UDstack[0].data / np.sqrt(np.mean(np.square( \
                    UDstack[0].data)))
            elif (method == 'MAD'):
                norm = UDstack[0].data / np.median(np.abs(UDstack[0].data - \
                    np.median(UDstack[0].data)))
            else:
                raise ValueError('Method must be RMS or MAD')
            norm = np.nan_to_num(norm)
            plt.plot(t, norm, 'k', label='All')
            if window:
                plt.axvline(Tmin, linewidth=2, color='grey')
                plt.axvline(Tmax, linewidth=2, color='grey')
            plt.xlim([np.min(t), np.max(t)])
            plt.title('Vertical component', fontsize=24)
            plt.xlabel('Time (s)', fontsize=24)
            plt.legend(loc=1)
            # End figure
            plt.suptitle(station, fontsize=24)
            plt.savefig(namedir + '/' + station + '.eps', format='eps')
            ax1.clear()
            ax2.clear()
            ax3.clear()
            plt.close(1)
            # Save stacks into files
            savename = namedir + '/' + station + '.pkl'
            pickle.dump([EWstack[0], NSstack[0], UDstack[0]], \
                open(savename, 'wb'))
            for j in range(0, len(ratios)):
                savename = namedir + '/' + station + '_' + \
                    str(int(ratios[j])) + '.pkl'
                pickle.dump([EWbest[j], NSbest[j], UDbest[j]], \
                    open(savename, 'wb'))
            # Save cross correlations into files
            savename = namedir + '/' + station + '_cc.pkl'
            pickle.dump([cc0EW, cc0NS, cc0UD], \
                open(savename, 'wb'))
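
A minimal call sketch for compute_templates. The template name and parameter values are placeholders; note that ncor is the number of lag samples on each side of zero lag (the cross-correlation array has 2 * ncor + 1 points, with zero lag at index ncor), so ncor=400 corresponds to 20 s of lag at dt = 0.05 s.

# Keep the best 20% to 80% of LFEs and compare the resulting stacks
compute_templates('templatename', TDUR=10.0, filt=(1.5, 9.0), \
    ratios=[20.0, 40.0, 60.0, 80.0], dt=0.05, ncor=400, window=True, \
    winlength=10.0, nattempts=3, waittime=10.0, method='RMS')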