Code Example #1
File: FiniteFaultinv.py  Project: philcummins/ffipy
def GFSelectZ(stlat, stlon, hdir):
    # eplat/eplon are assumed to be module-level globals with the epicentre.
    dist = locations2degrees(eplat, eplon, stlat, stlon)

    # Some GFs exist only for odd distances (stored in tenths of a degree).
    dist_str = str(int(dist * 10.) / 2 * 2 + 1)
    dist_form = dist_str.zfill(4)

    # Load the four Green's-function components.
    trPP = read(hdir + "PP/GF." + dist_form + ".SY.LHZ.SAC")[0]
    trRR = read(hdir + "RR/GF." + dist_form + ".SY.LHZ.SAC")[0]
    trRT = read(hdir + "RT/GF." + dist_form + ".SY.LHZ.SAC")[0]
    trTT = read(hdir + "TT/GF." + dist_form + ".SY.LHZ.SAC")[0]

    return trPP, trRR, trRT, trTT
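A minimal usage sketch (values hypothetical; eplat/eplon must already exist as module-level globals, and hdir must point at a Green's-function tree laid out as above):

eplat, eplon = -35.91, -72.73   # epicentre, assumed globals
trPP, trRR, trRT, trTT = GFSelectZ(-33.45, -70.67, "/data/GFS/H035.5/")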
Code Example #2
File: ffisynTEST.py  Project: philcummins/ffipy
def StDistandAzi(traces, hyp, xml_dir):
    '''
    Given a list of station ids, a tuple with
    (lat, lon, depth) of the hypocentre and
    the directory with the XML metafiles, return a
    dictionary with the distance and the azimuth of
    each station.
    '''
    Stdistribution = {}
    for trid in traces:
        metafile = xml_dir + "META." + trid + ".xml"
        META = DU.getMetadataFromXML(metafile)[trid]
        lat = META['latitude']
        lon = META['longitude']
        dist = locations2degrees(hyp[0], hyp[1], lat, lon)
        # Azimuth from the hypocentre to the station, in radians with
        # flipped sign (plotting convention).
        azi = -np.pi / 180. * gps2DistAzimuth(lat, lon,
                                              hyp[0], hyp[1])[2]
        Stdistribution[trid] = {'azi': azi, 'dist': dist}

    return Stdistribution
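A hypothetical call, assuming the META.<id>.xml files exist in the given directory:

stations = StDistandAzi(["IU.TRQA.00.LHZ", "II.NNA.00.LHZ"],
                        (-35.91, -72.73, 35.), "DATA/")
print stations["II.NNA.00.LHZ"]["dist"]   # distance in degrees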
Code Example #3
def get_travel_time(sourcelatitude, sourcelongitude, sourcedepthinmeters,
                    receiverlatitude, receiverlongitude,
                    receiverdepthinmeters, phase_name):
    """
    Fully working travel time callback implementation.
    """
    if receiverdepthinmeters:
        raise ValueError("This travel time implementation cannot calculate "
                         "buried receivers.")

    great_circle_distance = geodetics.locations2degrees(
        sourcelatitude, sourcelongitude, receiverlatitude, receiverlongitude)

    try:
        tts = MODEL.get_travel_times(
            source_depth_in_km=sourcedepthinmeters / 1000.0,
            distance_in_degree=great_circle_distance,
            phase_list=[phase_name])
    except Exception as e:
        raise ValueError(str(e))

    if not tts:
        return None

    # For any phase, return the first time.
    return tts[0].time
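A sketch of how the callback might be invoked (values hypothetical; MODEL is assumed to be a preloaded obspy.taup.TauPyModel):

t = get_travel_time(sourcelatitude=-35.91, sourcelongitude=-72.73,
                    sourcedepthinmeters=35000.0, receiverlatitude=-33.45,
                    receiverlongitude=-70.67, receiverdepthinmeters=0.0,
                    phase_name="P")   # None if no arrival exists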
Code Example #4
File: misfit_gui.py  Project: Debesys/LASIF
    def on_stations_listWidget_currentItemChanged(self, current, previous):
        if current is None:
            return
        wave = self.comm.query.get_matching_waveforms(
            self.current_event, self.current_iteration, self.current_station)

        event = self.comm.events.get(self.current_event)

        great_circle_distance = locations2degrees(
            event["latitude"], event["longitude"],
            wave.coordinates["latitude"], wave.coordinates["longitude"])
        tts = getTravelTimes(great_circle_distance, event["depth_in_km"],
                             model="ak135")

        windows_for_station = \
            self.current_window_manager.get_windows_for_station(
                self.current_station)

        self._reset_all_plots()

        for component in ["Z", "N", "E"]:
            plot_widget = getattr(self.ui, "%s_graph" % component.lower())
            data_tr = [tr for tr in wave.data
                       if tr.stats.channel[-1].upper() == component]
            if data_tr:
                tr = data_tr[0]
                plot_widget.data_id = tr.id
                times = tr.times()
                plot_widget.plot(times, tr.data, pen="k")
            else:
                plot_widget.data_id = None
            synth_tr = [tr for tr in wave.synthetics
                        if tr.stats.channel[-1].upper() == component]
            if synth_tr:
                tr = synth_tr[0]
                times = tr.times()
                plot_widget.plot(times, tr.data, pen="r")

            if data_tr or synth_tr:
                for tt in tts:
                    if tt["time"] >= times[-1]:
                        continue
                    if tt["phase_name"][0].lower() == "p":
                        pen = "#008c2866"
                    else:
                        pen = "#95000066"
                    plot_widget.addLine(x=tt["time"], pen=pen, z=-10)

            plot_widget.autoRange()

            window = [_i for _i in windows_for_station
                      if _i.channel_id[-1].upper() == component]
            if window:
                plot_widget.windows = window[0]
                for win in window[0].windows:
                    WindowLinearRegionItem(win, event, parent=plot_widget)
Code Example #5
File: ffisynTEST.py  Project: philcummins/ffipy
def hypo2dist(hyploc1, hyploc2, r=6371.):
    '''Given 2 tuples with hypocentre coordinates
       (lat, lon, depth), return the distance in km
       between them.'''
    r1 = r - hyploc1[2]
    r2 = r - hyploc2[2]
    theta = np.pi / 180. * locations2degrees(hyploc1[0], hyploc1[1],
                                             hyploc2[0], hyploc2[1])
    ## Law of cosines:
    l = np.sqrt(r1**2. + r2**2. - 2. * r2 * r1 * np.cos(theta))
    return l
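Hypothetical usage; the result is the straight-line chord through the Earth, which for two surface points (depth 0) reduces to 2*r*sin(theta/2):

d_km = hypo2dist((-35.91, -72.73, 35.), (-33.45, -70.67, 10.))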
Code Example #6
File: window_selector.py  Project: KNMI/VERCE
 def calculate_ttimes(self):
     """
     Calculate theoretical travel times. Only call if station and event
     information is available!
     """
     dist_in_deg = geodetics.locations2degrees(
         self.station.latitude, self.station.longitude,
         self.event.latitude, self.event.longitude)
     tts = getTravelTimes(dist_in_deg, self.event.depth_in_m / 1000.0,
                          model=self.config.earth_model)
     self.ttimes = sorted(tts, key=lambda x: x["time"])
     logger.info("Calculated travel times.")
Code Example #7
File: misfit_gui.py  Project: seancug/LASIF
 def plot_traveltimes(self):
     great_circle_distance = locations2degrees(
         self.event["latitude"], self.event["longitude"],
         self.data["coordinates"]["latitude"],
         self.data["coordinates"]["longitude"])
     tts = getTravelTimes(great_circle_distance, self.event["depth_in_km"],
                          model="ak135")
     for component in ["z", "n", "e"]:
         axis = getattr(self, "plot_axis_%s" % component)
         ymin, ymax = axis.get_ylim()
         for phase in tts:
             if phase["phase_name"].lower().startswith("p"):
                 color = "green"
             else:
                 color = "red"
             axis.axvline(x=phase["time"], ymin=-1, ymax=+1,
                          color=color, alpha=0.5)
         axis.set_ylim(ymin, ymax)
Code Example #8
File: mess2014.py  Project: obspy/mess2014
def attach_coordinates_to_traces(stream, inventory, event=None):
    """
    Function to add coordinates to traces.

    It extracts coordinates from a :class:`obspy.station.inventory.Inventory`
    object and writes them to each trace's stats attribute. If an event is
    given, the distance in degree will also be attached.

    :param stream: Waveforms for the array processing.
    :type stream: :class:`obspy.core.stream.Stream`
    :param inventory: Station metadata for waveforms
    :type inventory: :class:`obspy.station.inventory.Inventory`
    :param event: If the event is given, the event distance in degree will also
     be attached to the traces.
    :type event: :class:`obspy.core.event.Event`
    """
    # Get the coordinates for all stations
    coords = {}
    for network in inventory:
        for station in network:
            coords["%s.%s" % (network.code, station.code)] = \
                {"latitude": station.latitude,
                 "longitude": station.longitude,
                 "elevation": station.elevation}

    # Calculate the event-station distances.
    if event:
        event_lat = event.origins[0].latitude
        event_lng = event.origins[0].longitude
        for value in coords.values():
            value["distance"] = locations2degrees(
                value["latitude"], value["longitude"], event_lat, event_lng)

    # Attach the information to the traces.
    for trace in stream:
        station = ".".join(trace.id.split(".")[:2])
        value = coords[station]
        trace.stats.coordinates = AttribDict()
        trace.stats.coordinates.latitude = value["latitude"]
        trace.stats.coordinates.longitude = value["longitude"]
        trace.stats.coordinates.elevation = value["elevation"]
        if event:
            trace.stats.distance = value["distance"]
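A usage sketch, assuming st, inv and cat were obtained from the usual obspy read/get_stations/get_events calls (names hypothetical):

attach_coordinates_to_traces(st, inv, event=cat[0])
print st[0].stats.coordinates.latitude, st[0].stats.distance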
Code Example #9
File: request.py  Project: ninety47/pisces
def distaz_query(records, deg=None, km=None, swath=None):
    """ 
    Out-of-database subset based on distances and/or azimuths.
    
    Parameters
    ----------
    records : iterable of objects with lat, lon attribute floats
        Target of the subset.
    deg : list or tuple of numbers, optional
        (centerlat, centerlon, minr, maxr)
        minr, maxr in degrees or None for unconstrained.
    km : list or tuple of numbers, optional
        (centerlat, centerlon, minr, maxr)
        minr, maxr in km or None for unconstrained.
    swath : list or tuple of numbers, optional
        (lat, lon, azimuth, tolerance)
        Azimuth (from North) +/-tolerance from lat,lon point in degrees.

    Returns
    -------
    list
        Subset of supplied records.

    """
    # Initial all-True mask to propagate through multiple logical AND comparisons.
    mask0 = np.ones(len(records), dtype=bool)

    if deg:
        dgen = (geod.locations2degrees(irec.lat, irec.lon, deg[0], deg[1]) \
                for irec in records)
        degrees = np.fromiter(dgen, dtype=float)
        if deg[2] is not None:
            mask0 = np.logical_and(mask0, deg[2] <= degrees)
        if deg[3] is not None:
            mask0 = np.logical_and(mask0, deg[3] >= degrees)

    if km:
        #???: this may be backwards
        mgen = (geod.gps2DistAzimuth(irec.lat, irec.lon, km[0], km[1])[0]
                for irec in records)
        kilometers = np.fromiter(mgen, dtype=float) / 1e3
        if km[2] is not None:
            mask0 = np.logical_and(mask0, km[2] <= kilometers)
        if km[3] is not None:
            mask0 = np.logical_and(mask0, km[3] >= kilometers)

    if swath is not None:
        minaz = swath[2] - swath[3]
        maxaz = swath[2] + swath[3]
        #???: this may be backwards
        azgen = (geod.gps2DistAzimuth(irec.lat, irec.lon, swath[0], swath[1])[1]
                 for irec in records)
        azimuths = np.fromiter(azgen, dtype=float)
        mask0 = np.logical_and(mask0, azimuths >= minaz)
        mask0 = np.logical_and(mask0, azimuths <= maxaz)

    idx = np.nonzero(mask0)[0]
    recs = [records[i] for i in idx]

    return recs
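Hypothetical usage: keep records between 30 and 90 degrees of a reference point and inside a 45 +/- 10 degree azimuth swath (site_rows is any sequence of objects with lat/lon attributes):

recs = distaz_query(site_rows, deg=(-35.91, -72.73, 30., 90.),
                    swath=(-35.91, -72.73, 45., 10.))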
Code Example #10
 def assertLoc(lat1, long1, lat2, long2, approx_distance):
     self.assertTrue(
         abs(math.radians(locations2degrees(lat1, long1, lat2, long2))
             * 6371 - approx_distance) <= 20)
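The radians(deg) * 6371 factor is the usual degrees-to-kilometres shortcut (one degree is roughly 111.19 km); recent obspy versions expose the same conversion as a helper, so an equivalent check could be written as:

from obspy.geodetics import degrees2kilometers  # import path varies by obspy version
km = degrees2kilometers(locations2degrees(lat1, long1, lat2, long2))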
Code Example #11
File: ffisynTEST.py  Project: philcummins/ffipy
def main(argv=sys.argv):

    # Earth's parameters
    #~ beta = 4.e3 #m/s
    #~ rho = 3.e3 #kg/m^3
    #~ mu = rho*beta*beta

    PLotSt = ["IU.TRQA.00.LHZ",
              "IU.LVC.00.LHZ",
              "II.NNA.00.LHZ",
              "IU.RAR.00.LHZ"]

    #PlotSubf = [143, 133, 123, 113, 103, 93,
    #            83, 73, 63, 53]
    PlotSubf = [6, 3]

    # Set rup_vel = 0 to have a point source solution
    RupVel = 2.1      # Chilean eq from Lay et al
    t_h = 10.         # Half duration for each sf
    noiselevel = 0.0  # L1 norm level of noise
    mu = 40e9
    #W-Phase filter 
    corners = 4.
    fmin = 0.001
    fmax = 0.005
    
    ### Data from Chilean 2010 EQ (Same as W phase inv.) 
    strike = 18.
    dip    = 18.
    rake   = 104. # 109.
    
    rakeA = rake + 45.
    rakeB = rake - 45.
    
    
    ### Fault's grid parameters
    nsx   = 21 #Number of sf along strike
    nsy   = 11 #Number of sf along dip
    flen  = 600. #Fault's longitude [km] along strike
    fwid  = 300. #Fault's longitude [km] along dip
    direc = 0    #Directivity 0 = bilateral
    Min_h = 10.  #Min depth of the fault
    
    
    ### Derivated parameters:
    nsf = nsx*nsy
    sflen = flen/float(nsx)         
    sfwid = fwid/float(nsy)
    swp = [1, 0, 2]  # useful to swap (lat, lon, depth)
    mindist = flen * fwid  # minimum distance to the hypocentre (initialising)
    
    ###Chessboard
    #weight = np.load("RealSol.npy") 
    weight = np.zeros(nsf)
    weight[::2] = 1 
    #weight[::2] = 1 
    #~ weight[10]=15
    #~ weight[5001]=10
    #~ weight[3201]=2
    
    
    
    ## Setting dirs and reading files.
    GFdir = "/home/roberto/data/GFS/"
    workdir = os.path.abspath(".")+"/"
    datadir = workdir + "DATA/"
    tracesfilename = workdir + "goodtraces.dat"
    tracesdir = workdir + "WPtraces/"
    
    try:
        reqfilename = glob.glob(workdir + '*.syn.req')[0]
    except IndexError:
        print "There is no *.syn.req file in the dir"
        sys.exit()

    basename = reqfilename.split("/")[-1][:-4]

    if not os.path.exists(tracesfilename):
        print tracesfilename, "does not exist."
        exit()
    
    if not os.path.exists(datadir):
            os.makedirs(datadir)
    
    if not os.path.exists(tracesdir):
            os.makedirs(tracesdir)
 
    tracesfile = open(tracesfilename)    
    reqfile =  open(reqfilename)    
    
    trlist = readtraces(tracesfile)
    eqdata = readreq(reqfile)    

    tracesfile.close()
    reqfile.close()   
    
    #### Hypocentre from
    ### http://earthquake.usgs.gov/earthquakes/eqinthenews/2010/us2010tfan/
    cmteplat = -35.91   #-35.85 #-36.03 #-35.83
    cmteplon = -72.73   #-72.72 #-72.83 #-72.67
    cmtepdepth = 35.
    eq_hyp = (cmteplat, cmteplon, cmtepdepth)

    # Defining the sf system
    grid, sblt = fault_grid('CL-2010',cmteplat,cmteplon,
                            cmtepdepth, direc,
                            Min_h, strike, dip, rake, flen,fwid ,nsx,nsy,
                            Verbose=False,ffi_io=True,gmt_io=True)
    
    print ('CL-2010',cmteplat,cmteplon,
                            cmtepdepth, direc,
                            Min_h, strike, dip, rake, flen,fwid ,nsx,nsy)
    print grid[0][1]
    #sys.exit()
    #This calculation is inside of the loop
    #~ NP = [strike, dip, rake]
    #~ M = np.array(NodalPlanetoMT(NP))  
    #~ Mp = np.sum(M**2)/np.sqrt(2)    
     
    #############################################################################
    ######Determining the sf closest to the hypocentre:    
    min_Dist_hyp_subf = flen *fwid
    for subf in range(nsf):
        sblat   = grid[subf][1]
        sblon   = grid[subf][0]
        sbdepth = grid[subf][2]              
        sf_hyp =  (sblat,sblon, sbdepth)        
        Dist_hyp_subf = hypo2dist(eq_hyp,sf_hyp)
        if Dist_hyp_subf < min_Dist_hyp_subf:
            min_Dist_hyp_subf = Dist_hyp_subf
            min_sb_hyp = sf_hyp
            hyp_subf = subf
    #### Determining trimming times:
    test_tr = read(GFdir + "H003.5/PP/GF.0001.SY.LHZ.SAC")[0]
    t0 = test_tr.stats.starttime
    TrimmingTimes = {}  # Min. distance from the fault to each station.
    A = 0
    for trid in trlist:     
        metafile = workdir + "DATA/" + "META." + trid + ".xml"
        META = DU.getMetadataFromXML(metafile)[trid]
        stlat = META['latitude']
        stlon = META['longitude'] 
        dist =   locations2degrees(min_sb_hyp[0],min_sb_hyp[1],\
                                   stlat,stlon) 
        parrivaltime = getTravelTimes(dist,min_sb_hyp[2])[0]['time']        
        ta = t0 + parrivaltime
        tb = ta + round(15.*dist) 
        TrimmingTimes[trid] = (ta, tb)

    ###########################

    DIST = []
    # Ordering the stations in terms of distance
    for trid in trlist: 
        metafile = workdir + "DATA/" + "META." + trid + ".xml"
        META = DU.getMetadataFromXML(metafile)[trid]
        lat = META['latitude']
        lon = META['longitude']
        trdist = locations2degrees(cmteplat,
                                   cmteplon,lat,lon) 
        DIST.append(trdist)   

    DistIndex = lstargsort(DIST)
    trlist = [trlist[i] for i in DistIndex]
  
    stdistribution = StDistandAzi(trlist, eq_hyp , workdir + "DATA/")
    StDistributionPlot(stdistribution)
    #exit()

    # Main loop
    for subf in range(nsf):
        print subf
        sflat   = grid[subf][1]
        sflon   = grid[subf][0]           
        sfdepth = grid[subf][2]
        #~ strike = grid[subf][3] #+ 360.
        #~ dip    = grid[subf][4]
        #~ rake   = grid[subf][5] #     
        NP = [strike, dip, rake]  
        NPA = [strike, dip, rakeA]
        NPB = [strike, dip, rakeB]        


        
        M = np.array(NodalPlanetoMT(NP))   
        MA = np.array(NodalPlanetoMT(NPA)) 
        MB = np.array(NodalPlanetoMT(NPB)) 
        #Time delay is calculated as the time in which 
        #the rupture reach the subfault
            
        sf_hyp = (sflat, sflon, sfdepth) 
        Dist_ep_subf = hypo2dist(eq_hyp,sf_hyp)
        
        if Dist_ep_subf < mindist:
            mindist = Dist_ep_subf
            minsubf = subf
        
                
        if RupVel == 0:
            t_d = eqdata['time_shift']
        else:
            t_d = round(Dist_ep_subf/RupVel) #-59.
       
        print sflat, sflon, sfdepth
        # Looking for the best depth dir:
        depth = []
        depthdir = []
        for file in os.listdir(GFdir):
            if file[-2:] == ".5":
                depthdir.append(file)
                depth.append(float(file[1:-2]))            
        BestDirIndex = np.argsort(abs(sfdepth\
                                  - np.array(depth)))[0]      
        hdir = GFdir + depthdir[BestDirIndex] + "/"     
        
        ###

        SYN = np.array([])
        SYNA = np.array([])
        SYNB = np.array([])
        for trid in trlist:     
            
            metafile = workdir + "DATA/" + "META." + trid + ".xml"
            META = DU.getMetadataFromXML(metafile)[trid]
            lat = META['latitude']
            lon = META['longitude']  
            
            #Subfault loop               
            #GFs Selection:
            ##Change to following loop
            
            dist = locations2degrees(sflat,sflon,lat,lon)                                
            azi =  -np.pi/180.*gps2DistAzimuth(lat,lon,
                       sflat,sflon)[2] 
            trPPsy,  trRRsy, trRTsy,  trTTsy = \
                                       GFSelectZ(hdir,dist)          
            
            
 
            
            trROT =  MTrotationZ(azi, trPPsy,  trRRsy, trRTsy,  trTTsy) 
            orig = trROT[0].stats.starttime  
            dt = trROT[0].stats.delta                       

            trianglen = 2 * int(t_h / dt) - 1
            FirstValid = int(trianglen / 2.) + 1  # to delete
            window = triang(trianglen)
            window /= np.sum(window)
            #window = np.array([1.])

            parrivaltime = getTravelTimes(dist, sfdepth)[0]['time']

            t1 = TrimmingTimes[trid][0] - t_d
            t2 = TrimmingTimes[trid][1] - t_d

            for trR in trROT:
                trR.data *= 10.**-21 ## To get M in Nm                   
                trR.data -= trR.data[0]
                AUX1 = len(trR)
                trR.data = convolve(trR.data,window,mode='valid') 
                AUX2 = len(trR)
                mean = np.mean(np.hstack((trR.data[0] * np.ones(FirstValid),
                    trR.data[:int(60. / trR.stats.delta) - FirstValid + 1])))
                #mean = np.mean(trR.data[:60])
                trR.data -= mean      
                trR.data = bp.bandpassfilter(trR.data,len(trR), trR.stats.delta,\
                                             corners , 1 , fmin, fmax)  
                t_l = dt*0.5*(AUX1 - AUX2)                             
                trR.trim(t1-t_l,t2-t_l, pad=True, fill_value=trR.data[0])  #We lost t_h due to the convolution        
            


                   
            #~ for trR in trROT:
                #~ trR.data *= 10.**-23 ## To get M in Nm                   
                #~ trR.data -= trR.data[0]
 
                #~ trR.data = convolve(trR.data,window,mode='same') 

                #~ #mean = np.mean(np.hstack((trR.data[0]*np.ones(FirstValid),\
                               #~ #trR.data[:60./trR.stats.delta*1.-FirstValid+1])))
                #~ mean = np.mean(trR.data[:60])
                #~ trR.data -= mean      
                #~ trR.data = bp.bandpassfilter(trR.data,len(trR), trR.stats.delta,\
                                             #~ corners , 1 , fmin, fmax)  
                            
                #~ trR.trim(t1,t2,pad=True, fill_value=trR.data[0])     
           
            trROT = np.array(trROT)  
            syn  =  np.dot(trROT.T,M) 
            synA =  np.dot(trROT.T,MA)
            synB =  np.dot(trROT.T,MB)
            
            SYN = np.append(SYN,syn)  
            SYNA = np.append(SYNA,synA)
            SYNB = np.append(SYNB,synB)
            
            
        print np.shape(A), np.shape(np.array([SYN]))    
        if subf == 0: 
            A = np.array([SYN])
            AA = np.array([SYNA])
            AB = np.array([SYNB])
        else:
            A = np.append(A,np.array([SYN]),0)    
            AA = np.append(AA,np.array([SYNA]),0)
            AB = np.append(AB,np.array([SYNB]),0)
            
            
            
    AC = np.vstack((AA,AB))
    print np.shape(AC)
    print np.shape(weight)
    B = np.dot(A.T,weight)
    stsyn = Stream()
    n = 0
    Ntraces= {}
    for trid in trlist: 
        spid = trid.split(".")        
        print trid
        NMIN = int(round(1. + (TrimmingTimes[trid][1] - TrimmingTimes[trid][0]) / dt))
        Ntraces[trid] = (n, NMIN + n)
        trsyn = Trace(B[n:NMIN + n])
        n += NMIN        
        trsyn.stats.network = spid[0]
        trsyn.stats.station = spid[1]
        trsyn.stats.location = spid[2]
        trsyn.stats.channel = spid[3] 
        trsyn = AddNoise(trsyn,level = noiselevel)
        #trsyn.stats.starttime = 
        stsyn.append(trsyn)
        
       
    stsyn.write(workdir+"WPtraces/" + basename + ".decov.trim.mseed",
                 format="MSEED")           
                
    #####################################################    
    # Plotting:
    #####################################################
    #we are going to reflect the y axis later, so:
    print minsubf
    hypsbloc = [minsubf / nsy , -(minsubf % nsy) - 2]

    #Creating the strike and dip axis:
    StrikeAx= np.linspace(0,flen,nsx+1)
    DipAx= np.linspace(0,fwid,nsy+1)
    DepthAx = DipAx*np.sin(np.pi/180.*dip) + Min_h    
    hlstrike = StrikeAx[hypsbloc[0]] + sflen*0.5
        
    hldip = DipAx[hypsbloc[1]] + sfwid*0.5 
    hldepth = DepthAx[hypsbloc[1]] + sfwid*0.5*np.sin(np.pi/180.*dip)
       
    StrikeAx = StrikeAx - hlstrike
    DipAx =     DipAx   - hldip
 

    
    XX, YY = np.meshgrid(StrikeAx, DepthAx)
    XX, ZZ = np.meshgrid(StrikeAx, DipAx )

   
    sbarea = sflen*sfwid
    
    SLIPS = weight.reshape(nsx,nsy).T#[::-1,:]
    SLIPS /= mu*1.e6*sbarea
    
    ######Plot:#####################
    plt.figure()
    ax = host_subplot(111)
    im = ax.pcolor(XX, YY, SLIPS, cmap="jet")    
    ax.set_ylabel('Depth [km]')       
    ax.set_ylim(DepthAx[-1],DepthAx[0])  
    
    # Creating a twin plot 
    ax2 = ax.twinx()
    #im2 = ax2.pcolor(XX, ZZ, SLIPS[::-1,:], cmap="Greys") 
    im2 = ax2.pcolor(XX, ZZ, SLIPS[::-1,:], cmap="jet")    
    
    ax2.set_ylabel('Distance along the dip [km]')
    ax2.set_xlabel('Distance along the strike [km]')    
    ax2.set_ylim(DipAx[0],DipAx[-1])
    ax2.set_xlim(StrikeAx[0],StrikeAx[-1])       
                         
                         
    ax.axis["bottom"].major_ticklabels.set_visible(False) 
    ax2.axis["bottom"].major_ticklabels.set_visible(False)
    ax2.axis["top"].set_visible(True)
    ax2.axis["top"].label.set_visible(True)
    
    
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("bottom", size="5%", pad=0.1)
    cb = plt.colorbar(im, cax=cax, orientation="horizontal")
    cb.set_label("Slip [m]") 
    ax2.plot([0], [0], '*', ms=225./(nsy+4))
    ax2.set_xticks(ax2.get_xticks()[1:-1])
    #ax.set_yticks(ax.get_yticks()[1:])
    #ax2.set_yticks(ax2.get_yticks()[:-1])
    

    
    #########Plotting the selected traces:
    nsp = len(PLotSt) * len(PlotSubf)
    plt.figure(figsize=(13,11))
    plt.title("Synthetics for rake = " + str(round(rake)))
    mindis = []
    maxdis = []
    for i, trid in enumerate(PLotSt):   
        x = np.arange(0,Ntraces[trid][1]-Ntraces[trid][0],
                      dt)
        for j, subf in enumerate(PlotSubf):
            y = A[subf, Ntraces[trid][0]:Ntraces[trid][1]]
            if j == 0:
                yy = y
            else:
                yy = np.vstack((yy,y))        
        maxdis.append(np.max(yy))
        mindis.append(np.min(yy))
        
    

    for i, trid in enumerate(PLotSt):   
        x = np.arange(0,Ntraces[trid][1]-Ntraces[trid][0],
                      dt)

        for j, subf in enumerate(PlotSubf):
            y = A[subf, Ntraces[trid][0]:Ntraces[trid][1]]
            plt.subplot2grid((len(PlotSubf), len(PLotSt)),
                              (j, i))                                
            plt.plot(x,y, linewidth=2.5)
            if j == 0:
                plt.title(trid)
            fig = plt.gca()            
            fig.axes.get_yaxis().set_ticks([])
            fig.set_ylabel(str(subf),rotation=0)
            fig.set_xlim((x[0],x[-1]))
            fig.set_ylim((mindis[i],maxdis[i]))
            if subf != PlotSubf[-1]:
                fig.axes.get_xaxis().set_ticks([])

    
    plt.show()
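The script is meant to be run directly; a minimal entry point (the original file's guard is elided in the snippet), assuming the GF directory, the *.syn.req file and goodtraces.dat described above are in place:

if __name__ == "__main__":
    main()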
Code Example #12
File: CodaNorm.py  Project: cormorant/CodaNorm
     continue
 #==
 # for every frequency, calc...
 for NumFreq, Freq in enumerate(Freqs):
     # low and high freq corners
     f1, f2 = 0.5 * Freq, 1.5 * Freq
     #===
     sheet = sheets[NumFreq]
     if has_coords_key:
         # calculate distances, azimuths from the event to the station
         lat, lon = parts["LAT"], parts["LON"]
         # gps2DistAzimuth(lat1, lon1, lat2, lon2)
         dist, azAB, azBA = gps2DistAzimuth(Settings["station_lat"],
             Settings["station_lon"], lat, lon)
         # locations2degrees(lat1, long1, lat2, long2)
         dist_degr = locations2degrees(Settings["station_lat"],
             Settings["station_lon"], lat, lon)
         dist /= 1000. # must be in kilometers
         # write distances to sheet
         for col, item in enumerate([dist, dist_degr, azAB, azBA]):
             sheet.write(NumLine+1, col+columns-4, item)
     # raw data from the directory
     for col, item in enumerate(line):
         sheet.write(NumLine+1, col, item)
     #=== calculations
     try:
         result = main(Freq, f1, f2, filename, secondsE, secondsP,
             secondsS, Settings)
     except BaseException, e:
         print e,
         result = None
     #=== calculations ended
Code Example #13
File: sac.py  Project: ninety47/pisces
def tr2assoc(tr, pickmap=None):
    """
    Takes a sac header dictionary, and produces a list of up to 10 
    Assoc instances. Header->phase mappings follow SAC2000, i.e.:

    * t0: P
    * t1: Pn
    * t2: Pg
    * t3: S
    * t4: Sn
    * t5: Sg
    * t6: Lg
    * t7: LR
    * t8: Rg
    * t9: pP

    An alternate mapping for some or all picks can be supplied, however, 
    as a dictionary of strings in the above form.  
    
    Note: arid values will not be filled in, so do:

    >>> for assoc in kbio.tables['assoc']:
    ...     assoc.arid = lastarid + 1
    ...     lastarid += 1

    """
    pick2phase = {'t0': 'P', 't1': 'Pn', 't2': 'Pg', 't3': 'S',
    't4': 'Sn', 't5': 'Sg', 't6': 'Lg', 't7': 'LR', 't8': 'Rg',
    't9': 'pP'} 

    #overwrite defaults with supplied map
    if pickmap:
        pick2phase.update(pickmap)

    # Geographic relations.
    # obspy.read tries to calculate these values if lcalca is True and the
    # needed header info is there, so we only need to try if lcalca is False.
    # XXX: I just calculate it if no values are currently filled in.
    assocdict = {}
    try:
        assocdict.update(_map_header({'az': 'esaz', 'baz': 'seaz', 
                                      'gcarc': 'delta'}, tr.stats.sac, 
                                      SACDEFAULT))
    except AttributeError:
        # no tr.stats.sac
        pass

    #overwrite if any are None
    if not assocdict:
        try:
            delta = geod.locations2degrees(tr.stats.sac.stla, tr.stats.sac.stlo, 
                                           tr.stats.sac.evla, tr.stats.sac.evlo)
            m, seaz, esaz = geod.gps2DistAzimuth(tr.stats.sac.stla, 
                tr.stats.sac.stlo, tr.stats.sac.evla, tr.stats.sac.evlo)
            assocdict['esaz'] = esaz
            assocdict['seaz'] = seaz
            assocdict['delta'] = delta
        except (AttributeError, TypeError):
            #some sac header values are None
            pass

    if tr.stats.station:
        assocdict['sta'] = tr.stats.station

    assocdict.update(_map_header({'norid': 'orid'}, tr.stats.sac, SACDEFAULT))

    #now, do the phase arrival mappings
    #for each pick in hdr, make a separate dictionary containing assocdict plus
    #the new phase info.
    assocs = []
    for key in pick2phase:
        kkey = 'k' + key
        #if there's a value in t[0-9]
        if tr.stats.sac[key] != SACDEFAULT[key]:
            #if the phase name kt[0-9] is null
            if tr.stats.sac[kkey] == SACDEFAULT[kkey]:
                #take it from the map
                iassoc = {'phase': pick2phase[key]}
            else:
                #take it directly
                iassoc = {'phase': tr.stats.sac[kkey]}

            iassoc.update(assocdict)
            assocs.append(iassoc)

    return [Assoc(**assoc) for assoc in assocs]
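Hypothetical usage on a single SAC trace read with obspy:

tr = read("example.sac")[0]   # any SAC file with pick headers set
for assoc in tr2assoc(tr, pickmap={'t0': 'Pn'}):
    print assoc.phase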
Code Example #14
File: window_selection.py  Project: seancug/LASIF
def select_windows(data_trace, synthetic_trace, ev_lat, ev_lng, ev_depth_in_km,
                   st_lat, st_lng, minimum_period, maximum_period):
    """
    Window selection algorithm for picking windows suitable for misfit
    calculation based on phase differences.

    :param data_trace:
    :param synthetic_trace:
    :param ev_lat:
    :param ev_lng:
    :param ev_depth_in_km:
    :param st_lat:
    :param st_lng:
    :param minimum_period:
    :param maximum_period:
    """

    print "* ---------------------------"
    print "* autoselect " + data_trace.stats.channel

    # =========================================================================
    # set a couple of selection parameters - might become part of the input in
    # future versions
    # =========================================================================

    # Minimum normalised correlation coefficient of the complete traces.
    min_cc = 0.0
    # Maximum relative noise level for the whole trace. Measured from maximum
    # amplitudes before and after the first arrival.
    max_noise = 0.3
    # Maximum relative noise level for individual windows.
    max_noise_window = 0.4
    # All arrivals later than those corresponding to the threshold velocity
    # [km/s] will be excluded.
    threshold_velocity = 2.4
    # Maximum allowable time shift within a window, as a fraction of the
    # minimum period.
    threshold_shift = 0.2
    # Minimum normalised correlation coefficient within a window.
    threshold_correlation = 0.5
    # Minimum length of the time windows relative to the minimum period.
    min_length_period = 1.5
    # Minimum number of extrema in an individual time window (excluding the
    # edges).
    min_peaks_troughs = 2
    # Maximum energy ratio between data and synthetics within a time window.
    max_energy_ratio = 3.0

    # =========================================================================
    # initialisations
    # =========================================================================

    dt = synthetic_trace.stats.delta
    npts = synthetic_trace.stats.npts
    dist_in_deg = geodetics.locations2degrees(st_lat, st_lng, ev_lat, ev_lng)
    dist_in_km = geodetics.calcVincentyInverse(
        st_lat, st_lng, ev_lat, ev_lng)[0] / 1000.0
    tts = getTravelTimes(dist_in_deg, ev_depth_in_km, model="ak135")
    first_tt_arrival = min([_i["time"] for _i in tts])

    # Number of samples in the sliding window. Currently, the length of the
    # window is set to a multiple of the dominant period of the synthetics.
    # Make sure it is an uneven number; just to have an easy midpoint
    # definition.
    window_length = int(round(float(2 * minimum_period) / dt))

    if not window_length % 2:
        window_length += 1

    # Allocate arrays to collect the time dependent values.
    sliding_time_shift = np.zeros(npts, dtype="float32")
    max_cc_coeff = np.zeros(npts, dtype="float32")

    taper = np.hanning(window_length)

    # =========================================================================
    # check if whole seismograms are sufficiently correlated and estimate noise
    # level
    # =========================================================================

    synth = synthetic_trace.data
    data = data_trace.data

    #  compute correlation coefficient
    norm = np.sqrt(np.sum(data ** 2)) * np.sqrt(np.sum(synth ** 2))
    cc = np.sum(data * synth) / norm
    print "** correlation coefficient: " + str(cc)

    #  estimate noise level from waveforms prior to the first arrival
    idx = int(np.ceil((first_tt_arrival - minimum_period * 0.5) / dt))
    noise_absolute = data[50:idx].ptp()
    noise_relative = noise_absolute / data.ptp()
    print "** absolute noise level: " + str(noise_absolute) + " m/s"
    print "** relative noise level: " + str(noise_relative)

    #  rejection criteria
    accept = True
    if cc < min_cc:
        print "** no windows selected, correlation " + str(cc) + \
            " is below threshold value of " + str(min_cc)
        accept = False
    if noise_relative > max_noise:
        print "** no windows selected, noise level " + str(noise_relative) + \
            " is above threshold value of " + str(max_noise)
        accept = False

    if accept is False:
        print "* autoselect done"
        return []

    # =========================================================================
    # compute sliding time shifts and correlation coefficients
    # =========================================================================

    for start_idx, end_idx, midpoint_idx in _window_generator(npts,
                                                              window_length):

        # Slice windows. Create a copy to be able to taper without affecting
        # the original time series.
        data_window = data_trace.data[start_idx: end_idx].copy() * taper
        synthetic_window = \
            synthetic_trace.data[start_idx: end_idx].copy() * taper

        # Skip windows that have essentially no energy to avoid instabilities.
        if synthetic_window.ptp() < synthetic_trace.data.ptp() * 0.001:
            continue

        # Calculate the time shift. Here this is defined as the shift of the
        # synthetics relative to the data. So a value of 2, for instance, means
        # that the synthetics are 2 timesteps later than the data.
        cc = np.correlate(data_window, synthetic_window, mode="full")

        time_shift = cc.argmax() - window_length + 1
        # Express the time shift in fraction of the minimum period.
        sliding_time_shift[midpoint_idx] = (time_shift * dt) / minimum_period

        # Normalized cross correlation.
        max_cc_value = cc.max() / np.sqrt((synthetic_window ** 2).sum() *
                                          (data_window ** 2).sum())
        max_cc_coeff[midpoint_idx] = max_cc_value

    # =========================================================================
    # compute the initial mask, i.e. intervals (windows) where no measurements
    # are made.
    # =========================================================================

    # Step 1: Initialise masked arrays. The mask will be set to True where no
    # windows are chosen.
    time_windows = np.ma.ones(npts)
    time_windows.mask = np.zeros(npts)

    # Step 2: Mark everything more than half a dominant period before the first
    # theoretical arrival as positive.
    time_windows.mask[:int(np.ceil(
                      (first_tt_arrival - minimum_period * 0.5) / dt))] = True

    # Step 3: Mark everything more than half a dominant period after the
    # threshold arrival time - computed from the threshold velocity - as
    # negative.
    time_windows.mask[int(np.floor(dist_in_km / threshold_velocity / dt)):] = \
        True

    # Step 4: Mark everything with an absolute travel time shift of more than
    # threshold_shift times the dominant period as negative
    time_windows.mask[np.abs(sliding_time_shift) > threshold_shift] = True

    # Step 5: Mark the area around every "travel time shift jump" (based on
    # the traveltime time difference) negative. The width of the area is
    # currently chosen to be a tenth of a dominant period to each side.
    sample_buffer = int(np.ceil(minimum_period / dt * 0.1))
    indices = np.ma.where(np.abs(np.diff(sliding_time_shift)) > 0.1)[0]
    for index in indices:
        time_windows.mask[index - sample_buffer: index + sample_buffer] = True

    # Step 6: Mark all areas where the normalized cross correlation coefficient
    # is under threshold_correlation as negative
    time_windows.mask[max_cc_coeff < threshold_correlation] = True

    # =========================================================================
    #  Make the final window selection.
    # =========================================================================

    min_length = min(
        minimum_period / dt * min_length_period, maximum_period / dt)
    final_windows = []

    #  loop through all the time windows
    for i in np.ma.flatnotmasked_contiguous(time_windows):

        window_npts = i.stop - i.start
        synthetic_window = synthetic_trace.data[i.start: i.stop]
        data_window = data_trace.data[i.start: i.stop]

        # Step 7: Throw away all windows with a length of less than
        # min_length_period times the dominant period.
        if (i.stop - i.start) < min_length:
            continue

        # Step 8: Exclude windows without a real peak or trough (except for the
        # edges).
        data_p, data_t, data_extrema = find_local_extrema(data_window, 0)
        synth_p, synth_t, synth_extrema = find_local_extrema(synthetic_window,
                                                             0)
        if np.min([len(synth_p), len(synth_t), len(data_p), len(data_t)]) < \
                min_peaks_troughs:
            continue

        # Step 9: Peak and trough matching algorithm
        window_mask = np.ones(window_npts, dtype="bool")

        closest_peaks = find_closest(data_p, synth_p)
        diffs = np.diff(closest_peaks)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_p[idx - 1]
            else:
                start = 0
            if idx < (len(synth_p) - 1):
                end = synth_p[idx + 1]
            else:
                end = -1
            window_mask[start: end] = False

        closest_troughs = find_closest(data_t, synth_t)
        diffs = np.diff(closest_troughs)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_t[idx - 1]
            else:
                start = 0
            if idx < (len(synth_t) - 1):
                end = synth_t[idx + 1]
            else:
                end = -1
            window_mask[start: end] = False

        window_mask = np.ma.masked_array(window_mask, mask=window_mask)
        if window_mask.mask.all():
            continue

        # Step 10: Check if the time windows have sufficiently similar energy
        # and are above the noise
        for j in np.ma.flatnotmasked_contiguous(window_mask):

            # Again assert a certain minimal length.
            if (j.stop - j.start) < min_length:
                continue

            # Compare the energy in the data window and the synthetic window.
            data_energy = (data_window[j.start: j.stop] ** 2).sum()
            synth_energy = (synthetic_window[j.start: j.stop] ** 2).sum()
            energies = sorted([data_energy, synth_energy])
            if energies[1] > max_energy_ratio * energies[0]:
                continue

            # Check that amplitudes in the data are above the noise
            if noise_absolute / data_window[j.start: j.stop].ptp() > \
                    max_noise_window:
                continue

            final_windows.append((i.start + j.start, i.start + j.stop))

    print "* autoselect done"

    return final_windows
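A hypothetical call, assuming data_tr and synth_tr are pre-processed obspy Traces on a common time axis:

windows = select_windows(data_tr, synth_tr, ev_lat=-35.91, ev_lng=-72.73,
                         ev_depth_in_km=35., st_lat=-33.45, st_lng=-70.67,
                         minimum_period=60., maximum_period=120.)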
Code Example #15
File: instaseis_gui.py  Project: MMesch/instaseis
    def update(self, force=False):

        try:
            self._plot_receiver()
            self._plot_event()
        except AttributeError:
            return

        if (not bool(self.ui.auto_update_check_box.checkState()) and
                self.ui.finsource_tab.currentIndex() == 1 and not force and
                self.st_copy is None):
            return

        components = ["z", "n", "e"]
        components_map = {0: ("Z", "N", "E"),
                          1: ("Z", "R", "T")}

        components_choice = int(self.ui.components_combo.currentIndex())

        label_map = {0: {"z": "vertical", "n": "north", "e": "east"},
                     1: {"z": "vertical", "n": "radial", "e": "transverse"}}

        for component in components:
            p = getattr(self.ui, "%s_graph" % component)
            p.setTitle(label_map[components_choice][component].capitalize() +
                       " component")

        if self.ui.finsource_tab.currentIndex() == 0:
            src_latitude = self.source.latitude
            src_longitude = self.source.longitude
            src_depth_in_m = self.source.depth_in_m
        else:
            src_latitude = self.finite_source.hypocenter_latitude
            src_longitude = self.finite_source.hypocenter_longitude
            src_depth_in_m = self.finite_source.hypocenter_depth_in_m

        rec = self.receiver
        try:
            # Grab resampling settings from the UI.
            if bool(self.ui.resample_check_box.checkState()):
                dt = float(self.ui.resample_factor.value())
                dt = self.instaseis_db.info.dt / dt
            else:
                dt = None
            if self.ui.finsource_tab.currentIndex() == 0:
                st = self.instaseis_db.get_seismograms(
                    source=self.source, receiver=self.receiver, dt=dt,
                    components=components_map[components_choice])
            elif (not bool(self.ui.auto_update_check_box.checkState()) and
                    self.ui.finsource_tab.currentIndex() == 1 and
                    not force):
                st = self.st_copy.copy()
            else:
                prog_diag = QtGui.QProgressDialog(
                    "Calculating", "Cancel", 0, len(self.finite_source), self)
                prog_diag.setWindowModality(QtCore.Qt.WindowModal)
                prog_diag.setMinimumDuration(0)

                def get_prog_fct():
                    def set_value(value, count):
                        prog_diag.setValue(value)
                        if prog_diag.wasCanceled():
                            return True
                    return set_value

                prog_diag.setValue(0)
                st = self.instaseis_db.get_seismograms_finite_source(
                    sources=self.finite_source, receiver=self.receiver, dt=dt,
                    components=('Z', 'N', 'E'),
                    progress_callback=get_prog_fct())
                prog_diag.setValue(len(self.finite_source))
                if not st:
                    return

                baz = geodetics.gps2DistAzimuth(
                    self.finite_source.CMT.latitude,
                    self.finite_source.CMT.longitude,
                    rec.latitude, rec.longitude)[2]
                self.st_copy = st.copy()
                st.rotate('NE->RT', baz)
                st += self.st_copy
                self.st_copy = st.copy()

            if self.ui.finsource_tab.currentIndex() == 1 \
                    and bool(self.ui.plot_CMT_check_box.checkState()):
                st_cmt = self.instaseis_db.get_seismograms(
                    source=self.finite_source.CMT, receiver=self.receiver,
                    dt=dt, components=components_map[components_choice],
                    reconvolve_stf=True)
            else:
                st_cmt = None

            # check filter values from the UI
            zp = bool(self.ui.zero_phase_check_box.checkState())
            if bool(self.ui.lowpass_check_box.checkState()):
                try:
                    freq = 1.0 / float(self.ui.lowpass_period.value())
                    st.filter('lowpass', freq=freq, zerophase=zp)
                    if st_cmt is not None:
                        st_cmt.filter('lowpass', freq=freq, zerophase=zp)
                except ZeroDivisionError:
                    # this happens when typing in the lowpass_period box
                    pass

            if bool(self.ui.highpass_check_box.checkState()):
                try:
                    freq = 1.0 / float(self.ui.highpass_period.value())
                    st.filter('highpass', freq=freq, zerophase=zp)
                    if st_cmt is not None:
                        st_cmt.filter('highpass', freq=freq, zerophase=zp)
                except ZeroDivisionError:
                    # this happens when typing in the highpass_period box
                    pass

        except AttributeError:
            return

        if bool(self.ui.tt_times.checkState()):
            great_circle_distance = geodetics.locations2degrees(
                src_latitude, src_longitude,
                rec.latitude, rec.longitude)
            self.tts = tau_model.get_travel_times(
                source_depth_in_km=src_depth_in_m / 1000.0,
                distance_in_degree=great_circle_distance)

        for ic, component in enumerate(components):
            plot_widget = getattr(self.ui, "%s_graph" % component.lower())
            plot_widget.clear()
            tr = st.select(component=components_map[components_choice][ic])[0]
            times = tr.times()
            plot_widget.plot(times, tr.data, pen="k")
            plot_widget.ptp = tr.data.ptp()
            if st_cmt is not None:
                tr = st_cmt.select(
                    component=components_map[components_choice][ic])[0]
                times = tr.times()
                plot_widget.plot(times, tr.data, pen="r")

            if bool(self.ui.tt_times.checkState()):
                tts = []
                for tt in self.tts:
                    if tt.time >= times[-1]:
                        continue
                    tts.append(tt)
                    if tt.name[0].lower() == "p":
                        pen = "#008c2866"
                    else:
                        pen = "#95000066"
                    plot_widget.addLine(x=tt.time, pen=pen, z=-10)
                self.tts = tts
        self.set_info()
Code Example #16
                      int(line[7]), int(line[8]), float(line[9]))
 print("Downloading station inventory for eve_" + str(int(line[0])))
 inv = client.get_stations(network="AU",
                           station="*",
                           channel="BH*",
                           starttime=eve_ot,
                           endtime=eve_ot + 3600,
                           latitude=eve_lat,
                           longitude=eve_lon,
                           maxradius=radius)
 print("completed for eve_" + str(int(line[0])))
 # bulk request from IRIS
 bulk_req = []
 S = []
 for sta in inv[0].stations:
     dist = locations2degrees(eve_lat, eve_lon, sta.latitude, sta.longitude)
     arrival = model.get_travel_times(source_depth_in_km=eve_depth,
                                      distance_in_degree=dist,
                                      phase_list=['P'])
     p_arrival = eve_ot + arrival[0].time
     t1 = p_arrival - wl_10deg
     t2 = p_arrival + wl_10deg
     bulk_req.append(('AU', sta.code, '*', 'BH*', t1, t2))
     s = sta.code + ' ' + str(sta.latitude) + ' ' + str(sta.longitude) \
         + ' ' + str(dist)
     S.append(s)
 print("Downloading waveforms for eve_" + str(int(line[0])))
 st = client.get_waveforms_bulk(bulk_req, attach_response=True)
 print("completed for eve_" + str(int(line[0])))
 # write stations.txt (see plan-phase1.txt for format details)
 ##    stations = []
Code Example #17
     dtype='S')
 coords = genfromtxt(
     '/Users/dmelgar/Slip_inv/Nepal_ALOS/data/station_info/nepal_kinematic_gpsonly.gflist',
     usecols=[1, 2])
 for k in range(len(stanames)):
     sta = stanames[k]
     print sta
     n = read(path + 'PPP/' + sta + '.LXN.sac')
     e = read(path + 'PPP/' + sta + '.LXE.sac')
     u = read(path + 'PPP/' + sta + '.LXZ.sac')
     ##Low pass filter
     #n[0].data=lowpass(n[0].data,fcorner,1./n[0].stats.delta,10)
     #e[0].data=lowpass(e[0].data,fcorner,1./e[0].stats.delta,10)
     #u[0].data=lowpass(u[0].data,fcorner,1./u[0].stats.delta,10)
     #Get station to hypocenter delta distance
     delta = locations2degrees(coords[k, 1], coords[k, 0], epicenter[1],
                               epicenter[0])
     #Get p-time to site
     tt = getTravelTimes(delta, epicenter[2])
     tp = timedelta(seconds=float64(tt[0]['time']))
     print tp
     if stanames[k] == 'LHAZ':
         tp = timedelta(seconds=160)
         tmax = timedelta(seconds=120)
     elif stanames[k] == 'SYBC':
         tmax = timedelta(seconds=60)
         tp = tp + timedelta(seconds=20)
     elif stanames[k] == 'RMTE':
         tmax = timedelta(seconds=60)
         tp = tp + timedelta(seconds=20)
     else:
         tmax = timedelta(seconds=60)
Code Example #18
def select_windows(data_trace, synthetic_trace, event_latitude,
                   event_longitude, event_depth_in_km,
                   station_latitude, station_longitude, minimum_period,
                   maximum_period,
                   min_cc=0.10, max_noise=0.10, max_noise_window=0.4,
                   min_velocity=2.4, threshold_shift=0.30,
                   threshold_correlation=0.75, min_length_period=1.5,
                   min_peaks_troughs=2, max_energy_ratio=10.0,
                   min_envelope_similarity=0.2,
                   verbose=False, plot=False):
    """
    Window selection algorithm for picking windows suitable for misfit
    calculation based on phase differences.

    Returns a list of windows which might be empty due to various reasons.

    This function is really long and does a lot of things. For a more detailed
    description, please see the LASIF paper.

    :param data_trace: The data trace.
    :type data_trace: :class:`~obspy.core.trace.Trace`
    :param synthetic_trace: The synthetic trace.
    :type synthetic_trace: :class:`~obspy.core.trace.Trace`
    :param event_latitude: The event latitude.
    :type event_latitude: float
    :param event_longitude: The event longitude.
    :type event_longitude: float
    :param event_depth_in_km: The event depth in km.
    :type event_depth_in_km: float
    :param station_latitude: The station latitude.
    :type station_latitude: float
    :param station_longitude: The station longitude.
    :type station_longitude: float
    :param minimum_period: The minimum period of the data in seconds.
    :type minimum_period: float
    :param maximum_period: The maximum period of the data in seconds.
    :type maximum_period: float
    :param min_cc: Minimum normalised correlation coefficient of the
        complete traces.
    :type min_cc: float
    :param max_noise: Maximum relative noise level for the whole trace.
        Measured from maximum amplitudes before and after the first arrival.
    :type max_noise: float
    :param max_noise_window: Maximum relative noise level for individual
        windows.
    :type max_noise_window: float
    :param min_velocity: All arrivals later than those corresponding to the
        threshold velocity [km/s] will be excluded.
    :type min_velocity: float
    :param threshold_shift: Maximum allowable time shift within a window,
        as a fraction of the minimum period.
    :type threshold_shift: float
    :param threshold_correlation: Minimum normalised correlation coefficient
        within a window.
    :type threshold_correlation: float
    :param min_length_period: Minimum length of the time windows relative to
        the minimum period.
    :type min_length_period: float
    :param min_peaks_troughs: Minimum number of extrema in an individual
        time window (excluding the edges).
    :type min_peaks_troughs: float
    :param max_energy_ratio: Maximum energy ratio between data and
        synthetics within a time window. Don't make this too small!
    :type max_energy_ratio: float
    :param min_envelope_similarity: The minimum similarity of the envelopes of
        both data and synthetics. This essentially assures that the
        amplitudes of data and synthetics cannot diverge too much within a
        window. It is a bit like the inverse of the ratio of both envelopes,
        so a value of 0.2 makes sure neither amplitude can be more than 5
        times larger than the other.
    :type min_envelope_similarity: float
    :param verbose: No output by default.
    :type verbose: bool
    :param plot: Create a plot of the algorithm while it does its work.
    :type plot: bool
    """
    # Shortcuts to frequently accessed variables.
    data_starttime = data_trace.stats.starttime
    data_delta = data_trace.stats.delta
    dt = data_trace.stats.delta
    npts = data_trace.stats.npts
    synth = synthetic_trace.data
    data = data_trace.data
    times = data_trace.times()

    # Fill cache if necessary.
    if not TAUPY_MODEL_CACHE:
        from obspy.taup import TauPyModel  # NOQA
        TAUPY_MODEL_CACHE["model"] = TauPyModel("AK135")
    model = TAUPY_MODEL_CACHE["model"]

    # -------------------------------------------------------------------------
    # Geographical calculations and the time of the first arrival.
    # -------------------------------------------------------------------------
    dist_in_deg = geodetics.locations2degrees(station_latitude,
                                              station_longitude,
                                              event_latitude, event_longitude)
    dist_in_km = geodetics.calcVincentyInverse(
        station_latitude, station_longitude, event_latitude,
        event_longitude)[0] / 1000.0

    # Get only a couple of P phases which should be the first arrival
    # for every epicentral distance. It's quite a bit faster than calculating
    # the arrival times for every phase.
    # Assumes the first sample is the centroid time of the event.
    tts = model.get_travel_times(source_depth_in_km=event_depth_in_km,
                                 distance_in_degree=dist_in_deg,
                                 phase_list=["ttp"])
    # Sort just as a safety measure.
    tts = sorted(tts, key=lambda x: x.time)
    first_tt_arrival = tts[0].time

    # -------------------------------------------------------------------------
    # Window settings
    # -------------------------------------------------------------------------
    # Number of samples in the sliding window. Currently, the length of the
    # window is set to a multiple of the dominant period of the synthetics.
    # Make sure it is an odd number to get a trivial midpoint definition;
    # one sample does not matter much in any case.
    window_length = int(round(float(2 * minimum_period) / dt))
    if not window_length % 2:
        window_length += 1

    # Use a Hanning window. No particular reason for it, but it's a
    # well-behaved window and has nice spectral properties.
    taper = np.hanning(window_length)
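
    # Illustration (not from the original code): with dt = 1 s and a minimum
    # period of 40 s, window_length = 80 is bumped to 81 so that sample 40
    # is an exact midpoint, and np.hanning(81) tapers both window ends to
    # zero before each cross-correlation.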

    # =========================================================================
    # check if whole seismograms are sufficiently correlated and estimate
    # noise level
    # =========================================================================

    # Overall Correlation coefficient.
    norm = np.sqrt(np.sum(data ** 2)) * np.sqrt(np.sum(synth ** 2))
    cc = np.sum(data * synth) / norm
    if verbose:
        _log_window_selection(data_trace.id,
                              "Correlation Coefficient: %.4f" % cc)

    # Estimate noise level from waveforms prior to the first arrival.
    idx_end = int(np.ceil((first_tt_arrival - 0.5 * minimum_period) / dt))
    idx_end = max(10, idx_end)
    idx_start = int(np.ceil((first_tt_arrival - 2.5 * minimum_period) / dt))
    idx_start = max(10, idx_start)

    if idx_start >= idx_end:
        idx_start = max(0, idx_end - 10)

    abs_data = np.abs(data)
    noise_absolute = abs_data[idx_start:idx_end].max()
    noise_relative = noise_absolute / abs_data.max()
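
    # Illustration (not from the original code): if the largest pre-arrival
    # amplitude is 1e-7 and the largest amplitude of the whole trace is
    # 2e-6, then noise_relative = 0.05, which would pass a max_noise
    # threshold of 0.1.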

    if verbose:
        _log_window_selection(data_trace.id,
                              "Absolute Noise Level: %e" % noise_absolute)
        _log_window_selection(data_trace.id,
                              "Relative Noise Level: %e" % noise_relative)

    # Basic global rejection criteria.
    accept_traces = True
    if (cc < min_cc) and (noise_relative > max_noise / 3.0):
        msg = "Correlation %.4f is below threshold of %.4f" % (cc, min_cc)
        if verbose:
            _log_window_selection(data_trace.id, msg)
        accept_traces = msg

    if noise_relative > max_noise:
        msg = "Noise level %.3f is above threshold of %.3f" % (
            noise_relative, max_noise)
        if verbose:
            _log_window_selection(
                data_trace.id, msg)
        accept_traces = msg

    # Calculate the envelope of both data and synthetics. This is to make sure
    # that the amplitude of both is not too different over time and is
    # used as another selector. Only calculated if the trace is generally
    # accepted as it is fairly slow.
    if accept_traces is True:
        data_env = obspy.signal.filter.envelope(data)
        synth_env = obspy.signal.filter.envelope(synth)

    # -------------------------------------------------------------------------
    # Initial Plot setup.
    # -------------------------------------------------------------------------
    # All the plot calls are interleaved. I realize this is really ugly but
    # the alternative would be to either have two functions (one with plots,
    # one without) or split the plotting function in various subfunctions,
    # neither of which are acceptable in my opinion. The impact on
    # performance is minimal if plotting is turned off: all imports are lazy
    # and a couple of conditionals are cheap.
    if plot:
        import matplotlib.pylab as plt  # NOQA
        import matplotlib.patheffects as PathEffects  # NOQA

        if accept_traces is True:
            plt.figure(figsize=(18, 12))
            plt.subplots_adjust(left=0.05, bottom=0.05, right=0.98, top=0.95,
                                wspace=None, hspace=0.0)
            grid = (31, 1)

            # Axes showing the data.
            data_plot = plt.subplot2grid(grid, (0, 0), rowspan=8)
        else:
            # Only show one axis if the traces are not accepted.
            plt.figure(figsize=(18, 3))

        # Plot envelopes if needed.
        if accept_traces is True:
            plt.plot(times, data_env, color="black", alpha=0.5, lw=0.4,
                     label="data envelope")
            plt.plot(synthetic_trace.times(), synth_env, color="#e41a1c",
                     alpha=0.4, lw=0.5, label="synthetics envelope")

        plt.plot(times, data, color="black", label="data", lw=1.5)
        plt.plot(synthetic_trace.times(), synth, color="#e41a1c",
                 label="synthetics",  lw=1.5)

        # Symmetric around y axis.
        middle = data.mean()
        d_max, d_min = data.max(), data.min()
        r = max(d_max - middle, middle - d_min) * 1.1
        ylim = (middle - r, middle + r)
        xlim = (times[0], times[-1])
        plt.ylim(*ylim)
        plt.xlim(*xlim)

        offset = (xlim[1] - xlim[0]) * 0.005
        plt.vlines(first_tt_arrival, ylim[0], ylim[1], colors="#ff7f00", lw=2)
        plt.text(first_tt_arrival + offset,
                 ylim[1] - (ylim[1] - ylim[0]) * 0.02,
                 "first arrival", verticalalignment="top",
                 horizontalalignment="left", color="#ee6e00",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])

        plt.vlines(first_tt_arrival - minimum_period / 2.0, ylim[0], ylim[1],
                   colors="#ff7f00", lw=2)
        plt.text(first_tt_arrival - minimum_period / 2.0 - offset,
                 ylim[0] + (ylim[1] - ylim[0]) * 0.02,
                 "first arrival - min period / 2", verticalalignment="bottom",
                 horizontalalignment="right", color="#ee6e00",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])

        for velocity in [6, 5, 4, 3, min_velocity]:
            tt = dist_in_km / velocity
            plt.vlines(tt, ylim[0], ylim[1], colors="gray", lw=2)
            if velocity == min_velocity:
                hal = "right"
                o_s = -1.0 * offset
            else:
                hal = "left"
                o_s = offset
            plt.text(tt + o_s, ylim[0] + (ylim[1] - ylim[0]) * 0.02,
                     str(velocity) + " km/s", verticalalignment="bottom",
                     horizontalalignment=hal, color="0.15")
        plt.vlines(dist_in_km / min_velocity + minimum_period / 2.0,
                   ylim[0], ylim[1], colors="gray", lw=2)
        plt.text(dist_in_km / min_velocity + minimum_period / 2.0 - offset,
                 ylim[1] - (ylim[1] - ylim[0]) * 0.02,
                 "min surface velocity + min period / 2",
                 verticalalignment="top",
                 horizontalalignment="right", color="0.15", path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])

        plt.hlines(noise_absolute, xlim[0], xlim[1], linestyle="--",
                   color="gray")
        plt.hlines(-noise_absolute, xlim[0], xlim[1], linestyle="--",
                   color="gray")
        plt.text(offset, noise_absolute + (ylim[1] - ylim[0]) * 0.01,
                 "noise level", verticalalignment="bottom",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")
        plt.gca().xaxis.set_ticklabels([])

        # Plot the basic global information.
        ax = plt.gca()
        txt = (
            "Total CC Coeff: %.4f\nAbsolute Noise: %e\nRelative Noise: %.3f"
            % (cc, noise_absolute, noise_relative))
        ax.text(0.01, 0.95, txt, transform=ax.transAxes,
                fontdict=dict(fontsize="small", ha='left', va='top'),
                bbox=dict(boxstyle="round", fc="w", alpha=0.8))
        plt.suptitle("Channel %s" % data_trace.id, fontsize="larger")

        # Show plot and return if not accepted.
        if accept_traces is not True:
            txt = "Rejected: %s" % (accept_traces)
            ax.text(0.99, 0.95, txt, transform=ax.transAxes,
                    fontdict=dict(fontsize="small", ha='right', va='top'),
                    bbox=dict(boxstyle="round", fc="red", alpha=1.0))
            plt.show()
    if accept_traces is not True:
        return []

    # Initialise masked arrays. The mask will be set to True where no
    # windows are chosen.
    time_windows = np.ma.ones(npts)
    time_windows.mask = False
    if plot:
        old_time_windows = time_windows.copy()

    # Elimination Stage 1: Eliminate everything earlier than half a period
    # before the first theoretical arrival and everything later than half a
    # period after the arrival corresponding to the minimum velocity.
    min_idx = int((first_tt_arrival - (minimum_period / 2.0)) / dt)
    max_idx = int(math.ceil((
        dist_in_km / min_velocity + minimum_period / 2.0) / dt))
    time_windows.mask[:min_idx + 1] = True
    time_windows.mask[max_idx:] = True
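
    # Illustration (not from the original code): with dt = 1 s, a first
    # arrival at 400 s, a minimum period of 40 s, a 3000 km epicentral
    # distance and min_velocity = 2.4 km/s, samples 0-380 and 1270 onwards
    # are masked, keeping 381-1269.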
    if plot:
        plt.subplot2grid(grid, (8, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TRAVELTIME ELIMINATION")
        old_time_windows = time_windows.copy()

    # -------------------------------------------------------------------------
    # Compute sliding time shifts and correlation coefficients for time
    # frames that passed the traveltime elimination stage.
    # -------------------------------------------------------------------------
    # Allocate arrays to collect the time dependent values.
    sliding_time_shift = np.ma.zeros(npts, dtype="float32")
    sliding_time_shift.mask = True
    max_cc_coeff = np.ma.zeros(npts, dtype="float32")
    max_cc_coeff.mask = True

    for start_idx, end_idx, midpoint_idx in _window_generator(npts,
                                                              window_length):
        if not min_idx < midpoint_idx < max_idx:
            continue

        # Slice windows. Create a copy to be able to taper without affecting
        # the original time series.
        data_window = data[start_idx: end_idx].copy() * taper
        synthetic_window = \
            synth[start_idx: end_idx].copy() * taper

        # Elimination Stage 2: Skip windows that have essentially no energy
        # to avoid instabilities. No windows can be picked in these.
        if synthetic_window.ptp() < synth.ptp() * 0.001:
            time_windows.mask[midpoint_idx] = True
            continue

        # Calculate the time shift. Here this is defined as the shift of the
        # synthetics relative to the data. So a value of 2, for instance,
        # means that the synthetics are 2 timesteps later than the data.
        cc = np.correlate(data_window, synthetic_window, mode="full")

        time_shift = cc.argmax() - window_length + 1
        # Express the time shift in fraction of the minimum period.
        sliding_time_shift[midpoint_idx] = (time_shift * dt) / minimum_period
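
        # Worked example (not from the original code): for window_length = 5
        # the full correlation has 2 * 5 - 1 = 9 samples with zero lag at
        # index 4, so an argmax at index 6 gives time_shift = 6 - 5 + 1 = 2,
        # i.e. the synthetics arrive two samples after the data.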

        # Normalized cross correlation.
        max_cc_value = cc.max() / np.sqrt((synthetic_window ** 2).sum() *
                                          (data_window ** 2).sum())
        max_cc_coeff[midpoint_idx] = max_cc_value

    if plot:
        plt.subplot2grid(grid, (9, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="NO ENERGY IN CC WINDOW")
        # Axes with the CC coeffs
        plt.subplot2grid(grid, (15, 0), rowspan=4)
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.hlines(-threshold_shift, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.hlines(threshold_shift, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.text(5, -threshold_shift - (2) * 0.03,
                 "threshold", verticalalignment="top",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.plot(times, sliding_time_shift, color="#377eb8",
                 label="Time shift in fraction of minimum period", lw=1.5)
        ylim = plt.ylim()
        plt.yticks([-0.75, 0, 0.75])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.ylim(ylim[0], ylim[1] + ylim[1] - ylim[0])
        plt.ylim(-1.0, 1.0)
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

        plt.subplot2grid(grid, (10, 0), rowspan=4)
        plt.hlines(threshold_correlation, xlim[0], xlim[1], color="0.15",
                   linestyle="--")
        plt.hlines(1, xlim[0], xlim[1], color="lightgray")
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.text(5, threshold_correlation + (1.4) * 0.01,
                 "threshold", verticalalignment="bottom",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.plot(times, max_cc_coeff, color="#4daf4a",
                 label="Maximum CC coefficient", lw=1.5)
        plt.ylim(-0.2, 1.2)
        plt.yticks([0, 0.5, 1])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

    # Elimination Stage 3: Mark all areas where the normalized cross
    # correlation coefficient is under threshold_correlation as negative.
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[max_cc_coeff < threshold_correlation] = True
    if plot:
        plt.subplot2grid(grid, (14, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="CORRELATION COEFF THRESHOLD ELIMINATION")

    # Elimination Stage 4: Mark everything with an absolute travel time
    # shift of more than threshold_shift times the dominant period as
    # negative.
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[np.ma.abs(sliding_time_shift) > threshold_shift] = True
    if plot:
        plt.subplot2grid(grid, (19, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TIME SHIFT THRESHOLD ELIMINATION")

    # Elimination Stage 5: Mark the area around every "travel time shift
    # jump" (based on the traveltime time difference) negative. The width of
    # the area is currently chosen to be a tenth of a dominant period to
    # each side.
    if plot:
        old_time_windows = time_windows.copy()
    sample_buffer = int(np.ceil(minimum_period / dt * 0.1))
    indices = np.ma.where(np.ma.abs(np.ma.diff(sliding_time_shift)) > 0.1)[0]
    for index in indices:
        time_windows.mask[index - sample_buffer: index + sample_buffer] = True
    if plot:
        plt.subplot2grid(grid, (20, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TIME SHIFT JUMPS ELIMINATION")

    # Clip both envelopes to avoid spuriously large ratios where either one
    # gets close to zero.
    stacked = np.vstack([
        np.ma.clip(synth_env, synth_env.max() * min_envelope_similarity * 0.5,
                   synth_env.max()),
        np.ma.clip(data_env, data_env.max() * min_envelope_similarity * 0.5,
                   data_env.max())])
    # Ratio.
    ratio = stacked.min(axis=0) / stacked.max(axis=0)
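
    # Illustration (not from the original code): with
    # min_envelope_similarity = 0.2, both envelopes are clipped from below
    # at 10 % of their maxima, so the ratio stays finite where a trace is
    # nearly zero; a ratio of 0.2 means one envelope is five times larger
    # than the other at that sample.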

    # Elimination Stage 6: Make sure the amplitudes of both don't vary too
    # much.
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[ratio < min_envelope_similarity] = True
    if plot:
        plt.subplot2grid(grid, (25, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="ENVELOPE AMPLITUDE SIMILARITY ELIMINATION")

    if plot:
        plt.subplot2grid(grid, (21, 0), rowspan=4)
        plt.hlines(min_envelope_similarity, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.text(5, min_envelope_similarity + (2) * 0.03,
                 "threshold", verticalalignment="bottom",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                 PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.plot(times, ratio, color="#9B59B6",
                 label="Envelope amplitude similarity", lw=1.5)
        plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
        plt.ylim(0.05, 1.05)
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

    # First minimum window length elimination stage. This is cheap and if
    # not done it can easily destabilize the peak-and-trough marching stage
    # which would then have to deal with way more edge cases.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = \
        min(minimum_period / dt * min_length_period, maximum_period / dt)
    for i in flatnotmasked_contiguous(time_windows):
        # Throw away all windows with a length of less than
        # min_length_period times the minimum period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (26, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="MINIMUM WINDOW LENGTH ELIMINATION 1")

    # -------------------------------------------------------------------------
    # Peak and trough marching algorithm
    # -------------------------------------------------------------------------
    final_windows = []
    for i in flatnotmasked_contiguous(time_windows):
        # Cut respective windows.
        window_npts = i.stop - i.start
        synthetic_window = synth[i.start: i.stop]
        data_window = data[i.start: i.stop]

        # Find extrema in the data and the synthetics.
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)

        window_mask = np.ones(window_npts, dtype="bool")

        closest_peaks = find_closest(data_p, synth_p)
        diffs = np.diff(closest_peaks)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_p[idx - 1]
            else:
                start = 0
            if idx < (len(synth_p) - 1):
                end = synth_p[idx + 1]
            else:
                end = -1
            window_mask[start: end] = False

        closest_troughs = find_closest(data_t, synth_t)
        diffs = np.diff(closest_troughs)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_t[idx - 1]
            else:
                start = 0
            if idx < (len(synth_t) - 1):
                end = synth_t[idx + 1]
            else:
                end = -1
            window_mask[start: end] = False
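
        # Illustration (not from the original code): window_mask starts all
        # True (= rejected) and is set False only between synthetic extrema
        # whose closest data extrema advance by exactly one index, i.e.
        # where peaks and troughs pair up one-to-one in order; skipped or
        # doubly matched extrema leave that stretch rejected.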

        window_mask = np.ma.masked_array(window_mask,
                                         mask=window_mask)

        if window_mask.mask.all():
            continue

        for j in flatnotmasked_contiguous(window_mask):
            final_windows.append((i.start + j.start, i.start + j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False
    if plot:
        plt.subplot2grid(grid, (27, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="PEAK AND TROUGH MARCHING ELIMINATION")

    # Loop through all the time windows, remove windows not satisfying the
    # minimum number of peaks and troughs per window. Acts mainly as a
    # safety guard.
    old_time_windows = time_windows.copy()
    for i in flatnotmasked_contiguous(old_time_windows):
        synthetic_window = synth[i.start: i.stop]
        data_window = data[i.start: i.stop]
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)
        if np.min([len(synth_p), len(synth_t), len(data_p), len(data_t)]) < \
                min_peaks_troughs:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (28, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="PEAK/TROUGH COUNT ELIMINATION")

    # Second minimum window length elimination stage.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = \
        min(minimum_period / dt * min_length_period, maximum_period / dt)
    for i in flatnotmasked_contiguous(time_windows):
        # Throw away all windows with a length of less than
        # min_length_period times the minimum period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (29, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="MINIMUM WINDOW LENGTH ELIMINATION 2")

    # Final step, eliminating windows with little energy.
    final_windows = []
    for j in flatnotmasked_contiguous(time_windows):
        # Again assert a certain minimal length.
        if (j.stop - j.start) < min_length:
            continue

        # Compare the energy in the data window and the synthetic window.
        data_energy = (data[j.start: j.stop] ** 2).sum()
        synth_energy = (synth[j.start: j.stop] ** 2).sum()
        energies = sorted([data_energy, synth_energy])
        if energies[1] > max_energy_ratio * energies[0]:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to energy ratio between "
                    "data and synthetics.")
            continue

        # Check that amplitudes in the data are above the noise level.
        if noise_absolute / data[j.start: j.stop].ptp() > \
                max_noise_window:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to having no amplitude above "
                    "the noise level.")
            continue
        final_windows.append((j.start, j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False

    if plot:
        plt.subplot2grid(grid, (30, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="LITTLE ENERGY ELIMINATION")

    if verbose:
        _log_window_selection(
            data_trace.id,
            "Done, Selected %i window(s)" % len(final_windows))

    # Final step is to convert the index value windows to actual times.
    windows = []
    for start, stop in final_windows:
        start = data_starttime + start * data_delta
        stop = data_starttime + stop * data_delta
        windows.append((start, stop))

    if plot:
        # Plot the final windows to the data axes.
        import matplotlib.transforms as mtransforms  # NOQA
        ax = data_plot
        trans = mtransforms.blended_transform_factory(ax.transData,
                                                      ax.transAxes)
        for start, stop in final_windows:
            ax.fill_between([start * data_delta, stop * data_delta], 0, 1,
                            facecolor="#CDDC39", alpha=0.5, transform=trans)

        plt.show()

    return windows
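
# A minimal usage sketch (not part of the original listing): this assumes
# the function above is LASIF's select_windows and that matching data and
# synthetic traces exist on disk. All file names, coordinates and threshold
# values below are illustrative placeholders, not values from the source.
if __name__ == "__main__":
    from obspy import read

    data_trace = read("data/BW.ALTM..BHZ.mseed")[0]
    synthetic_trace = read("synthetics/BW.ALTM..BHZ.mseed")[0]
    windows = select_windows(
        data_trace, synthetic_trace,
        event_latitude=-35.91, event_longitude=-72.73,
        event_depth_in_km=35.0,
        station_latitude=48.0, station_longitude=11.0,
        minimum_period=40.0, maximum_period=100.0,
        min_cc=0.10, max_noise=0.10, max_noise_window=0.4,
        min_velocity=2.4, threshold_shift=0.30,
        threshold_correlation=0.75, min_length_period=1.5,
        min_peaks_troughs=2, max_energy_ratio=10.0,
        min_envelope_similarity=0.2, verbose=True, plot=False)
    for start, stop in windows:
        print("%s -- %s" % (start, stop))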
Code Example #20
    def update(self, autorange=False, force=False):

        try:
            self._plot_receiver()
            self._plot_event()
        except AttributeError:
            return

        if (not bool(self.ui.auto_update_check_box.checkState())
                and self.ui.finsource_tab.currentIndex() == 1 and not force):
            return

        components = ["z", "n", "e"]
        components_map = {0: ("Z", "N", "E"),
                          1: ("Z", "R", "T")}

        components_choice = int(self.ui.components_combo.currentIndex())

        label_map = {0: {"z": "vertical", "n": "north", "e": "east"},
                     1: {"z": "vertical", "n": "radial", "e": "transverse"}}

        for component in components:
            p = getattr(self.ui, "%s_graph" % component)
            p.setTitle(label_map[components_choice][component].capitalize()
                       + " component")

        if self.ui.finsource_tab.currentIndex() == 0:
            src_latitude = self.source.latitude
            src_longitude = self.source.longitude
            src_depth_in_m = self.source.depth_in_m
        else:
            src_latitude = self.finite_source.hypocenter_latitude
            src_longitude = self.finite_source.hypocenter_longitude
            src_depth_in_m = self.finite_source.hypocenter_depth_in_m

        rec = self.receiver
        try:
            # Grab resampling settings from the UI.
            if bool(self.ui.resample_check_box.checkState()):
                dt = float(self.ui.resample_factor.value())
                dt = self.instaseis_db.dt / dt
            else:
                dt = None
            if self.ui.finsource_tab.currentIndex() == 0:
                st = self.instaseis_db.get_seismograms(
                    source=self.source, receiver=self.receiver, dt=dt,
                    components=components_map[components_choice])
            elif self.ui.finsource_tab.currentIndex() == 1:
                st = self.instaseis_db.get_seismograms_finite_source(
                    sources=self.finite_source, receiver=self.receiver, dt=dt,
                    components=components_map[components_choice])

            # check filter values from the UI
            if bool(self.ui.lowpass_check_box.checkState()):
                try:
                    freq = 1.0 / float(self.ui.lowpass_period.value())
                    st.filter('lowpass', freq=freq, zerophase=True)
                except ZeroDivisionError:
                    # this happens when typing in the lowpass_period box
                    pass

            if bool(self.ui.highpass_check_box.checkState()):
                try:
                    freq = 1.0 / float(self.ui.highpass_period.value())
                    st.filter('highpass', freq=freq, zerophase=True)
                except ZeroDivisionError:
                    # this happens when typing in the highpass_period box
                    pass

        except AttributeError:
            return

        if bool(self.ui.tt_times.checkState()):
            great_circle_distance = locations2degrees(
                src_latitude, src_longitude,
                rec.latitude, rec.longitude)
            self.tts = getTravelTimes(great_circle_distance,
                                      src_depth_in_m / 1000.0, model="ak135")

        for ic, component in enumerate(components):
            plot_widget = getattr(self.ui, "%s_graph" % component.lower())
            plot_widget.clear()
            tr = st.select(component=components_map[components_choice][ic])[0]
            times = tr.times()
            plot_widget.plot(times, tr.data, pen="k")

            if bool(self.ui.tt_times.checkState()):
                for tt in self.tts[:]:  # Copy: items may be removed below.
                    if tt["time"] >= times[-1]:
                        self.tts.remove(tt)
                        continue
                    if tt["phase_name"][0].lower() == "p":
                        pen = "#008c2866"
                    else:
                        pen = "#95000066"
                    plot_widget.addLine(x=tt["time"], pen=pen, z=-10)

            if autorange:
                plot_widget.autoRange()
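
# Standalone sketch of the travel-time lookup used in update() above (an
# assumption-laden extract, not original code). It uses the deprecated
# pre-1.0 obspy API that the listing itself relies on; newer obspy versions
# replace getTravelTimes with obspy.taup.TauPyModel. Coordinates and depth
# are hypothetical.
if __name__ == "__main__":
    from obspy.core.util.geodetics import locations2degrees
    from obspy.taup.taup import getTravelTimes

    great_circle_distance = locations2degrees(-35.91, -72.73, 48.0, 11.0)
    tts = getTravelTimes(great_circle_distance, 35.0, model="ak135")
    for tt in tts[:5]:
        print("%-8s %8.1f s" % (tt["phase_name"], tt["time"]))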
Code Example #21
def assertLoc(lat1, long1, lat2, long2, approx_distance):
    self.assertTrue(abs(math.radians(locations2degrees(
        lat1, long1, lat2, long2)) * 6371 - approx_distance) <= 20)
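
# Hedged reconstruction (not in the original source): in the obspy test
# suite this helper is defined inside a test method, so ``self`` is captured
# from the enclosing scope. A self-contained equivalent looks like this; the
# class name, method name and the London--Paris check are illustrative only.
import math
import unittest

from obspy.geodetics import locations2degrees
# (Older obspy versions expose it as obspy.core.util.geodetics instead.)


class Locations2DegreesTestCase(unittest.TestCase):
    def test_approximate_distances(self):
        def assert_loc(lat1, long1, lat2, long2, approx_distance):
            # Great-circle distance on a 6371 km sphere, 20 km tolerance.
            self.assertTrue(abs(math.radians(locations2degrees(
                lat1, long1, lat2, long2)) * 6371 - approx_distance) <= 20)

        # London to Paris is roughly 343 km apart.
        assert_loc(51.507, -0.128, 48.857, 2.353, 343)


if __name__ == "__main__":
    unittest.main()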
Code Example #22
def main(argv=sys.argv): 

    global eplat, eplon, epdepth, orig
    
    
    GFdir = "/home/roberto/data/GFS/"
    beta = 4.e3 #m/s 
    rho = 3.e3 #kg/m^3 
    mu = rho*beta*beta
    mu =40e9
    
    Lbdm0min = 1e-26*np.array([125.])
    Lbdsmooth = 1e-26*np.array([100.])
    
    #~ Lbdm0min = 1e-26*np.linspace(60.,500,40)
    #~ Lbdsmooth = 1e-26*np.linspace(60.,500,40)#*0.5

    corners = 4.
    fmin = 0.001
    fmax = 0.005
    
    ### Data from Chilean 2010 EQ (Same as W phase inv.) 
    strike = 18.
    dip    = 18.
    rake   = 104. # 109.
    #rake = 45.
    
    rakeA = rake + 45.
    rakeB = rake - 45.
    ####################
    nsx = 21
    nsy = 11
    Min_h = 10.
    flen  = 600. #Fault's longitude [km] along strike
    fwid  = 300. #Fault's longitude [km] along dip
    sflen = flen/float(nsx)
    sfwid = fwid/float(nsy)    
    swp = [1, 0, 2]
    nsf = nsx*nsy
    ###################
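
    # Illustration (not from the original code): these settings tile the
    # 600 km x 300 km fault plane into nsf = 21 * 11 = 231 subfaults of
    # roughly 28.6 km (along strike) x 27.3 km (along dip) each.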
    
    t_h     = 10.
    MISFIT = np.array([])
    #RUPVEL = np.arange(1.0, 5.0, 0.05)
    RupVel = 2.1 # Best fit
    #RupVel = 2.25 #From Lay et al.
    
    
    #for RupVel in RUPVEL:
    print "****************************"
    print RupVel
    print "****************************"
    NP = [strike, dip, rake]
    NPA = [strike, dip, rakeA]
    NPB = [strike, dip, rakeB]
    
    M  = np.array(NodalPlanetoMT(NP))  
    MA = np.array(NodalPlanetoMT(NPA)) 
    MB = np.array(NodalPlanetoMT(NPB)) 
    
    Mp = np.sum(M**2)/np.sqrt(2)
    
    
    #############
    # Loading req file and EQ parameters:
    parameters = {}
    with open(argv[1], 'r') as f:  # 'f' avoids shadowing the builtin 'file'.
        for line in f:
            line = line.split()
            key = line[0]
            val = line[1:]
            parameters[key] = val
    #~ cmteplat = float(parameters['eplat'][0])
    #~ cmteplon = float(parameters['eplon'][0])
    #~ cmtepdepth=float(parameters['epdepth'][0])    
    orig = UTCDateTime(parameters['origin_time'][0])
    
    ####Hypocentre from
    ### http://earthquake.usgs.gov/earthquakes/eqinthenews/2010/us2010tfan/    
    cmteplat = -35.91#-35.85#-36.03#-35.83
    cmteplon = -72.73#-72.72#-72.83# -72.67
    cmtepdepth= 35.
    eq_hyp = (cmteplat,cmteplon,cmtepdepth)
      ############

    
    
    grid, sblt = fault_grid('CL-2010', cmteplat, cmteplon, cmtepdepth, 0,
                            Min_h, strike, dip, rake, flen, fwid, nsx, nsy,
                            Verbose=False, ffi_io=True, gmt_io=True)
                             
    print ('CL-2010',cmteplat,cmteplon,cmtepdepth,0, Min_h,\
                             strike, dip, rake, flen, fwid, nsx, nsy,\
                            )
    print grid[0][1]
    #sys.exit()
    #############
    #Loading files and setting dirs:
    
    inputfile =  os.path.abspath(argv[1]) 
    if not os.path.exists(inputfile):
        print inputfile, "does not exist."
        exit()
    workdir = "/".join(inputfile.split("/")[:-1]) 
    basename = inputfile.split("/")[-1][:-4]
    if workdir[-1] != "/": workdir += "/"
    
    try:
        os.mkdir(workdir + "WPinv")
    except OSError:
        pass  # Directory already exists; skipping.
    
    trfile = open(workdir+"goodtraces.dat")
    trlist = []
    #Loading Good traces files:
    while 1:
        line = trfile.readline().rstrip('\r\n')
        if not line: break        
        trlist.append(line.split()[0])        
    trfile.close()
    #############
    
    # Reading traces:    
    st = read(workdir+"WPtraces/" + basename + ".decov.trim.mseed")  
    #############################################################################
    ######Determining the sf closest to the hypocentre:    
    min_Dist_hyp_subf = flen *fwid
    for subf in range(nsf):
        sblat   = grid[subf][1]
        sblon   = grid[subf][0]
        sbdepth = grid[subf][2]              
        sf_hyp =  (sblat,sblon, sbdepth)        
        Dist_hyp_subf = hypo2dist(eq_hyp,sf_hyp)
        if Dist_hyp_subf < min_Dist_hyp_subf:
            min_Dist_hyp_subf = Dist_hyp_subf
            min_sb_hyp = sf_hyp
            hyp_subf = subf
    print hyp_subf,  min_sb_hyp,  min_Dist_hyp_subf    
    
    
    ####Determining trimming times:
    
    test_tr = read(GFdir + "H003.5/PP/GF.0001.SY.LHZ.SAC")[0]
    t0 = test_tr.stats.starttime
    TrimmingTimes = {}   # Trimming window (ta, tb) for each station.
    A = 0
    for trid in trlist:     
        tr = st.select(id=trid)[0]
        metafile = workdir + "DATA/" + "META." + tr.id + ".xml"
        META = DU.getMetadataFromXML(metafile)[tr.id]
        stlat = META['latitude']
        stlon = META['longitude'] 
        dist =   locations2degrees(min_sb_hyp[0],min_sb_hyp[1],\
                                   stlat,stlon) 
        parrivaltime = getTravelTimes(dist,min_sb_hyp[2])[0]['time']        
        ta = t0 + parrivaltime
        tb = ta + round(15.*dist) 
        TrimmingTimes[trid] = (ta, tb)
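
        # Illustration (not from the original code): a station 60 degrees
        # away keeps round(15. * 60) = 900 s of record after the first P
        # arrival, so more distant stations get proportionally longer
        # windows.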
        
    
    ##############################################################################
     
    #####

    DIST = []
    # Ordering the stations in terms of distance
    for trid in trlist: 
        metafile = workdir + "DATA/" + "META." + trid + ".xml"
        META = DU.getMetadataFromXML(metafile)[trid]
        lat = META['latitude']
        lon = META['longitude']
        trdist = locations2degrees(cmteplat,cmteplon,lat,lon) 
        DIST.append(trdist)   

    
    DistIndex = lstargsort(DIST)
    
    if len(argv) == 3:
        trlist = [argv[2]]
        OneStation = True
    else:
        trlist = [trlist[i] for i in DistIndex]
        OneStation = False
        
   ##### 

    client = Client()
    ObservedDisp = np.array([])   
    gridlat = []
    gridlon = []
    griddepth = []
    sbarea = []
    mindist = flen*fwid # min distance hyp-subfault 
    

    ##########Loop for each subfault
    for subf in range(nsf):
        print "**********"
        print subf
        eplat   = grid[subf][1]
        eplon   = grid[subf][0]           
        epdepth = grid[subf][2]
        
        ## Storing the subfault's location centred on the hypocentre
        gridlat.append(eplat-cmteplat)
        gridlon.append(eplon-cmteplon)
        griddepth.append(epdepth)
        
        
        strike = grid[subf][3] #+ 360.
        dip    = grid[subf][4]
        rake   = grid[subf][5] #     
        NP = [strike, dip, rake]
        
        M = np.array(NodalPlanetoMT(NP))   

        
        # Calculating the time delay:
        sf_hyp = (eplat,eplon, epdepth)        
        Dist_ep_subf = hypo2dist(eq_hyp,sf_hyp)
        t_d = round(Dist_ep_subf/RupVel) #-59.
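
        # Illustration (not from the original code): a subfault 100 km from
        # the hypocentre with RupVel = 2.1 km/s has its Green's functions
        # delayed by t_d = round(100 / 2.1) = 48 s.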
        print eplat,eplon, epdepth
    
        #t_d  = 0.
        
    
        # Determining depth dir:
        depth = []
        depthdir = []
        for fname in os.listdir(GFdir):  # 'fname' avoids shadowing 'file'.
            if fname[-2:] == ".5":
                depthdir.append(fname)
                depth.append(float(fname[1:-2]))
        BestDirIndex = np.argsort(abs(epdepth-np.array(depth)))[0]
        hdir = GFdir + depthdir[BestDirIndex] + "/"
        # hdir is the absolute path to the closest depth.
        
        
        
        SYN = np.array([])
        SYNA = np.array([])
        SYNB = np.array([])

        
        # Main loop:
        for trid in trlist:  
                       
            tr = st.select(id=trid)[0]    
            metafile = workdir + "DATA/" + "META." + tr.id + ".xml"
            META = DU.getMetadataFromXML(metafile)[tr.id]
            lat = META['latitude']
            lon = META['longitude']    
            trPPsy,  trRRsy, trRTsy,  trTTsy = \
                                   GFSelectZ(lat,lon,hdir) 
            
            tr.stats.delta = trPPsy.stats.delta
            azi =   -np.pi/180.*gps2DistAzimuth(lat,lon,\
                                               eplat,eplon)[2]
            trROT = MTrotationZ(azi, trPPsy,  trRRsy, trRTsy,  trTTsy)        
                        
            
            # Triangle
            dt = trROT[0].stats.delta          
            trianglen = 2.*t_h/dt-1.
            window = triang(trianglen)
            window /= np.sum(window)
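
            # Illustration (not from the original code): with t_h = 10 s and
            # dt = 1 s this is a 19-sample, unit-area triangle, i.e. a
            # 2 * t_h long triangular slip-rate pulse that the Green's
            # functions are convolved with below.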
            #window = np.array([1.])
            
            FirstValid = int(trianglen/2.) + 1
            dist =   locations2degrees(eplat,eplon,lat,lon) 
            parrivaltime = getTravelTimes(dist,epdepth)[0]['time']
            
            t1 = TrimmingTimes[trid][0] - t_d
            t2 = TrimmingTimes[trid][1] - t_d
            
   
            #~ t1 = trROT[0].stats.starttime + parrivaltime- t_d
            #~ t2 = t1+ round(MinDist[tr.id]*15. )
                           
           
            N = len(trROT[0])
            for trR in trROT:
                trR.data *= 10.**-21  # To get M in Nm.
                trR.data -= trR.data[0]
                AUX1 = len(trR)
                trR.data = convolve(trR.data, window, mode='valid')
                AUX2 = len(trR)
                mean = np.mean(np.hstack((trR.data[0]*np.ones(FirstValid),
                               trR.data[:int(60./trR.stats.delta
                                             - FirstValid + 1)])))
                #mean = np.mean(trR.data[:60])
                trR.data -= mean
                trR.data = bp.bandpassfilter(trR.data, len(trR),
                                             trR.stats.delta,
                                             corners, 1, fmin, fmax)
                t_l = dt*0.5*(AUX1 - AUX2)
                # We lose t_h due to the convolution:
                trR.trim(t1-t_l, t2-t_l, pad=True, fill_value=trR.data[0])
            
            
         
            #~ for trR in trROT:
                #~ trR.data *= 10.**-23 ## To get M in Nm
                #~ trR.data -= trR.data[0]                
                #~ trR.data = convolve(trR.data,window,mode='same')
                #~ # mean = np.mean(np.hstack((trR.data[0]*np.ones(FirstValid),\
                             #~ # trR.data[:60./trR.stats.delta*1.-FirstValid+1])))
                #~ mean = np.mean(trR.data[:60])               
                #~ trR.data -= mean
                #~ trR.data = bp.bandpassfilter(trR.data,len(trR), trR.stats.delta,\
                                             #~ corners ,1 , fmin, fmax)
                #~ trR.trim(t1,t2,pad=True, fill_value=trR.data[0])  

            nmin = min(len(tr.data),len(trROT[0].data))             
            tr.data = tr.data[:nmin]
            for trR in trROT:
                trR.data = trR.data[:nmin]
              
                
             #############            
            trROT = np.array(trROT)  
            syn  =  np.dot(trROT.T,M) 
            synA =  np.dot(trROT.T,MA)
            synB =  np.dot(trROT.T,MB)
            
            SYN = np.append(SYN,syn)  
            SYNA = np.append(SYNA,synA)
            SYNB = np.append(SYNB,synB)
            
            if subf == 0:
                ObservedDisp = np.append(ObservedDisp, tr.data, 0)
            
  
        sbarea.append(grid[subf][6])
   
        print np.shape(A), np.shape(np.array([SYN]))
        if subf == 0: 
            A  = np.array([SYN])
            AA = np.array([SYNA])
            AB = np.array([SYNB])
        else:
            A = np.append(A,np.array([SYN]),0) 
            AA = np.append(AA,np.array([SYNA]),0)
            AB = np.append(AB,np.array([SYNB]),0)
        
    
    
    #Full matrix with the two rake's component
    AC = np.vstack((AA,AB))
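
    # Illustration (not from the original code): AC stacks the synthetics
    # for rake + 45 and rake - 45 on every subfault into a (2 * nsf) x ndata
    # matrix; non-negative weights on the two components can reproduce any
    # rake in between, as in the commented-out inversions below.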

#MISFIT = np.array([])
########## Stabilizing the solution:         

#### Moment minimization:
#~ constraintD  = np.zeros(nsf)
#~ ObservedDispcons = np.append(ObservedDisp,constraintD)
#~ for lbd in Lbd:
    #~ constraintF  = lbd*np.eye(nsf,nsf)         
    #~ Acons = np.append(A,constraintF,1)   
    #~ print np.shape(Acons.T), np.shape(ObservedDispcons)
    #~ R = nnls(Acons.T,ObservedDispcons)
    #~ M = R[0]
    #~ #M = np.zeros(nsf)
    #~ #M[::2] = 1
    #~ fit = np.dot(A.T,M)
    #~ misfit = 100.*np.sum(np.abs(fit-ObservedDisp))\
             #~ /np.sum(np.abs(ObservedDisp))
    
    #~ MISFIT = np.append(MISFIT,misfit)
#~ plt.figure()
#~ plt.plot(Lbd,MISFIT)
#~ ###########################################
#~ ### Smoothing:
#~ constraintF_base = SmoothMatrix(nsx,nsy)
#~ constraintD  = np.zeros(np.shape(constraintF_base)[0])
#~ ObservedDispcons = np.append(ObservedDisp,constraintD)
#~ for lbd in Lbd:
    #~ constraintF  = lbd*constraintF_base   
    #~ Acons = np.append(A,constraintF.T,1)   
    #~ #print np.shape(Acons.T), np.shape(ObservedDispcons)
    #~ R = nnls(Acons.T,ObservedDispcons)
    #~ M = R[0]
    #~ fit = np.dot(A.T,M)
    #~ misfit = 100.*np.sum(np.abs(fit-ObservedDisp))\
             #~ /np.sum(np.abs(ObservedDisp))
    #~ print lbd, misfit
    #~ MISFIT = np.append(MISFIT,misfit)
#~ ###########################################    
###########################################
#~ ##### Moment Minimization (including rake projections):
#~ constraintD  = np.zeros(2*nsf)
#~ ObservedDispcons = np.append(ObservedDisp,constraintD)
#~ for lbd in Lbd:
    #~ constraintF  = lbd*np.eye(2*nsf,2*nsf)         
    #~ ACcons = np.append(AC,constraintF,1)   
    #~ print np.shape(ACcons.T), np.shape(ObservedDispcons)
    #~ R = nnls(ACcons.T,ObservedDispcons)
    #~ M = R[0]
    #~ fit = np.dot(AC.T,M)
    #~ misfit = 100.*np.sum(np.abs(fit-ObservedDisp))\
             #~ /np.sum(np.abs(ObservedDisp))        
    #~ MISFIT = np.append(MISFIT,misfit)  
    #~ M = np.sqrt(M[:nsf]**2+M[nsf:]**2)

##############################################
### Smoothing (including rake projections):
#~ constraintF_base = SmoothMatrix(nsx,nsy)
#~ Nbase = np.shape(constraintF_base)[0]
#~ constraintD  = np.zeros(2*Nbase)
#~ constraintF_base_big = np.zeros((2*Nbase, 2*nsf))
#~ constraintF_base_big[:Nbase,:nsf]= constraintF_base
#~ constraintF_base_big[Nbase:,nsf:]= constraintF_base 
#~ ObservedDispcons = np.append(ObservedDisp,constraintD)
#~ for lbd in Lbd:
    #~ constraintF  = lbd*constraintF_base_big   
    #~ ACcons = np.append(AC,constraintF.T,1)   
    #~ #print np.shape(Acons.T), np.shape(ObservedDispcons)
    #~ R = nnls(ACcons.T,ObservedDispcons)
    #~ M = R[0]
    #~ fit = np.dot(AC.T,M)
    #~ misfit = 100.*np.sum(np.abs(fit-ObservedDisp))\
             #~ /np.sum(np.abs(ObservedDisp))
    #~ print lbd, misfit
    #~ MISFIT = np.append(MISFIT,misfit)
#~ M = np.sqrt(M[:nsf]**2+M[nsf:]**2)    
###########################################    
#~ ##### Moment Minimization and  Smoothing
  #~ #### (including rake projections):
    #~ mom0 = []
    #~ constraintF_base = SmoothMatrix(nsx,nsy)
    #~ Nbase = np.shape(constraintF_base)[0]
    #~ constraintDsmoo = np.zeros(2*Nbase)
    #~ constraintDmin  = np.zeros(2*nsf)
    #~ constraintF_base_big = np.zeros((2*Nbase, 2*nsf))
    #~ constraintF_base_big[:Nbase,:nsf]= constraintF_base
    #~ constraintF_base_big[Nbase:,nsf:]= constraintF_base 
    #~ ObservedDispcons = np.concatenate((ObservedDisp,
                                  #~ constraintDmin,
                             #~ constraintDsmoo  ))    
   
    #~ for lbdm0 in Lbdm0min:
        #~ constraintFmin  = lbdm0*np.eye(2*nsf,2*nsf)
        #~ for lbdsm in Lbdsmooth:              
            #~ constraintFsmoo  = lbdsm*constraintF_base_big 
            #~ ACcons = np.hstack((AC, constraintFmin, constraintFsmoo.T))   
            #~ print lbdm0, lbdsm
            #~ R = nnls(ACcons.T,ObservedDispcons)
            #~ M = R[0]
            #~ fit = np.dot(AC.T,M)
            #~ misfit = 100.*np.sum(np.abs(fit-ObservedDisp))\
                     #~ /np.sum(np.abs(ObservedDisp))        
            #~ MISFIT = np.append(MISFIT,misfit) 
            #~ MA = M[:nsf]
            #~ MB = M[nsf:]
            #~ M = np.sqrt(MA**2+MB**2)
            #~ mom0.append(np.sum(M))
    ##############################################

    # Rotation to the rake's conventional angle:  
    #MB, MA = Rot2D(MB,MA,-rakeB)
    print np.shape(M), np.shape(A.T)
    R = nnls(A.T,ObservedDisp)
    M = R[0]
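
    # Illustration (not from the original code): scipy's nnls solves
    # min ||A.T x - d||_2 subject to x >= 0, so every subfault moment in M
    # is non-negative by construction; R[1] is the residual norm printed
    # below.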
    
    #~ M = np.zeros(nsf)
    #~ M[::2] = 1 
    fit = np.dot(A.T,M)
    MA = M
    MB = M
    
    np.save("RealSol", M)
      
    nm0 = np.size(Lbdm0min) 
    nsmth = np.size(Lbdsmooth)
    #~ plt.figure()
    #~ plt.pcolor(1./Lbdsmooth, 1./Lbdm0min,MISFIT.reshape(nm0,nsmth))
    #~ plt.xlabel(r'$1/ \lambda_{2}$', fontsize = 24)
    #~ plt.ylabel(r'$1/ \lambda_{1}$',fontsize = 24 )
    #~ plt.ylim((1./Lbdm0min).min(),(1./Lbdm0min).max() )
    #~ plt.ylim((1./Lbdsmooth).min(),(1./Lbdsmooth).max() )
    #~ cbar = plt.colorbar()
    #~ cbar.set_label("Misfit %")
    #~ print np.shape(Lbdm0min), np.shape(mom0)
    
    #~ plt.figure()
    #~ CS = plt.contour(1./Lbdsmooth, 1./Lbdm0min,MISFIT.reshape(nm0,nsmth) )
    #~ plt.xlabel(r'$1/ \lambda_{2}$', fontsize = 24)
    #~ plt.ylabel(r'$1/ \lambda_{1}$',fontsize = 24 )
    #~ plt.clabel(CS, inline=1, fontsize=10)
    #~ plt.title('Misfit')
    
    
    
    #~ plt.figure()
    #~ plt.plot(1./Lbdm0min,MISFIT)
    #~ plt.xlabel(r'$1/ \lambda_{2}$', fontsize = 24)
    #~ plt.ylabel("Misfit %")
    #~ plt.figure()
    #~ plt.plot(Lbdm0min,mom0)
    #~ plt.ylabel(r'$M_0\, [Nm]$', fontsize = 24)
    #~ plt.xlabel(r'$\lambda_{M0}$', fontsize = 24)

   
    misfit = 100.*np.sum(np.abs(fit-ObservedDisp))/np.sum(np.abs(ObservedDisp))
    print "Residual: ", 1000.*R[1]
    print misfit
    
    
    #SLIP = M*Mp/mu/(1.e6*np.array(sbarea))
    
    sbarea = sflen*sfwid
    SLIP = M/(mu*1.e6*sbarea)
    SLIP = SLIP.reshape(nsx,nsy).T[::-1]
    moment = M.reshape(nsx,nsy).T[::-1]
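
    # Illustration (not from the original code): each entry of M is a
    # seismic moment in N m, so dividing by mu times the subfault area in
    # m**2 gives slip in m: 1e19 N m on a 28.6 km x 27.3 km patch with
    # mu = 40 GPa gives 1e19 / (4e10 * 7.8e8) ~ 0.32 m.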
    
    plt.figure(figsize = (13,5))
    plt.plot(fit,'b' ,label="Fit")
    plt.plot(ObservedDisp,'r',label="Observed")
    plt.xlabel("Time [s]")
    plt.ylabel("Displacement [m]")
    plt.legend()
    
    
    np.set_printoptions(linewidth=1000,precision=3)
    print "***********"
    print sbarea
    print SLIP
    print np.mean(SLIP)
    print "Moment:"
    print np.sum(M)
 

    ### SLIPS Distribution (as the synthetics) :
    SLIPS = M.reshape(nsx,nsy).T
    SLIPS /=  mu*1.e6*sbarea
    
    
    #~ ######### Plotting slip distribution:
    #~ # We are going to reflect the y axis later, so:
    hypsbloc = [hyp_subf / nsy , -(hyp_subf % nsy) - 2]

    #Creating the strike and dip axis:
    StrikeAx= np.linspace(0,flen,nsx+1)
    DipAx= np.linspace(0,fwid,nsy+1)
    DepthAx = DipAx*np.sin(np.pi/180.*dip) + Min_h
    print DepthAx
    hlstrike = StrikeAx[hypsbloc[0]] + sflen*0.5
    #we are going to reflect the axis later, so:
    hldip = DipAx[hypsbloc[1]] + sfwid*0.5 
    hldepth = DepthAx[hypsbloc[1]] + sfwid*0.5*np.sin(np.pi/180.*dip)    
    
    StrikeAx = StrikeAx - hlstrike
    DipAx =     DipAx   - hldip
    
    XX, YY = np.meshgrid(StrikeAx, DepthAx)
    XX, ZZ = np.meshgrid(StrikeAx, DipAx )  

    ###### Plot (old colormap: "gist_rainbow_r"):
    plt.figure(figsize = (13,6))
    ax = host_subplot(111)
    im = ax.pcolor(XX, YY, SLIPS, cmap="jet")    
    ax.set_ylabel('Depth [km]')       
    ax.set_ylim(DepthAx[-1],DepthAx[0])  
    
    # Creating a twin plot 
    ax2 = ax.twinx()
    im2 = ax2.pcolor(XX, ZZ, SLIPS[::-1,:], cmap="jet")    
    ax2.set_ylabel('Distance along the dip [km]')
    ax2.set_xlabel('Distance along the strike [km]')    
    ax2.set_ylim(DipAx[0],DipAx[-1])
    ax2.set_xlim(StrikeAx[0],StrikeAx[-1])       
                         
                         
    ax.axis["bottom"].major_ticklabels.set_visible(False) 
    ax2.axis["bottom"].major_ticklabels.set_visible(False)
    ax2.axis["top"].set_visible(True)
    ax2.axis["top"].label.set_visible(True)
    
    
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("bottom", size="5%", pad=0.1)
    cb = plt.colorbar(im, cax=cax, orientation="horizontal")
    cb.set_label("Slip [m]")             
    ax2.plot([0], [0], '*', ms=225./(nsy+4))
    ax2.set_xticks(ax2.get_xticks()[1:-1])

    
    #~ ### Rake plot:
    plt.figure(figsize = (13,6))
    fig = host_subplot(111)
    XXq, ZZq = np.meshgrid(StrikeAx[:-1]+sflen, DipAx[:-1]+sfwid )
    Q = plt.quiver(XXq,ZZq, MB.reshape(nsx,nsy).T[::-1,:]/(mu*1.e6*sbarea), 
                    MA.reshape(nsx,nsy).T[::-1,:]/(mu*1.e6*sbarea),
                    SLIPS[::-1,:],
                units='xy',scale = 0.5  ,  linewidths=(2,), 
                edgecolors=('k'), headaxislength=5  )
    fig.set_ylim([ZZq.min()-80,ZZq.max()+80])
    fig.set_xlim([XXq.min()-20, XXq.max()+20 ])
    fig.set_ylabel('Distance along dip [km]') 
    fig.set_xlabel('Distance along the strike [km]') 
    
    fig2 = fig.twinx()
    fig2.set_xlabel('Distance along the strike [km]') 
    
    fig.axis["bottom"].major_ticklabels.set_visible(False) 
    fig.axis["bottom"].label.set_visible(False)
    fig2.axis["top"].set_visible(True)
    fig2.axis["top"].label.set_visible(True)
    fig2.axis["right"].major_ticklabels.set_visible(False)

    divider = make_axes_locatable(fig)
    cax = divider.append_axes("bottom", size="5%", pad=0.1)
    cb = plt.colorbar(im, cax=cax, orientation="horizontal")
    cb.set_label("Slip [m]") 
    

    
    
    plt.show()

    
        #############
    #~ print np.shape(MISFIT),  np.shape(RUPVEL)
    #~ plt.figure()
    #~ plt.plot(RUPVEL,MISFIT)
    #~ plt.xlabel("Rupture Velocity [km/s]")
    #~ plt.ylabel("Misfit %")
    #~ plt.show()
     

    print np.shape(MB.reshape(nsx,nsy).T)
    print np.shape(ZZ)