dist = a_AS_dist[i]
dist = dist[dist > 0]
mag = a_MS_mag[i]
# plot the histogram: log-spaced bins, normalized by linear bin width and event count
HD_bins = np.arange(np.amin(np.log10(dist)), np.amax(np.log10(dist)), dPar['HD_binsize'])
HD_dist, HD_bins = np.histogram(np.log10(dist), HD_bins)
HD_bins = 10**HD_bins
HD_dist, HD_bins = np.array(list(zip(*zip(HD_dist, HD_bins))))  # cut to same length
aBinSize = [f_bin * 10**dPar['HD_binsize'] - f_bin for f_bin in HD_bins]  # linear width of each log bin
HD_dist /= aBinSize
HD_dist /= len(dist)
ax.loglog(HD_bins[HD_dist > 0], HD_dist[HD_dist > 0], '-', c=cols[i],
          mfc='none', mew=.2, label='Histogram')
# plot the rate
HD_bins, HD_rate = seis_utils.eqRate(dist, 24)
HD_rate /= len(dist)
#ax.loglog(HD_bins, HD_rate, 'r-', c=cols[i], lw=1, label='<m>=%.2f' % a_MS_mag[i])  ##TODO: nullify this line
# plot the smoothed rate
HD_bin_s, HD_rate_smoothed = seis_utils.eqlgRate(np.log10(dist), 65)
HD_bin_s, HD_rate_smoothed = HD_bin_s[HD_rate_smoothed > 0], HD_rate_smoothed[HD_rate_smoothed > 0]
HD_rate_smoothed = scipy.signal.savgol_filter(HD_rate_smoothed, 3, 1)
##ax.loglog(HD_bin_s[HD_rate_smoothed > 0], HD_rate_smoothed[HD_rate_smoothed > 0], \
##          'b-', c=cols[i], lw=1.5, label='<m>=%.2f' % a_MS_mag[i])  ##TODO: use this line
"""
# plot Emily's fit
L = HD_bins[np.argmax(HD_dist)]
sel = HD_bins >= L
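# -----------------------------------------------------------------------------------
# Standalone sketch (not part of the original script) of the log-binned density
# estimate above: histogram log10(r) with constant log-width bins, then normalize the
# counts by each bin's linear width and by the number of events. The synthetic
# distances and the 0.1 bin width are illustrative assumptions only.
import numpy as np

def log_binned_density(a_r, f_dlog=0.1):
    """Return linear bin lower edges and the normalized density (events per unit distance)."""
    a_r = a_r[a_r > 0]
    a_lg_edges = np.arange(np.log10(a_r.min()), np.log10(a_r.max()), f_dlog)
    a_count, a_lg_edges = np.histogram(np.log10(a_r), a_lg_edges)
    a_edges = 10**a_lg_edges
    a_width = a_edges[:-1] * (10**f_dlog - 1)   # linear width of each logarithmic bin
    return a_edges[:-1], a_count / a_width / len(a_r)

a_r_test = 10**np.random.uniform(-1, 2, 5000)    # synthetic distances between 0.1 and 100 km
a_bin_test, a_dens_test = log_binned_density(a_r_test)
# -----------------------------------------------------------------------------------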
#C# create time vector --> use obspy UTCDateTime or mx.DateTime
a_t_decYr = np.zeros(m_EvInRmax[0].shape[0])
a_t_decYr2 = np.zeros(m_EvInRmax[0].shape[0])
for i in range(m_EvInRmax[0].shape[0]):
    a_t_decYr[i] = seis_utils.dateTime2decYr([
        m_EvInRmax[0, i], m_EvInRmax[1, i], m_EvInRmax[2, i],
        m_EvInRmax[3, i], m_EvInRmax[4, i], m_EvInRmax[5, i]
    ])
#==================================3================================================
# power-law fitting (AS decay rate)
#===================================================================================
# subtract mainshock time from aftershock times - new vector holds only AS times relative to the MS, in days
a_tAS_day = (a_t_decYr[i_MS_ID + 1::] - a_t_decYr[i_MS_ID]) * 365.25
# compute rates
at_bin_AS, aN_bin_AS = seis_utils.eqRate(a_tAS_day, dPar['k'])
####A##### power law fit - least squares
# only events within tmin and tmax, do log-transformation
sel_t = np.logical_and(at_bin_AS >= dPar['tmin'], at_bin_AS <= dPar['tmax'])
# for dN/dt = K*t**(-p) we solve a problem analogous to:
# y_hat = alpha * t**beta, with alpha = 10**a and beta = b,
# so the log-log regression slope equals -p and the intercept equals log10(K)
f_p_LS, f_K_LS, __, __, __ = scipy.stats.linregress(np.log10(at_bin_AS[sel_t]),
                                                    np.log10(aN_bin_AS[sel_t]))
f_K_LS = 10**f_K_LS
print('Omori p-value (least-squares): ', round(f_p_LS, 1), 'K', round(f_K_LS, 1))
####B##### power law fit - maximum likelihood
dOm = omori.fit_omoriMLE(a_tAS_day[a_tAS_day > 0], bounds=dPar['a_limit'], par0=dPar['a_par0'],
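# -----------------------------------------------------------------------------------
# Standalone sketch (illustrative only, not the script's seis_utils/omori routines) of
# the least-squares Omori fit above: bin aftershock times, compute dN/dt per bin and
# regress log10(rate) on log10(t); the slope is -p and the intercept is log10(K).
# The synthetic times and the bin choices below are assumptions for demonstration.
import numpy as np
import scipy.stats

# synthetic aftershock times (days) drawn from dN/dt ~ t**(-1.1) on [0.01, 100] days
f_p_true, f_t0, f_t1 = 1.1, 0.01, 100.0
a_u = np.random.uniform(size=5000)
a_t_test = (a_u * f_t1**(1 - f_p_true) + (1 - a_u) * f_t0**(1 - f_p_true))**(1 / (1 - f_p_true))

# rates in logarithmically spaced bins
a_edges = np.logspace(np.log10(f_t0), np.log10(f_t1), 30)
a_N, __ = np.histogram(a_t_test, a_edges)
a_tBin = np.sqrt(a_edges[:-1] * a_edges[1:])   # geometric bin centers
a_rate = a_N / np.diff(a_edges)                # events per day

sel = a_rate > 0
f_b, f_a, __, __, __ = scipy.stats.linregress(np.log10(a_tBin[sel]), np.log10(a_rate[sel]))
print('p (LS) = %.2f,  K = %.1f' % (-f_b, 10**f_a))
# -----------------------------------------------------------------------------------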
a_MS_mag = asSets['a_MS_mag']
a_AS_dist = asSets['a_AS_dist']
i = 0
for f_MSmag, dist in zip(a_MS_mag, a_AS_dist):
    # ==================================3=============================================
    # plot distance decay
    # ================================================================================
    name = '<m>=%.2f' % f_MSmag
    fig1 = plt.figure(1)
    ax1 = plt.subplot(111)
    ax1.set_title(name)
    for k in [20, 50, 100, 200]:
        a_r_bin, a_dens = seis_utils.eqRate(dist, k)
        ax1.loglog(a_r_bin, a_dens, 'o', label=str(k), mew=0)
    #--------- plot -1.4 slope from Felzer and Brodsky for comparison-----------------
    gamma = -1.4
    selPeak = a_dens == a_dens.max()
    preFac = a_dens[selPeak] / (a_r_bin[selPeak]**gamma)  # anchor the reference line at the density peak
    ax1.plot(a_r_bin, preFac * a_r_bin**gamma, 'w-', lw=2.5)
    ax1.plot(a_r_bin, preFac * a_r_bin**gamma, 'k--', label='Dens ~ r ^ %.1f' % (gamma))
    # ==================================4=============================================
    # highlight mainshock rupture dimension
    # ================================================================================
    l0, sigma = 0.01, 0.44
    f_L1 = l0 * 10**(sigma * f_MSmag)  # Hainzl, Moradpour et al.
catChild.copy(eqCatMc)
catParent.copy(eqCatMc)
# catChild, catPar = create_parent_child_cat( projCat, dNND)
catChild.selEventsFromID(dNND['aEqID_c'], repeats=True)
catParent.selEventsFromID(dNND['aEqID_p'], repeats=True)
print('size of parent catalog', catParent.size(), 'size of offspring cat', catChild.size())
## l - rupture dimension (km) from moment magnitude, assuming a constant stress drop
dsigma = 5e6  # stress drop [Pa]
Mw = catParent.data['Mag']
M0 = 10**(1.5 * (Mw + 6.03))  # seismic moment [N m]
l = 0.001 * (7 / 16 * M0 / dsigma)**(1 / 3)  # circular-crack radius, converted from m to km
## hd - haversine distance between parent and offspring
HD = dNND['aHD']
## Rl - distance normalized by parent rupture dimension
aRl = HD / l
print("haversine distance:", HD)
print("rupture dimension", l)
### histogram
aBins = np.arange(0, 30, dPar['eta_binsize'])
aHist, aBins = np.histogram(aRl, aBins)
aHist, aBins = np.array(list(zip(*zip(aHist, aBins))))  # cut to same length
aRate_bin, aRate = seis_utils.eqRate(aRl, k)
aHist /= dPar['eta_binsize']
# =================================4==============================================
# plot histogram
# ================================================================================
#myplot.plot_rate_pd(aBins, aHist, aRate_bin, aRate, curr_Mc, 'a_hs', dPar['eta_binsize'])
myplot.plot_loglog(aBins, aHist, aRate_bin, aRate, curr_Mc, 'b_hs', k)
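# -----------------------------------------------------------------------------------
# Standalone sketch of the rupture-dimension conversion used above: seismic moment
# from the script's Mw = 2/3*log10(M0) - 6.03 convention [N m], and a circular-crack
# radius r = (7/16 * M0/dsigma)**(1/3) for a constant stress drop, converted from m
# to km. The example magnitudes and the 5 MPa stress drop are illustrative.
def rupture_dimension_km(f_Mw, f_dsigma=5e6):
    """Circular-crack source radius in km for moment magnitude f_Mw and stress drop f_dsigma [Pa]."""
    f_M0 = 10**(1.5 * (f_Mw + 6.03))                       # seismic moment [N m]
    return 1e-3 * (7. / 16. * f_M0 / f_dsigma)**(1. / 3.)  # radius [m] -> [km]

for f_mag in [3.0, 5.0, 7.0]:
    print('Mw = %.1f  ->  l = %.2f km' % (f_mag, rupture_dimension_km(f_mag)))
# -----------------------------------------------------------------------------------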
    'q': 0.35,
    'd': 1.2,
    'gamma': 0.6,
}
# =================================1==============================================
# load catalog and select
# ================================================================================
eqcat = EqCat()
dCluster = data_utils.loadmat(input_file)
a_MS_mag = dCluster['a_MS_mag']
a_AS_dist = dCluster['a_AS_dist']
a_AS_Rm = np.zeros(len(a_MS_mag))  # maxima of distance distribution
# find the maximum of each distance distribution
for i, dist in enumerate(a_AS_dist):
    aRateBin_HD, aRate_HD = seis_utils.eqRate(dist, 2 * int(len(dist) / 30))  # ,minK=70
    aRate_HD /= len(dist)
    sel = aRateBin_HD > 0.1
    aRateBin_HD = aRateBin_HD[sel]
    aRate_HD = aRate_HD[sel]
    iRm = np.argmax(aRate_HD)
    a_AS_Rm[i] = aRateBin_HD[iRm]
# =================================3==============================================
# fit single power law: log10(Rm) = sigma*M + log10(C)
# ================================================================================
print(a_AS_Rm[-1])
sigma, lgC, __, __, __ = scipy.stats.linregress(a_MS_mag, np.log10(a_AS_Rm))
C = 10**lgC  # unit: km --> m
print("C:%.5f, sigma:%.2f" % (C, sigma))
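# -----------------------------------------------------------------------------------
# The fit above assumes a single power law Rm = C * 10**(sigma * M), i.e. a straight
# line in (M, log10(Rm)). Minimal sketch on synthetic data; the synthetic l0 = 0.01 and
# sigma = 0.44 echo the values used in the distance-decay script and are illustrative.
import numpy as np
import scipy.stats

a_M_test = np.array([3.0, 4.0, 5.0, 6.0, 7.0])
a_Rm_test = 0.01 * 10**(0.44 * a_M_test) * 10**np.random.normal(0, 0.05, a_M_test.size)
f_sigma, f_lgC, __, __, __ = scipy.stats.linregress(a_M_test, np.log10(a_Rm_test))
print('C = %.5f km, sigma = %.2f' % (10**f_lgC, f_sigma))
# predicted maximum of the aftershock distance distribution for a Mw 6.5 mainshock
print('Rm(M = 6.5) = %.1f km' % (10**f_lgC * 10**(f_sigma * 6.5)))
# -----------------------------------------------------------------------------------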
dsigma = 5e6  # stress drop [Pa]
Mw = catParent.data['Mag']
M0 = 10**(1.5 * (Mw + 6.03))
l = 0.001 * (7 / 16 * M0 / dsigma)**(1 / 3)
## hd
HD = dNND['aHD']
## Rl
aRl = HD / l
#print("haversine distance:", HD)
#print("rupture dimension", l)
### histogram
aBins = np.arange(0, 30, dPar['eta_binsize'])
aHist, aBins = np.histogram(aRl, aBins)
aHist, aBins = np.array(list(zip(*zip(aHist, aBins))))  # cut to same length
aRate_bin, aRate = seis_utils.eqRate(aRl, k)
aHist /= dPar['eta_binsize']
# ================================================================================
# bigger parent event pairs
# ================================================================================
# select event pairs with parent event larger than M_pt
sel = catParent.data['Mag'] >= dPar['M_pt']
catChild.selEventsFromID(dNND['aEqID_c'][sel], repeats=True)
catParent.selEventsFromID(dNND['aEqID_p'][sel], repeats=True)
print('after::: size of parent catalog', catParent.size(), 'size of offspring cat', catChild.size())
## l
Mw1 = catParent.data['Mag']
M01 = 10**(1.5 * (Mw1 + 6.03))
l1 = 0.001 * (7 / 16 * M01 / dsigma)**(1 / 3)
## hd
# ================================================================================
# select only the clustering event pairs
sel_cl = np.log10(dNND['aNND']) <= -5  #-4.7
# HD
HD = dNND['aHD'][sel_cl]
print('catalog size: ', len(HD))
# ==================================3=============================================
# plot distance decay
# ================================================================================
fig1 = plt.figure(1)
ax1 = plt.subplot(111)
ax1.set_title(name)
for k in [20, 50, 100, 200]:
    a_r_bin, a_dens = seis_utils.eqRate(HD, k)  # use the clustered-pair distances selected above
    ax1.loglog(a_r_bin, a_dens, 'o', label=str(k), mew=0)
#--------- plot -1.4 slope from Felzer and Brodsky for comparison-----------------
gamma = -1.4
selPeak = a_dens == a_dens.max()
preFac = a_dens[selPeak] / (a_r_bin[selPeak]**gamma)
ax1.plot(a_r_bin, preFac * a_r_bin**gamma, 'w-', lw=2.5)
ax1.plot(a_r_bin, preFac * a_r_bin**gamma, 'k--', label='Dens ~ r ^ %.1f' % (gamma))
# ==================================4=============================================
# highlight mainshock rupture dimension
# ================================================================================
# find MS magnitude
print(eqCat.data.keys(), asCat.data.keys())
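# -----------------------------------------------------------------------------------
# Sketch of how the Felzer & Brodsky reference slope is drawn above: a power law
# Dens ~ r**gamma is anchored so that it passes through the peak of the measured
# density. The synthetic density below is illustrative; gamma = -1.4 follows the script.
import numpy as np

gamma_ref = -1.4
a_r_test = np.logspace(-1, 2, 50)                             # distances [km]
a_dens_test = a_r_test**gamma_ref * np.exp(-0.5 / a_r_test)   # synthetic density, rolls off at small r
selPeak = a_dens_test == a_dens_test.max()
preFac_test = a_dens_test[selPeak] / a_r_test[selPeak]**gamma_ref  # pins the line to the peak
a_ref_line = preFac_test * a_r_test**gamma_ref                # e.g. ax1.plot(a_r_test, a_ref_line, 'k--')
print('reference line anchored at r = %.2f km' % a_r_test[selPeak][0])
# -----------------------------------------------------------------------------------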