def fit_a_value(bval, mrng, cum_rates, src_mmax, bin_width, midx):
    """Solve for the Gutenberg-Richter rate term N0 given a fixed b-value.

    A unit-rate (N0 = 1) incremental MFD is generated over the observed
    magnitude range, scaled to pass through the observed cumulative rate at
    the lowest magnitude bin considered, and the GR relation
    log10 N(M) = log10(N0) - b*M is then inverted for N0 at that anchor
    magnitude.
    """
    beta = bval2beta(bval)

    # build a unit-rate model curve spanning the observed magnitude range
    unit_rate = 1.
    model_rates, model_mags = get_oq_incrementalMFD(beta, unit_rate,
                                                    mrng[0], src_mmax,
                                                    bin_width)

    # scale the model curve through the observed cumulative rate at the
    # lowest magnitude bin used for fitting
    scaled_rates = cum_rates[midx][0] * (model_rates / model_rates[0])

    # invert the GR relation for N0 at the anchor magnitude
    return 10**(log10(scaled_rates[0]) + bval * model_mags[midx][0])
def get_mfds(mvect, mxvect, tvect, dec_tvect, ev_dict, mcomps, ycomps, ymax, mrng, src_mmax, \
             src_mmin_reg, src_bval_fix, src_bval_fix_sd, bin_width, poly):
    """Derive Gutenberg-Richter MFD parameters for one source zone.

    Strategy depends on the data available:
      * src_bval_fix > 0            -> use the fixed b-value and fit N0 only
      * 50 <= n events < 80         -> Aki maximum-likelihood b-value
      * n events >= 80              -> Weichert (iterative) algorithm
      * otherwise                   -> b-value looked up from the
                                       NSHA13_Background shapefile by the
                                       polygon centroid (0.85 fallback)

    Returns:
        bval, beta, sigb, sigbeta, fn0, cum_rates, ev_out, err_up, err_lo
    """
    # remove incomplete events based on original preferred magnitudes (mxvect)
    mvect, mxvect, tvect, dec_tvect, ev_dict, out_idx, ev_out = \
        remove_incomplete_events(mvect, mxvect, tvect, dec_tvect, ev_dict, mcomps, ycomps, bin_width)

    # get annualised rates using preferred MW (mvect)
    cum_rates, cum_num, bin_rates, n_obs, n_yrs = \
        get_annualised_rates(mcomps, ycomps, mvect, mrng, bin_width, ymax)

    ###############################################################################
    # calculate MFDs if at least 50 events
    ###############################################################################

    # get index of min reg mag and valid mag bins; the trailing 0. keeps
    # diff_cum the same length as cum_rates
    diff_cum = abs(hstack((diff(cum_rates), 0.)))
    midx = where((mrng >= src_mmin_reg - bin_width / 2.) & (isfinite(diff_cum)))[0]

    # if length of midx = 0, fall back to the highest non-zero mag bin
    if len(midx) == 0:
        midx = [where(isfinite(diff_cum))[0][-1]]

    # make sure there are at least 5 bins for b-value calculations by
    # extending midx downward through bins with non-zero observations
    if len(midx) < 5:
        idxstart = midx[0] - 1
        while idxstart >= 0 and len(midx) < 5:
            # if num observations greater than zero, add to midx
            if n_obs[idxstart] > 0:
                midx = hstack((idxstart, midx))
                print ' get lower mag T', midx
            idxstart -= 1

    # first, check if using fixed b-value and fit curve to solve for N0
    if src_bval_fix > 0:
        print ' Using fixed b-value =', src_bval_fix, src_bval_fix_sd

        # set source b-value/beta and their uncertainties
        bval = src_bval_fix
        beta = bval2beta(bval)
        sigb = src_bval_fix_sd
        sigbeta = bval2beta(sigb)

        # get dummy (unit-rate) curve from the regression minimum magnitude
        dummyN0 = 1.
        m_min_reg = src_mmin_reg + bin_width / 2.
        bc_tmp, bc_mrng = get_oq_incrementalMFD(beta, dummyN0, m_min_reg, src_mmax, bin_width)

        # fit to lowest magnitude considered
        bc_lo100 = cum_rates[midx][0] * (bc_tmp / bc_tmp[0])

        # scale for N0 by inverting log10 N(M) = log10(N0) - b*M
        fn0 = 10**(log10(bc_lo100[0]) + beta2bval(beta) * bc_mrng[0])

    # do Aki ML if 50 <= N events < 80
    elif len(mvect) >= 50 and len(mvect) < 80:
        # do Aki max likelihood on bin-centre magnitudes
        bval, sigb = aki_maximum_likelihood(mrng[midx] + bin_width / 2, n_obs[midx], 0.)  # assume completeness taken care of
        beta = bval2beta(bval)
        sigbeta = bval2beta(sigb)

        # now recalc N0 from a dummy (unit-rate) curve
        dummyN0 = 1.
        bc_tmp, bc_mrng = get_oq_incrementalMFD(beta, dummyN0, mrng[0], src_mmax, bin_width)

        # fit to lowest magnitude considered and observed
        Nminmag = cum_rates[midx][0] * (bc_tmp / bc_tmp[0])

        # !!!!!! check into why this must be done - I suspect it may be that there is an Mmax eq in the zones !!!!
        fidx = midx[0]

        # solve for N0
        fn0 = 10**(log10(Nminmag[0]) + bval * bc_mrng[fidx])

        print ' Aki ML b-value =', bval, sigb

    # do Weichert for zones with more events
    elif len(mvect) >= 80:
        # calculate weichert (iterative ML with per-bin observation periods)
        bval, sigb, a_m, siga_m, fn0, stdfn0 = weichert_algorithm(array(n_yrs[midx]), \
                                                                  mrng[midx]+bin_width/2, n_obs[midx], mrate=0.0, \
                                                                  bval=1.1, itstab=1E-4, maxiter=1000)
        beta = bval2beta(bval)
        sigbeta = bval2beta(sigb)

        print ' Weichert b-value = ', bval, sigb

    ###############################################################################
    # calculate MFDs using NSHA13_Background if fewer than 50 events
    ###############################################################################
    else:
        print 'Getting b-value from NSHA Background...'

        # set b-value to nan so a failed polygon lookup can be detected below
        bval = nan

        # load Leonard zones
        lsf = shapefile.Reader(path.join('shapefiles', 'NSHA13_Background',
                                         'NSHA13_Background_NSHA18_MFD.shp'))

        # get Leonard polygons
        l08_shapes = lsf.shapes()

        # get Leonard b-values
        lbval = get_field_data(lsf, 'BVAL_BEST', 'str')

        # get centroid of current poly
        clon, clat = get_shapely_centroid(poly)
        point = Point(clon, clat)

        # loop through zones and find which polygon contains the centroid
        for zone_bval, l_shape in zip(lbval, l08_shapes):
            l_poly = Polygon(l_shape.points)

            # check if source centroid lies within this background polygon
            if point.within(l_poly):
                bval = float(zone_bval)

        # for those odd sites outside of L08 bounds, assign a default b-value
        if isnan(bval):
            bval = 0.85

        beta = bval2beta(bval)
        sigb = 0.1
        sigbeta = bval2beta(sigb)

        # solve for N0
        fn0 = fit_a_value(bval, mrng, cum_rates, src_mmax, bin_width, midx)

        print ' Leonard2008 b-value =', bval, sigb

    # get confidence intervals on the observed cumulative rates
    err_up, err_lo = get_confidence_intervals(n_obs, cum_rates)

    return bval, beta, sigb, sigbeta, fn0, cum_rates, ev_out, err_up, err_lo
# convert min Mx to MW mcompminml = around(ceil(mcompmin_ml * 10.) / 10., decimals=1) mcompminml = 2.95 mcompminmw = around(ceil(mcompmin_mw * 10.) / 10., decimals=1) mcompminmw = 2.95 mrng_mw = arange(mcompminmw - bin_width / 2, src_mmax[i], bin_width) mrng_ml = arange(mcompminml - bin_width / 2, src_mmax[i], bin_width) # set null values to avoid plotting issues later bval = 1. bval_sig = 0.1 new_bval_b[i] = 1.0 new_n0_b[i] = 1E-30 # set beta params beta = bval2beta(bval) sigbeta = bval2beta(bval_sig) # set polygons poly = polygons[i] # get area (in km**2) of sources for normalisation src_area.append(get_WGS84_area(poly)) ############################################################################### # set preferred catalogue for each source ############################################################################### # mw alt based om mw_ble; pref_mw based on mw_qds magKeys = ['mx_origML', 'mx_revML', 'mw_alt_ble', 'mw_alt_qde', 'mw_pref']
def get_mfds(mvect, mxvect, tvect, dec_tvect, ev_dict, mcomps, ycomps, ymax, mrng, src_mmax, \
             src_mmin_reg, src_bval_fix, src_bval_fix_sd, bin_width, poly):
    """Derive Gutenberg-Richter MFD parameters for one source zone.

    Strategy depends on the data available:
      * src_bval_fix > 0            -> use the fixed b-value and fit N0 only
      * 30 <= n events < 80         -> Aki maximum-likelihood b-value
      * n events >= 80              -> Weichert (iterative) algorithm
      * otherwise                   -> default b-value of 1.0

    Returns:
        bval, beta, sigb, sigbeta, fn0, cum_rates, ev_out, err_up, err_lo
    """
    # remove incomplete events based on original preferred magnitudes (mxvect)
    mvect, mxvect, tvect, dec_tvect, ev_dict, out_idx, ev_out = \
        remove_incomplete_events(mvect, mxvect, tvect, dec_tvect, ev_dict, mcomps, ycomps, bin_width)

    # get annualised rates using preferred MW (mvect)
    cum_rates, cum_num, bin_rates, n_obs, n_yrs = \
        get_annualised_rates(mcomps, ycomps, mvect, mrng, bin_width, ymax)

    print(' Number of events:', len(mvect))
    # print(cum_rates)

    ###############################################################################
    # calculate MFDs if at least 30 events
    ###############################################################################

    # get index of min reg mag and valid mag bins; the trailing 0. keeps
    # diff_cum the same length as cum_rates
    diff_cum = abs(hstack((diff(cum_rates), 0.)))
    midx = where((mrng >= src_mmin_reg - bin_width / 2.) & (isfinite(diff_cum)))[0]

    # if length of midx = 0, fall back to the highest non-zero mag bin
    if len(midx) == 0:
        midx = [where(isfinite(diff_cum))[0][-1]]

    # make sure there are at least 5 bins for b-value calculations by
    # extending midx downward through bins with non-zero observations
    if len(midx) < 5:
        idxstart = midx[0] - 1
        while idxstart >= 0 and len(midx) < 5:
            # if num observations greater than zero, add to midx
            if n_obs[idxstart] > 0:
                midx = hstack((idxstart, midx))
                print(' get lower mag T', midx)
            idxstart -= 1

    # first, check if using fixed b-value and fit curve to solve for N0
    if src_bval_fix > 0:
        print(' Using fixed b-value =', src_bval_fix, src_bval_fix_sd)

        # set source b-value/beta and their uncertainties
        bval = src_bval_fix
        beta = bval2beta(bval)
        sigb = src_bval_fix_sd
        sigbeta = bval2beta(sigb)

        # get dummy (unit-rate) curve from the regression minimum magnitude
        dummyN0 = 1.
        m_min_reg = src_mmin_reg + bin_width / 2.
        bc_tmp, bc_mrng = get_oq_incrementalMFD(beta, dummyN0, m_min_reg, src_mmax, bin_width)

        # fit to lowest magnitude considered
        bc_lo100 = cum_rates[midx][0] * (bc_tmp / bc_tmp[0])

        # scale for N0 by inverting log10 N(M) = log10(N0) - b*M
        fn0 = 10**(log10(bc_lo100[0]) + beta2bval(beta) * bc_mrng[0])

    # do Aki ML if 30 <= N events < 80
    elif len(mvect) >= 30 and len(mvect) < 80:
        # do Aki max likelihood on bin-centre magnitudes
        bval, sigb = aki_maximum_likelihood(mrng[midx] + bin_width / 2, n_obs[midx], 0.)  # assume completeness taken care of
        beta = bval2beta(bval)
        sigbeta = bval2beta(sigb)

        # now recalc N0 from a dummy (unit-rate) curve
        dummyN0 = 1.
        bc_tmp, bc_mrng = get_oq_incrementalMFD(beta, dummyN0, mrng[0], src_mmax, bin_width)

        # fit to lowest magnitude considered and observed
        Nminmag = cum_rates[midx][0] * (bc_tmp / bc_tmp[0])

        # !!!!!! check into why this must be done - I suspect it may be that there is an Mmax eq in the zones !!!!
        fidx = midx[0]

        # solve for N0
        fn0 = 10**(log10(Nminmag[0]) + bval * bc_mrng[fidx])

        print(' Aki ML b-value =', bval, sigb)

    # do Weichert for zones with more events
    elif len(mvect) >= 80:
        # calculate weichert (iterative ML with per-bin observation periods)
        bval, sigb, a_m, siga_m, fn0, stdfn0 = weichert_algorithm(array(n_yrs[midx]), \
                                                                  mrng[midx]+bin_width/2, n_obs[midx], mrate=0.0, \
                                                                  bval=1.1, itstab=1E-4, maxiter=1000)
        beta = bval2beta(bval)
        sigbeta = bval2beta(sigb)

        print(' Weichert b-value =', str('%0.3f' % bval), str('%0.3f' % sigb))

    ###############################################################################
    # assign a default b-value of 1.0 if fewer than 30 events
    ###############################################################################
    else:
        print('Setting b-value to 1.0...')
        bval = 1.0
        beta = bval2beta(bval)
        sigb = 0.1
        sigbeta = bval2beta(sigb)

        # solve for N0
        fn0 = fit_a_value(bval, mrng, cum_rates, src_mmax, bin_width, midx)

        print(' Automatic b-value =', bval, sigb)

    ###############################################################################
    # get confidence intervals
    ###############################################################################

    err_up, err_lo = get_confidence_intervals(n_obs, cum_rates)

    return bval, beta, sigb, sigbeta, fn0, cum_rates, ev_out, err_up, err_lo
# calculate MFDs` ############################################################################### # get magnitude indices being considered for regression midx = where(mrng >= src_mmin_reg[i])[0] # if beta not fixed, do Weichert if src_bval_fix[i] == -99: # calculate weichert bval, sigb, a_m, siga_m, fn0, stdfn0 = weichert_algorithm(array(n_yrs[midx]), \ mrng[midx]+bin_width/2, n_obs[midx], mrate=0.0, \ bval=1.0, itstab=1E-5, maxiter=1000) beta = bval2beta(bval) sigbeta = bval2beta(sigb) # else, fit curve using fixed beta and solve for N0 else: # set source beta beta = src_bval_fix[i] bval = beta2bval(beta) sigbeta = src_bval_fix_sd[i] # get dummy curve dummyN0 = 1. m_min_reg = src_mmin_reg[i] + bin_width / 2. bc_tmp, bc_mrng = get_oq_incrementalMFD(beta, dummyN0, m_min_reg, src_mmax[i], bin_width)
# calculate MFDs` ############################################################################### # get magnitude indices being considered for regression midx = where(mrng >= src_mmin_reg[i])[0] # if beta not fixed, do Weichert if src_beta_fix[i] == -99: # calculate weichert bval, sigb, a_m, siga_m, fn0, stdfn0 = weichert_algorithm(array(n_yrs[midx]), \ mrng[midx]+bin_width/2, n_obs[midx], mrate=0.0, \ bval=1.0, itstab=1E-5, maxiter=1000) beta = bval2beta(bval) sigbeta = bval2beta(sigb) # else, fit curve using fixed beta and solve for N0 else: # set source beta beta = src_beta_fix[i] bval = beta2bval(beta) sigbeta = src_beta_fix_sd[i] # get dummy curve dummyN0 = 1. bc_tmp, bc_mrng = get_oq_incrementalMFD(beta, dummyN0, src_mmin_reg[i], src_mmax[i], bin_width)