def common_dir_boot(dir1, dir2, bootsteps=1000, plot='no'):
    # Test for a common direction using a bootstrap method.
    # Directions are given as pandas Series listing mean direction/pole parameters.
    # This assumes published means without the underlying directions, so the bootstrap is a
    # little different from the one described in Tauxe: not a pseudo-selection from specified
    # points, but drawing from a Fisher distribution directly. This adds a level of
    # abstraction which hopefully does not invalidate the procedure.
    BDI1, BDI2 = [], []
    for s in range(bootsteps):
        fpars1 = pmag.fisher_mean(get_fish(dir1))
        fpars2 = pmag.fisher_mean(get_fish(dir2))
        BDI1.append([fpars1['dec'], fpars1['inc']])
        BDI2.append([fpars2['dec'], fpars2['inc']])
    bounds1 = get_bounds(BDI1)
    bounds2 = get_bounds(BDI2)
    # now check for a pass or a fail - only pass if the error bounds overlap in x, y, and z
    bresult = []
    for b1, b2 in zip(bounds1, bounds2):
        if (b1[0] > b2[1] or b1[1] < b2[0]):
            bresult.append(0)
        else:
            bresult.append(1)
    if sum(bresult) == 3:
        outcome = 'Pass'
    else:
        outcome = 'Fail'
    angle = pmag.angle((dir1['dec'], dir1['inc']), (dir2['dec'], dir2['inc']))
    # set up plots - run this to visually check what's going on
    if plot == 'yes':
        CDF = {'X': 1, 'Y': 2, 'Z': 3}
        pmagplotlib.plot_init(CDF['X'], 4, 4)
        pmagplotlib.plot_init(CDF['Y'], 4, 4)
        pmagplotlib.plot_init(CDF['Z'], 4, 4)
        # draw the cdfs
        pmagplotlib.plotCOM(CDF, BDI1, BDI2, ["", ""])
        pmagplotlib.drawFIGS(CDF)
    return pd.Series([outcome, angle[0]], index=['Outcome', 'angle'])
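# Example usage sketch for common_dir_boot. This assumes pandas is imported as pd and that the
# helper functions get_fish() (which draws Fisher-distributed directions from a published mean)
# and get_bounds() (which returns 95% bootstrap bounds on the Cartesian components) are defined
# elsewhere in this notebook; the Series fields below (dec, inc, k, n) are illustrative only.
import pandas as pd

mean1 = pd.Series({'dec': 11.2, 'inc': 45.1, 'k': 30., 'n': 25})
mean2 = pd.Series({'dec': 15.6, 'inc': 41.3, 'k': 42., 'n': 18})
result = common_dir_boot(mean1, mean2, bootsteps=1000, plot='no')
print result['Outcome'], result['angle']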
def main(): """ NAME gofish.py DESCRIPTION calculates fisher parameters from dec inc data INPUT FORMAT takes dec/inc as first two columns in space delimited file SYNTAX gofish.py [options] [< filename] OPTIONS -h prints help message and quits -i for interactive filename entry -f FILE, specify input file -F FILE, specifies output file name < filename for reading from standard input OUTPUT mean dec, mean inc, N, R, k, a95, csd """ if '-h' in sys.argv: # check if help is needed print main.__doc__ sys.exit() # graceful quit if '-i' in sys.argv: # ask for filename file=raw_input("Enter file name with dec, inc data: ") f=open(file,'rU') data=f.readlines() elif '-f' in sys.argv: dat=[] ind=sys.argv.index('-f') file=sys.argv[ind+1] f=open(file,'rU') data=f.readlines() else: data = sys.stdin.readlines() # read from standard input ofile = "" if '-F' in sys.argv: ind = sys.argv.index('-F') ofile= sys.argv[ind+1] out = open(ofile, 'w + a') DIs= [] # set up list for dec inc data for line in data: # read in the data from standard input if '\t' in line: rec=line.split('\t') # split each line on space to get records else: rec=line.split() # split each line on space to get records DIs.append((float(rec[0]),float(rec[1]))) # fpars=pmag.fisher_mean(DIs) outstring='%7.1f %7.1f %i %10.4f %8.1f %7.1f %7.1f'%(fpars['dec'],fpars['inc'],fpars['n'],fpars['r'],fpars['k'],fpars['alpha95'], fpars['csd']) if ofile == "": print outstring else: out.write(outstring+'\n')
def main(): """ NAME probRo.py DESCRIPTION Calculates Ro as a test for randomness - if R exceeds Ro given N, then set is not random at 95% level of confindence SYNTAX probRo.py [command line options] OPTIONS -h prints this help message -Nm number of Monte Carlo simulations (default is 10000) -Nmax maximum number for dataset (default is 10) """ Ns,Nm=range(4,11),10000 files,fmt={},'svg' if '-h' in sys.argv: # check if help is needed print main.__doc__ sys.exit() # graceful quit if '-Nm' in sys.argv: ind=sys.argv.index('-Nm') Nm=int(sys.argv[ind+1]) if '-Nmax' in sys.argv: ind=sys.argv.index('-Nmax') Nmax=int(sys.argv[ind+1]) Ns=range(3,Nmax+1) PLT={'plt':1} pmagplotlib.plot_init(PLT['plt'],5,5) Ro=[] for N in Ns: Rs=[] n=0 while n<Nm: dirs=pmag.get_unf(N) pars=pmag.fisher_mean(dirs) Rs.append(pars['r']) n+=1 Rs.sort() crit=int(.95*Nm) Ro.append(Rs[crit]) pmagplotlib.plotXY(PLT['plt'],Ns,Ro,'-','N','Ro','') pmagplotlib.plotXY(PLT['plt'],Ns,Ro,'ro','N','Ro','') pmagplotlib.drawFIGS(PLT) for key in PLT.keys(): files[key]=key+'.'+fmt ans=raw_input(" S[a]ve to save plot, [q]uit without saving: ") if ans=="a": pmagplotlib.saveP(PLT,files)
def main(): """ NAME watsonsV.py DESCRIPTION calculates Watson's V statistic from input files INPUT FORMAT takes dec/inc as first two columns in two space delimited files SYNTAX watsonsV.py [command line options] OPTIONS -h prints help message and quits -f FILE (with optional second) -f2 FILE (second file) -ant, flip antipodal directions to opposite direction in first file if only one file or flip all in second, if two files -P (don't save or show plot) -sav save figure and quit silently -fmt [png,svg,eps,pdf,jpg] format for saved figure OUTPUT Watson's V and the Monte Carlo Critical Value Vc. in plot, V is solid and Vc is dashed. """ Flip=0 show,plot=1,0 fmt='svg' file2="" if '-h' in sys.argv: # check if help is needed print main.__doc__ sys.exit() # graceful quit if '-ant' in sys.argv: Flip=1 if '-sav' in sys.argv: show,plot=0,1 # don't display, but do save plot if '-fmt' in sys.argv: ind=sys.argv.index('-fmt') fmt=sys.argv[ind+1] if '-P' in sys.argv: show=0 # don't display or save plot if '-f' in sys.argv: ind=sys.argv.index('-f') file1=sys.argv[ind+1] data=numpy.loadtxt(file1).transpose() D1=numpy.array([data[0],data[1]]).transpose() else: print "-f is required" print main.__doc__ sys.exit() if '-f2' in sys.argv: ind=sys.argv.index('-f2') file2=sys.argv[ind+1] data2=numpy.loadtxt(file2).transpose() D2=numpy.array([data2[0],data2[1]]).transpose() if Flip==1: D2,D=pmag.flip(D2) # D2 are now flipped if len(D2)!=0: if len(D)!=0: D2=numpy.concatenate(D,D2) # put all in D2 elif len(D)!=0: D2=D else: print 'length of second file is zero' sys.exit() elif Flip==1:D2,D1=pmag.flip(D1) # peel out antipodal directions, put in D2 # counter,NumSims=0,5000 # # first calculate the fisher means and cartesian coordinates of each set of Directions # pars_1=pmag.fisher_mean(D1) pars_2=pmag.fisher_mean(D2) # # get V statistic for these # V=pmag.vfunc(pars_1,pars_2) # # do monte carlo simulation of datasets with same kappas, but common mean # Vp=[] # set of Vs from simulations if show==1:print "Doing ",NumSims," simulations" for k in range(NumSims): counter+=1 if counter==50: if show==1:print k+1 counter=0 Dirp=[] # get a set of N1 fisher distributed vectors with k1, calculate fisher stats for i in range(pars_1["n"]): Dirp.append(pmag.fshdev(pars_1["k"])) pars_p1=pmag.fisher_mean(Dirp) # get a set of N2 fisher distributed vectors with k2, calculate fisher stats Dirp=[] for i in range(pars_2["n"]): Dirp.append(pmag.fshdev(pars_2["k"])) pars_p2=pmag.fisher_mean(Dirp) # get the V for these Vk=pmag.vfunc(pars_p1,pars_p2) Vp.append(Vk) # # sort the Vs, get Vcrit (95th one) # Vp.sort() k=int(.95*NumSims) if show==1: print "Watson's V, Vcrit: " print ' %10.1f %10.1f'%(V,Vp[k]) if show==1 or plot==1: CDF={'cdf':1} pmagplotlib.plot_init(CDF['cdf'],5,5) pmagplotlib.plotCDF(CDF['cdf'],Vp,"Watson's V",'r',"") pmagplotlib.plotVs(CDF['cdf'],[V],'g','-') pmagplotlib.plotVs(CDF['cdf'],[Vp[k]],'b','--') if plot==0:pmagplotlib.drawFIGS(CDF) files={} if file2!="": files['cdf']='WatsonsV_'+file1+'_'+file2+'.'+fmt else: files['cdf']='WatsonsV_'+file1+'.'+fmt if pmagplotlib.isServer: black = '#000000' purple = '#800080' titles={} titles['cdf']='Cumulative Distribution' CDF = pmagplotlib.addBorders(CDF,titles,black,purple) pmagplotlib.saveP(CDF,files) elif plot==0: ans=raw_input(" S[a]ve to save plot, [q]uit without saving: ") if ans=="a": pmagplotlib.saveP(CDF,files) if plot==1: # save and quit silently pmagplotlib.saveP(CDF,files)
def main(): """ NAME plotdi_e.py DESCRIPTION plots equal area projection from dec inc data and cones of confidence (Fisher, kent or Bingham or bootstrap). INPUT FORMAT takes dec/inc as first two columns in space delimited file SYNTAX plotdi_e.py [command line options] OPTIONS -h prints help message and quits -i for interactive parameter entry -f FILE, sets input filename on command line -Fish plots unit vector mean direction, alpha95 -Bing plots Principal direction, Bingham confidence ellipse -Kent plots unit vector mean direction, confidence ellipse -Boot E plots unit vector mean direction, bootstrapped confidence ellipse -Boot V plots unit vector mean direction, distribution of bootstrapped means """ dist='F' # default distribution is Fisherian mode=1 EQ={'eq':1} if len(sys.argv) > 0: if '-h' in sys.argv: # check if help is needed print main.__doc__ sys.exit() # graceful quit if '-i' in sys.argv: # ask for filename file=raw_input("Enter file name with dec, inc data: ") dist=raw_input("Enter desired distrubution: [Fish]er, [Bing]ham, [Kent] [Boot] [default is Fisher]: ") if dist=="":dist="F" if dist=="Boot": type=raw_input(" Ellipses or distribution of vectors? [E]/V ") if type=="" or type=="E": dist="BE" else: dist="BE" else: # if '-f' in sys.argv: ind=sys.argv.index('-f') file=sys.argv[ind+1] else: print 'you must specify a file name' print main.__doc__ sys.exit() if '-Bing' in sys.argv:dist='B' if '-Kent' in sys.argv:dist='K' if '-Boot' in sys.argv: ind=sys.argv.index('-Boot') type=sys.argv[ind+1] if type=='E': dist='BE' elif type=='V': dist='BV' EQ['bdirs']=2 pmagplotlib.plot_init(EQ['bdirs'],5,5) else: print main.__doc__ sys.exit() pmagplotlib.plot_init(EQ['eq'],5,5) # # get to work f=open(file,'r') data=f.readlines() # DIs= [] # set up list for dec inc data DiRecs=[] pars=[] nDIs,rDIs,npars,rpars=[],[],[],[] mode =1 for line in data: # read in the data from standard input DiRec={} rec=line.split() # split each line on space to get records DIs.append((float(rec[0]),float(rec[1]),1.)) DiRec['dec']=rec[0] DiRec['inc']=rec[1] DiRec['direction_type']='l' DiRecs.append(DiRec) # split into two modes ppars=pmag.doprinc(DIs) # get principal directions for rec in DIs: angle=pmag.angle([rec[0],rec[1]],[ppars['dec'],ppars['inc']]) if angle>90.: rDIs.append(rec) else: nDIs.append(rec) if dist=='B': # do on whole dataset title="Bingham confidence ellipse" bpars=pmag.dobingham(DIs) for key in bpars.keys(): if key!='n':print " ",key, '%7.1f'%(bpars[key]) if key=='n':print " ",key, ' %i'%(bpars[key]) npars.append(bpars['dec']) npars.append(bpars['inc']) npars.append(bpars['Zeta']) npars.append(bpars['Zdec']) npars.append(bpars['Zinc']) npars.append(bpars['Eta']) npars.append(bpars['Edec']) npars.append(bpars['Einc']) if dist=='F': title="Fisher confidence cone" if len(nDIs)>3: fpars=pmag.fisher_mean(nDIs) print "mode ",mode for key in fpars.keys(): if key!='n':print " ",key, '%7.1f'%(fpars[key]) if key=='n':print " ",key, ' %i'%(fpars[key]) mode+=1 npars.append(fpars['dec']) npars.append(fpars['inc']) npars.append(fpars['alpha95']) # Beta npars.append(fpars['dec']) isign=abs(fpars['inc'])/fpars['inc'] npars.append(fpars['inc']-isign*90.) #Beta inc npars.append(fpars['alpha95']) # gamma npars.append(fpars['dec']+90.) # Beta dec npars.append(0.) 
#Beta inc if len(rDIs)>3: fpars=pmag.fisher_mean(rDIs) print "mode ",mode for key in fpars.keys(): if key!='n':print " ",key, '%7.1f'%(fpars[key]) if key=='n':print " ",key, ' %i'%(fpars[key]) mode+=1 rpars.append(fpars['dec']) rpars.append(fpars['inc']) rpars.append(fpars['alpha95']) # Beta rpars.append(fpars['dec']) isign=abs(fpars['inc'])/fpars['inc'] rpars.append(fpars['inc']-isign*90.) #Beta inc rpars.append(fpars['alpha95']) # gamma rpars.append(fpars['dec']+90.) # Beta dec rpars.append(0.) #Beta inc if dist=='K': title="Kent confidence ellipse" if len(nDIs)>3: kpars=pmag.dokent(nDIs,len(nDIs)) print "mode ",mode for key in kpars.keys(): if key!='n':print " ",key, '%7.1f'%(kpars[key]) if key=='n':print " ",key, ' %i'%(kpars[key]) mode+=1 npars.append(kpars['dec']) npars.append(kpars['inc']) npars.append(kpars['Zeta']) npars.append(kpars['Zdec']) npars.append(kpars['Zinc']) npars.append(kpars['Eta']) npars.append(kpars['Edec']) npars.append(kpars['Einc']) if len(rDIs)>3: kpars=pmag.dokent(rDIs,len(rDIs)) print "mode ",mode for key in kpars.keys(): if key!='n':print " ",key, '%7.1f'%(kpars[key]) if key=='n':print " ",key, ' %i'%(kpars[key]) mode+=1 rpars.append(kpars['dec']) rpars.append(kpars['inc']) rpars.append(kpars['Zeta']) rpars.append(kpars['Zdec']) rpars.append(kpars['Zinc']) rpars.append(kpars['Eta']) rpars.append(kpars['Edec']) rpars.append(kpars['Einc']) else: # assume bootstrap if dist=='BE': if len(nDIs)>5: BnDIs=pmag.di_boot(nDIs) Bkpars=pmag.dokent(BnDIs,1.) print "mode ",mode for key in Bkpars.keys(): if key!='n':print " ",key, '%7.1f'%(Bkpars[key]) if key=='n':print " ",key, ' %i'%(Bkpars[key]) mode+=1 npars.append(Bkpars['dec']) npars.append(Bkpars['inc']) npars.append(Bkpars['Zeta']) npars.append(Bkpars['Zdec']) npars.append(Bkpars['Zinc']) npars.append(Bkpars['Eta']) npars.append(Bkpars['Edec']) npars.append(Bkpars['Einc']) if len(rDIs)>5: BrDIs=pmag.di_boot(rDIs) Bkpars=pmag.dokent(BrDIs,1.) print "mode ",mode for key in Bkpars.keys(): if key!='n':print " ",key, '%7.1f'%(Bkpars[key]) if key=='n':print " ",key, ' %i'%(Bkpars[key]) mode+=1 rpars.append(Bkpars['dec']) rpars.append(Bkpars['inc']) rpars.append(Bkpars['Zeta']) rpars.append(Bkpars['Zdec']) rpars.append(Bkpars['Zinc']) rpars.append(Bkpars['Eta']) rpars.append(Bkpars['Edec']) rpars.append(Bkpars['Einc']) title="Bootstrapped confidence ellipse" elif dist=='BV': if len(nDIs)>5: pmagplotlib.plotEQ(EQ['eq'],nDIs,'Data') BnDIs=pmag.di_boot(nDIs) pmagplotlib.plotEQ(EQ['bdirs'],BnDIs,'Bootstrapped Eigenvectors') if len(rDIs)>5: BrDIs=pmag.di_boot(rDIs) if len(nDIs)>5: # plot on existing plots pmagplotlib.plotDI(EQ['eq'],rDIs) pmagplotlib.plotDI(EQ['bdirs'],BrDIs) else: pmagplotlib.plotEQ(EQ['eq'],rDIs,'Data') pmagplotlib.plotEQ(EQ['bdirs'],BrDIs,'Bootstrapped Eigenvectors') pmagplotlib.drawFIGS(EQ) ans=raw_input('s[a]ve, [q]uit ') if ans=='q':sys.exit() if ans=='a': files={} for key in EQ.keys(): files[key]='BE_'+key+'.svg' pmagplotlib.saveP(EQ,files) sys.exit() if len(nDIs)>5: pmagplotlib.plotCONF(EQ['eq'],title,DiRecs,npars,1) if len(rDIs)>5 and dist!='B': pmagplotlib.plotCONF(EQ['eq'],title,[],rpars,0) elif len(rDIs)>5 and dist!='B': pmagplotlib.plotCONF(EQ['eq'],title,DiRecs,rpars,1) pmagplotlib.drawFIGS(EQ) ans=raw_input('s[a]ve, [q]uit ') if ans=='q':sys.exit() if ans=='a': files={} for key in EQ.keys(): files[key]=key+'.svg' pmagplotlib.saveP(EQ,files)
def watson_common_mean(Data1, Data2, NumSims=5000, plot='no'):
    """
    Conduct a Watson V test for a common mean on two declination, inclination data sets.

    This function calculates Watson's V statistic from input files through Monte
    Carlo simulation in order to test whether two populations of directional data
    could have been drawn from a common mean. The critical angle between the two
    sample mean directions and the corresponding McFadden and McElhinny (1990)
    classification is printed.

    Required Arguments
    ----------
    Data1 : a list of directional data [dec, inc]
    Data2 : a list of directional data [dec, inc]

    Optional Arguments
    ----------
    NumSims : number of Monte Carlo simulations (default is 5000)
    plot : the default is no plot ('no'). Putting 'yes' will plot the CDF from
        the Monte Carlo simulations.
    """
    pars_1 = pmag.fisher_mean(Data1)
    pars_2 = pmag.fisher_mean(Data2)

    cart_1 = pmag.dir2cart([pars_1["dec"], pars_1["inc"], pars_1["r"]])
    cart_2 = pmag.dir2cart([pars_2['dec'], pars_2['inc'], pars_2["r"]])
    Sw = pars_1['k'] * pars_1['r'] + pars_2['k'] * pars_2['r']  # k1*r1+k2*r2
    xhat_1 = pars_1['k'] * cart_1[0] + pars_2['k'] * cart_2[0]  # k1*x1+k2*x2
    xhat_2 = pars_1['k'] * cart_1[1] + pars_2['k'] * cart_2[1]  # k1*y1+k2*y2
    xhat_3 = pars_1['k'] * cart_1[2] + pars_2['k'] * cart_2[2]  # k1*z1+k2*z2
    Rw = np.sqrt(xhat_1**2 + xhat_2**2 + xhat_3**2)
    V = 2 * (Sw - Rw)

    # keep the weighted sum for later when determining the "critical angle";
    # save it as Sr (notation of McFadden and McElhinny, 1990)
    Sr = Sw

    # do a Monte Carlo simulation of datasets with the same kappas as the data,
    # but a common mean
    counter = 0
    Vp = []  # set of Vs from simulations
    for k in range(NumSims):
        # get a set of N1 fisher distributed vectors with k1,
        # calculate fisher stats
        Dirp = []
        for i in range(pars_1["n"]):
            Dirp.append(pmag.fshdev(pars_1["k"]))
        pars_p1 = pmag.fisher_mean(Dirp)
        # get a set of N2 fisher distributed vectors with k2,
        # calculate fisher stats
        Dirp = []
        for i in range(pars_2["n"]):
            Dirp.append(pmag.fshdev(pars_2["k"]))
        pars_p2 = pmag.fisher_mean(Dirp)
        # get the V for these
        Vk = pmag.vfunc(pars_p1, pars_p2)
        Vp.append(Vk)

    # sort the Vs, get Vcrit (95th percentile one)
    Vp.sort()
    k = int(.95 * NumSims)
    Vcrit = Vp[k]

    # equation 18 of McFadden and McElhinny, 1990 calculates the critical
    # value of R (Rwc)
    Rwc = Sr - (Vcrit / 2)

    # following equation 19 of McFadden and McElhinny (1990) the critical
    # angle is calculated. If the observed angle (also calculated below)
    # between the data set means exceeds the critical angle the hypothesis
    # of a common mean direction may be rejected at the 95% confidence
    # level. The critical angle is simply a different way to present
    # Watson's V parameter so it makes sense to use the Watson V parameter
    # in comparison with the critical value of V for considering the test
    # results. What calculating the critical angle allows for is the
    # classification of McFadden and McElhinny (1990) to be made
    # for data sets that are consistent with sharing a common mean.
    k1 = pars_1['k']
    k2 = pars_2['k']
    R1 = pars_1['r']
    R2 = pars_2['r']
    critical_angle = np.degrees(np.arccos(((Rwc**2) - ((k1 * R1)**2)
                                           - ((k2 * R2)**2)) /
                                          (2 * k1 * R1 * k2 * R2)))
    D1 = (pars_1['dec'], pars_1['inc'])
    D2 = (pars_2['dec'], pars_2['inc'])
    angle = pmag.angle(D1, D2)

    print "Results of Watson V test: "
    print ""
    print "Watson's V:           " '%.1f' % (V)
    print "Critical value of V:  " '%.1f' % (Vcrit)

    if V < Vcrit:
        print '"Pass": Since V is less than Vcrit, the null hypothesis'
        print 'that the two populations are drawn from distributions'
        print 'that share a common mean direction can not be rejected.'
    elif V > Vcrit:
        print '"Fail": Since V is greater than Vcrit, the two means can'
        print 'be distinguished at the 95% confidence level.'
    print ""
    print "M&M1990 classification:"
    print ""
    print "Angle between data set means: " '%.1f' % (angle)
    print "Critical angle for M&M1990:   " '%.1f' % (critical_angle)

    if V > Vcrit:
        print ""
    elif V < Vcrit:
        if critical_angle < 5:
            print "The McFadden and McElhinny (1990) classification for"
            print "this test is: 'A'"
        elif critical_angle < 10:
            print "The McFadden and McElhinny (1990) classification for"
            print "this test is: 'B'"
        elif critical_angle < 20:
            print "The McFadden and McElhinny (1990) classification for"
            print "this test is: 'C'"
        else:
            print "The McFadden and McElhinny (1990) classification for"
            print "this test is: 'INDETERMINATE'"

    if plot == 'yes':
        CDF = {'cdf': 1}
        # pmagplotlib.plot_init(CDF['cdf'],5,5)
        p1 = pmagplotlib.plotCDF(CDF['cdf'], Vp, "Watson's V", 'r', "")
        p2 = pmagplotlib.plotVs(CDF['cdf'], [V], 'g', '-')
        p3 = pmagplotlib.plotVs(CDF['cdf'], [Vp[k]], 'b', '--')
        pmagplotlib.drawFIGS(CDF)
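# Example usage sketch for watson_common_mean, assuming pmag, pmagplotlib and numpy (as np)
# are imported as in the rest of this module; the two [dec, inc] lists are small, made-up
# data sets (e.g. a normal mode and an already-flipped reversed mode).
normal_mode = [[350.5, 40.2], [355.1, 45.8], [2.3, 38.9], [349.8, 43.1], [358.2, 41.7]]
flipped_reverse_mode = [[352.1, 42.5], [356.7, 39.3], [1.2, 44.0], [348.9, 40.8], [354.3, 46.2]]
watson_common_mean(normal_mode, flipped_reverse_mode, NumSims=1000, plot='no')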
def sb_vgp_calc(dataframe):
    """
    This function calculates the angular dispersion of VGPs and corrects
    for within-site dispersion to return a value S_b. The input data
    needs to be within a pandas DataFrame. The function also plots the
    VGPs and their associated Fisher mean on a projection centered on
    the mean.

    Parameters
    -----------
    dataframe : the name of the pandas.DataFrame containing the data

    the data frame needs to contain these columns:
    dataframe['site_lat'] : latitude of the site
    dataframe['site_long'] : longitude of the site
    dataframe['dec_tc'] : tilt-corrected declination
    dataframe['inc_tc'] : tilt-corrected inclination
    dataframe['k'] : Fisher precision parameter for the directions
    dataframe['n'] : number of directions per site
    dataframe['pole_lat'] : VGP latitude
    dataframe['pole_long'] : VGP longitude
    """
    # calculate the mean from the directional data
    dataframe_dirs = []
    for n in range(0, len(dataframe)):
        dataframe_dirs.append([dataframe['dec_tc'][n],
                               dataframe['inc_tc'][n], 1.])
    dataframe_dir_mean = pmag.fisher_mean(dataframe_dirs)

    # calculate the mean from the vgp data
    dataframe_poles = []
    dataframe_pole_lats = []
    dataframe_pole_lons = []
    for n in range(0, len(dataframe)):
        dataframe_poles.append([dataframe['pole_long'][n],
                                dataframe['pole_lat'][n], 1.])
        dataframe_pole_lats.append(dataframe['pole_lat'][n])
        dataframe_pole_lons.append(dataframe['pole_long'][n])
    dataframe_pole_mean = pmag.fisher_mean(dataframe_poles)

    # calculate mean paleolatitude from the directional data
    dataframe['paleolatitude'] = lat_from_inc(dataframe_dir_mean['inc'])

    # angular distance of each VGP from the mean pole
    angle_list = []
    for n in range(0, len(dataframe)):
        angle = pmag.angle([dataframe['pole_long'][n], dataframe['pole_lat'][n]],
                           [dataframe_pole_mean['dec'], dataframe_pole_mean['inc']])
        angle_list.append(angle[0])
    dataframe['delta_mean-pole'] = angle_list

    # use eq. 2 of Cox (1970) to translate the directional precision parameter
    # into pole coordinates using the assumption of a Fisherian distribution in
    # directional coordinates and the paleolatitude as calculated from mean
    # inclination using the dipole equation
    dataframe['K'] = dataframe['k'] / (0.125 * (5 + 18 * np.sin(np.deg2rad(dataframe['paleolatitude']))**2
                                                + 9 * np.sin(np.deg2rad(dataframe['paleolatitude']))**4))
    dataframe['Sw'] = 81 / (dataframe['K']**0.5)

    # sum the between-site scatter corrected for within-site dispersion
    summation = 0
    N = 0
    for n in range(0, len(dataframe)):
        quantity = dataframe['delta_mean-pole'][n]**2 - dataframe['Sw'][n]**2 / np.float(dataframe['n'][n])
        summation += quantity
        N += 1
    Sb = ((1.0 / (N - 1.0)) * summation)**0.5

    # plot the VGPs and their Fisher mean on a projection centered on the mean pole
    plt.figure(figsize=(6, 6))
    m = Basemap(projection='ortho', lat_0=dataframe_pole_mean['inc'],
                lon_0=dataframe_pole_mean['dec'], resolution='c', area_thresh=50000)
    m.drawcoastlines(linewidth=0.25)
    m.fillcontinents(color='bisque', lake_color='white', zorder=1)
    m.drawmapboundary(fill_color='white')
    m.drawmeridians(np.arange(0, 360, 30))
    m.drawparallels(np.arange(-90, 90, 30))

    plot_vgp(m, dataframe_pole_lons, dataframe_pole_lats, dataframe_pole_lons)
    plot_pole(m, dataframe_pole_mean['dec'], dataframe_pole_mean['inc'],
              dataframe_pole_mean['alpha95'], color='r', marker='s')
    return Sb
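# Example usage sketch for sb_vgp_calc. This assumes pandas (pd), numpy (np), pmag, Basemap,
# matplotlib.pyplot (plt) and the helpers lat_from_inc(), plot_vgp() and plot_pole() are all
# available in this namespace; the site values below are made up purely for illustration.
import pandas as pd

sites = pd.DataFrame({'dec_tc': [2.5, 355.1, 8.3], 'inc_tc': [55.2, 61.7, 49.8],
                      'k': [120., 95., 150.], 'n': [8, 10, 7],
                      'pole_lat': [82.1, 79.5, 84.3], 'pole_long': [140.2, 201.7, 95.4]})
Sb = sb_vgp_calc(sites)
print 'S_b = %.1f' % Sb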
def main(): """ NAME eqarea_ell.py DESCRIPTION makes equal area projections from declination/inclination data and plot ellipses SYNTAX eqarea_ell.py -h [command line options] INPUT takes space delimited Dec/Inc data OPTIONS -h prints help message and quits -f FILE -fmt [svg,png,jpg] format for output plots -sav saves figures and quits -ell [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors """ FIG = {} # plot dictionary FIG['eq'] = 1 # eqarea is figure 1 fmt, dist, mode, plot = 'svg', 'F', 1, 0 sym = {'lower': ['o', 'r'], 'upper': ['o', 'w'], 'size': 10} plotE = 0 if '-h' in sys.argv: print main.__doc__ sys.exit() pmagplotlib.plot_init(FIG['eq'], 5, 5) if '-sav' in sys.argv: plot = 1 if '-f' in sys.argv: ind = sys.argv.index("-f") title = sys.argv[ind + 1] data = numpy.loadtxt(title).transpose() if '-ell' in sys.argv: plotE = 1 ind = sys.argv.index('-ell') ell_type = sys.argv[ind + 1] if ell_type == 'F': dist = 'F' if ell_type == 'K': dist = 'K' if ell_type == 'B': dist = 'B' if ell_type == 'Be': dist = 'BE' if ell_type == 'Bv': dist = 'BV' FIG['bdirs'] = 2 pmagplotlib.plot_init(FIG['bdirs'], 5, 5) if '-fmt' in sys.argv: ind = sys.argv.index("-fmt") fmt = sys.argv[ind + 1] DIblock = numpy.array([data[0], data[1]]).transpose() if len(DIblock) > 0: pmagplotlib.plotEQsym(FIG['eq'], DIblock, title, sym) if plot == 0: pmagplotlib.drawFIGS(FIG) else: print "no data to plot" sys.exit() if plotE == 1: ppars = pmag.doprinc(DIblock) # get principal directions nDIs, rDIs, npars, rpars = [], [], [], [] for rec in DIblock: angle = pmag.angle([rec[0], rec[1]], [ppars['dec'], ppars['inc']]) if angle > 90.: rDIs.append(rec) else: nDIs.append(rec) if dist == 'B': # do on whole dataset etitle = "Bingham confidence ellipse" bpars = pmag.dobingham(DIblock) for key in bpars.keys(): if key != 'n' and pmagplotlib.verbose: print " ", key, '%7.1f' % (bpars[key]) if key == 'n' and pmagplotlib.verbose: print " ", key, ' %i' % (bpars[key]) npars.append(bpars['dec']) npars.append(bpars['inc']) npars.append(bpars['Zeta']) npars.append(bpars['Zdec']) npars.append(bpars['Zinc']) npars.append(bpars['Eta']) npars.append(bpars['Edec']) npars.append(bpars['Einc']) if dist == 'F': etitle = "Fisher confidence cone" if len(nDIs) > 3: fpars = pmag.fisher_mean(nDIs) for key in fpars.keys(): if key != 'n' and pmagplotlib.verbose: print " ", key, '%7.1f' % (fpars[key]) if key == 'n' and pmagplotlib.verbose: print " ", key, ' %i' % (fpars[key]) mode += 1 npars.append(fpars['dec']) npars.append(fpars['inc']) npars.append(fpars['alpha95']) # Beta npars.append(fpars['dec']) isign = abs(fpars['inc']) / fpars['inc'] npars.append(fpars['inc'] - isign * 90.) #Beta inc npars.append(fpars['alpha95']) # gamma npars.append(fpars['dec'] + 90.) # Beta dec npars.append(0.) #Beta inc if len(rDIs) > 3: fpars = pmag.fisher_mean(rDIs) if pmagplotlib.verbose: print "mode ", mode for key in fpars.keys(): if key != 'n' and pmagplotlib.verbose: print " ", key, '%7.1f' % (fpars[key]) if key == 'n' and pmagplotlib.verbose: print " ", key, ' %i' % (fpars[key]) mode += 1 rpars.append(fpars['dec']) rpars.append(fpars['inc']) rpars.append(fpars['alpha95']) # Beta rpars.append(fpars['dec']) isign = abs(fpars['inc']) / fpars['inc'] rpars.append(fpars['inc'] - isign * 90.) #Beta inc rpars.append(fpars['alpha95']) # gamma rpars.append(fpars['dec'] + 90.) # Beta dec rpars.append(0.) 
#Beta inc if dist == 'K': etitle = "Kent confidence ellipse" if len(nDIs) > 3: kpars = pmag.dokent(nDIs, len(nDIs)) if pmagplotlib.verbose: print "mode ", mode for key in kpars.keys(): if key != 'n' and pmagplotlib.verbose: print " ", key, '%7.1f' % (kpars[key]) if key == 'n' and pmagplotlib.verbose: print " ", key, ' %i' % (kpars[key]) mode += 1 npars.append(kpars['dec']) npars.append(kpars['inc']) npars.append(kpars['Zeta']) npars.append(kpars['Zdec']) npars.append(kpars['Zinc']) npars.append(kpars['Eta']) npars.append(kpars['Edec']) npars.append(kpars['Einc']) if len(rDIs) > 3: kpars = pmag.dokent(rDIs, len(rDIs)) if pmagplotlib.verbose: print "mode ", mode for key in kpars.keys(): if key != 'n' and pmagplotlib.verbose: print " ", key, '%7.1f' % (kpars[key]) if key == 'n' and pmagplotlib.verbose: print " ", key, ' %i' % (kpars[key]) mode += 1 rpars.append(kpars['dec']) rpars.append(kpars['inc']) rpars.append(kpars['Zeta']) rpars.append(kpars['Zdec']) rpars.append(kpars['Zinc']) rpars.append(kpars['Eta']) rpars.append(kpars['Edec']) rpars.append(kpars['Einc']) else: # assume bootstrap if len(nDIs) < 10 and len(rDIs) < 10: print 'too few data points for bootstrap' sys.exit() if dist == 'BE': print 'Be patient for bootstrap...' if len(nDIs) >= 10: BnDIs = pmag.di_boot(nDIs) Bkpars = pmag.dokent(BnDIs, 1.) if pmagplotlib.verbose: print "mode ", mode for key in Bkpars.keys(): if key != 'n' and pmagplotlib.verbose: print " ", key, '%7.1f' % (Bkpars[key]) if key == 'n' and pmagplotlib.verbose: print " ", key, ' %i' % (Bkpars[key]) mode += 1 npars.append(Bkpars['dec']) npars.append(Bkpars['inc']) npars.append(Bkpars['Zeta']) npars.append(Bkpars['Zdec']) npars.append(Bkpars['Zinc']) npars.append(Bkpars['Eta']) npars.append(Bkpars['Edec']) npars.append(Bkpars['Einc']) if len(rDIs) >= 10: BrDIs = pmag.di_boot(rDIs) Bkpars = pmag.dokent(BrDIs, 1.) if pmagplotlib.verbose: print "mode ", mode for key in Bkpars.keys(): if key != 'n' and pmagplotlib.verbose: print " ", key, '%7.1f' % (Bkpars[key]) if key == 'n' and pmagplotlib.verbose: print " ", key, ' %i' % (Bkpars[key]) mode += 1 rpars.append(Bkpars['dec']) rpars.append(Bkpars['inc']) rpars.append(Bkpars['Zeta']) rpars.append(Bkpars['Zdec']) rpars.append(Bkpars['Zinc']) rpars.append(Bkpars['Eta']) rpars.append(Bkpars['Edec']) rpars.append(Bkpars['Einc']) etitle = "Bootstrapped confidence ellipse" elif dist == 'BV': print 'Be patient for bootstrap...' vsym = {'lower': ['+', 'k'], 'upper': ['x', 'k'], 'size': 5} if len(nDIs) > 5: BnDIs = pmag.di_boot(nDIs) pmagplotlib.plotEQsym(FIG['bdirs'], BnDIs, 'Bootstrapped Eigenvectors', vsym) if len(rDIs) > 5: BrDIs = pmag.di_boot(rDIs) if len(nDIs) > 5: # plot on existing plots pmagplotlib.plotDIsym(FIG['bdirs'], BrDIs, vsym) else: pmagplotlib.plotEQ(FIG['bdirs'], BrDIs, 'Bootstrapped Eigenvectors', vsym) if dist == 'B': if len(nDIs) > 3 or len(rDIs) > 3: pmagplotlib.plotCONF(FIG['eq'], etitle, [], npars, 0) elif len(nDIs) > 3 and dist != 'BV': pmagplotlib.plotCONF(FIG['eq'], etitle, [], npars, 0) if len(rDIs) > 3: pmagplotlib.plotCONF(FIG['eq'], etitle, [], rpars, 0) elif len(rDIs) > 3 and dist != 'BV': pmagplotlib.plotCONF(FIG['eq'], etitle, [], rpars, 0) if plot == 0: pmagplotlib.drawFIGS(FIG) if plot == 0: pmagplotlib.drawFIGS(FIG) # files = {} for key in FIG.keys(): files[key] = title + '_' + key + '.' 
+ fmt if pmagplotlib.isServer: black = '#000000' purple = '#800080' titles = {} titles['eq'] = 'Equal Area Plot' FIG = pmagplotlib.addBorders(FIG, titles, black, purple) pmagplotlib.saveP(FIG, files) elif plot == 0: ans = raw_input(" S[a]ve to save plot, [q]uit, Return to continue: ") if ans == "q": sys.exit() if ans == "a": pmagplotlib.saveP(FIG, files) else: pmagplotlib.saveP(FIG, files)
def main(): """ NAME revtest_MM1990.py DESCRIPTION calculates Watson's V statistic from input files through Monte Carlo simulation in order to test whether normal and reversed populations could have been drawn from a common mean (equivalent to watsonV.py). Also provides the critical angle between the two sample mean directions and the corresponding McFadden and McElhinny (1990) classification. INPUT FORMAT takes dec/inc as first two columns in two space delimited files (one file for normal directions, one file for reversed directions). SYNTAX revtest_MM1990.py [command line options] OPTIONS -h prints help message and quits -f FILE -f2 FILE -P (don't plot the Watson V cdf) OUTPUT Watson's V between the two populations and the Monte Carlo Critical Value Vc. M&M1990 angle, critical angle and classification Plot of Watson's V CDF from Monte Carlo simulation (red line), V is solid and Vc is dashed. """ D1, D2 = [], [] plot = 1 Flip = 1 if "-h" in sys.argv: # check if help is needed print main.__doc__ sys.exit() # graceful quit if "-P" in sys.argv: plot = 0 if "-f" in sys.argv: ind = sys.argv.index("-f") file1 = sys.argv[ind + 1] f1 = open(file1, "rU") for line in f1.readlines(): rec = line.split() Dec, Inc = float(rec[0]), float(rec[1]) D1.append([Dec, Inc, 1.0]) f1.close() if "-f2" in sys.argv: ind = sys.argv.index("-f2") file2 = sys.argv[ind + 1] f2 = open(file2, "rU") print "be patient, your computer is doing 5000 simulations..." for line in f2.readlines(): rec = line.split() Dec, Inc = float(rec[0]), float(rec[1]) D2.append([Dec, Inc, 1.0]) f2.close() # take the antipode for the directions in file 2 D2_flip = [] for rec in D2: d, i = (rec[0] - 180.0) % 360.0, -rec[1] D2_flip.append([d, i, 1.0]) pars_1 = pmag.fisher_mean(D1) pars_2 = pmag.fisher_mean(D2_flip) cart_1 = pmag.dir2cart([pars_1["dec"], pars_1["inc"], pars_1["r"]]) cart_2 = pmag.dir2cart([pars_2["dec"], pars_2["inc"], pars_2["r"]]) Sw = pars_1["k"] * pars_1["r"] + pars_2["k"] * pars_2["r"] # k1*r1+k2*r2 xhat_1 = pars_1["k"] * cart_1[0] + pars_2["k"] * cart_2[0] # k1*x1+k2*x2 xhat_2 = pars_1["k"] * cart_1[1] + pars_2["k"] * cart_2[1] # k1*y1+k2*y2 xhat_3 = pars_1["k"] * cart_1[2] + pars_2["k"] * cart_2[2] # k1*z1+k2*z2 Rw = numpy.sqrt(xhat_1 ** 2 + xhat_2 ** 2 + xhat_3 ** 2) V = 2 * (Sw - Rw) # # keep weighted sum for later when determining the "critical angle" let's save it as Sr (notation of McFadden and McElhinny, 1990) # Sr = Sw # # do monte carlo simulation of datasets with same kappas, but common mean # counter, NumSims = 0, 5000 Vp = [] # set of Vs from simulations for k in range(NumSims): # # get a set of N1 fisher distributed vectors with k1, calculate fisher stats # Dirp = [] for i in range(pars_1["n"]): Dirp.append(pmag.fshdev(pars_1["k"])) pars_p1 = pmag.fisher_mean(Dirp) # # get a set of N2 fisher distributed vectors with k2, calculate fisher stats # Dirp = [] for i in range(pars_2["n"]): Dirp.append(pmag.fshdev(pars_2["k"])) pars_p2 = pmag.fisher_mean(Dirp) # # get the V for these # Vk = pmag.vfunc(pars_p1, pars_p2) Vp.append(Vk) # # sort the Vs, get Vcrit (95th percentile one) # Vp.sort() k = int(0.95 * NumSims) Vcrit = Vp[k] # # equation 18 of McFadden and McElhinny, 1990 calculates the critical value of R (Rwc) # Rwc = Sr - (Vcrit / 2) # # following equation 19 of McFadden and McElhinny (1990) the critical angle is calculated. 
# k1 = pars_1["k"] k2 = pars_2["k"] R1 = pars_1["r"] R2 = pars_2["r"] critical_angle = numpy.degrees( numpy.arccos(((Rwc ** 2) - ((k1 * R1) ** 2) - ((k2 * R2) ** 2)) / (2 * k1 * R1 * k2 * R2)) ) D1_mean = (pars_1["dec"], pars_1["inc"]) D2_mean = (pars_2["dec"], pars_2["inc"]) angle = pmag.angle(D1_mean, D2_mean) # # print the results of the test # print "" print "Results of Watson V test: " print "" print "Watson's V: " "%.1f" % (V) print "Critical value of V: " "%.1f" % (Vcrit) if V < Vcrit: print '"Pass": Since V is less than Vcrit, the null hypothesis that the two populations are drawn from distributions that share a common mean direction (antipodal to one another) cannot be rejected.' elif V > Vcrit: print '"Fail": Since V is greater than Vcrit, the two means can be distinguished at the 95% confidence level.' print "" print "M&M1990 classification:" print "" print "Angle between data set means: " "%.1f" % (angle) print "Critical angle of M&M1990: " "%.1f" % (critical_angle) if V > Vcrit: print "" elif V < Vcrit: if critical_angle < 5: print "The McFadden and McElhinny (1990) classification for this test is: 'A'" elif critical_angle < 10: print "The McFadden and McElhinny (1990) classification for this test is: 'B'" elif critical_angle < 20: print "The McFadden and McElhinny (1990) classification for this test is: 'C'" else: print "The McFadden and McElhinny (1990) classification for this test is: 'INDETERMINATE;" if plot == 1: CDF = {"cdf": 1} pmagplotlib.plot_init(CDF["cdf"], 5, 5) p1 = pmagplotlib.plotCDF(CDF["cdf"], Vp, "Watson's V", "r", "") p2 = pmagplotlib.plotVs(CDF["cdf"], [V], "g", "-") p3 = pmagplotlib.plotVs(CDF["cdf"], [Vp[k]], "b", "--") pmagplotlib.drawFIGS(CDF) files, fmt = {}, "svg" if file2 != "": files["cdf"] = "WatsonsV_" + file1 + "_" + file2 + "." + fmt else: files["cdf"] = "WatsonsV_" + file1 + "." + fmt if pmagplotlib.isServer: black = "#000000" purple = "#800080" titles = {} titles["cdf"] = "Cumulative Distribution" CDF = pmagplotlib.addBorders(CDF, titles, black, purple) pmagplotlib.saveP(CDF, files) else: ans = raw_input(" S[a]ve to save plot, [q]uit without saving: ") if ans == "a": pmagplotlib.saveP(CDF, files)
def main(): """ NAME eqarea_ell.py DESCRIPTION makes equal area projections from declination/inclination data and plot ellipses SYNTAX eqarea_ell.py -h [command line options] INPUT takes space delimited Dec/Inc data OPTIONS -h prints help message and quits -f FILE -fmt [svg,png,jpg] format for output plots -ell [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors """ FIG = {} # plot dictionary FIG["eq"] = 1 # eqarea is figure 1 fmt, dist, mode = "svg", "F", 1 plotE = 0 if "-h" in sys.argv: print main.__doc__ sys.exit() pmagplotlib.plot_init(FIG["eq"], 5, 5) if "-f" in sys.argv: ind = sys.argv.index("-f") title = sys.argv[ind + 1] f = open(title, "rU") data = f.readlines() if "-ell" in sys.argv: plotE = 1 ind = sys.argv.index("-ell") ell_type = sys.argv[ind + 1] if ell_type == "F": dist = "F" if ell_type == "K": dist = "K" if ell_type == "B": dist = "B" if ell_type == "Be": dist = "BE" if ell_type == "Bv": dist = "BV" FIG["bdirs"] = 2 pmagplotlib.plot_init(FIG["bdirs"], 5, 5) if "-fmt" in sys.argv: ind = sys.argv.index("-fmt") fmt = sys.argv[ind + 1] DIblock = [] for line in data: if "\t" in line: rec = line.split("\t") # split each line on space to get records else: rec = line.split() # split each line on space to get records DIblock.append([float(rec[0]), float(rec[1])]) if len(DIblock) > 0: pmagplotlib.plotEQ(FIG["eq"], DIblock, title) else: print "no data to plot" sys.exit() if plotE == 1: ppars = pmag.doprinc(DIblock) # get principal directions nDIs, rDIs, npars, rpars = [], [], [], [] for rec in DIblock: angle = pmag.angle([rec[0], rec[1]], [ppars["dec"], ppars["inc"]]) if angle > 90.0: rDIs.append(rec) else: nDIs.append(rec) if dist == "B": # do on whole dataset etitle = "Bingham confidence ellipse" bpars = pmag.dobingham(DIblock) for key in bpars.keys(): if key != "n" and pmagplotlib.verbose: print " ", key, "%7.1f" % (bpars[key]) if key == "n" and pmagplotlib.verbose: print " ", key, " %i" % (bpars[key]) npars.append(bpars["dec"]) npars.append(bpars["inc"]) npars.append(bpars["Zeta"]) npars.append(bpars["Zdec"]) npars.append(bpars["Zinc"]) npars.append(bpars["Eta"]) npars.append(bpars["Edec"]) npars.append(bpars["Einc"]) if dist == "F": etitle = "Fisher confidence cone" if len(nDIs) > 3: fpars = pmag.fisher_mean(nDIs) for key in fpars.keys(): if key != "n" and pmagplotlib.verbose: print " ", key, "%7.1f" % (fpars[key]) if key == "n" and pmagplotlib.verbose: print " ", key, " %i" % (fpars[key]) mode += 1 npars.append(fpars["dec"]) npars.append(fpars["inc"]) npars.append(fpars["alpha95"]) # Beta npars.append(fpars["dec"]) isign = abs(fpars["inc"]) / fpars["inc"] npars.append(fpars["inc"] - isign * 90.0) # Beta inc npars.append(fpars["alpha95"]) # gamma npars.append(fpars["dec"] + 90.0) # Beta dec npars.append(0.0) # Beta inc if len(rDIs) > 3: fpars = pmag.fisher_mean(rDIs) if pmagplotlib.verbose: print "mode ", mode for key in fpars.keys(): if key != "n" and pmagplotlib.verbose: print " ", key, "%7.1f" % (fpars[key]) if key == "n" and pmagplotlib.verbose: print " ", key, " %i" % (fpars[key]) mode += 1 rpars.append(fpars["dec"]) rpars.append(fpars["inc"]) rpars.append(fpars["alpha95"]) # Beta rpars.append(fpars["dec"]) isign = abs(fpars["inc"]) / fpars["inc"] rpars.append(fpars["inc"] - isign * 90.0) # Beta inc rpars.append(fpars["alpha95"]) # gamma rpars.append(fpars["dec"] + 90.0) # Beta dec rpars.append(0.0) # Beta inc if dist == "K": etitle = "Kent confidence ellipse" if len(nDIs) > 3: kpars = pmag.dokent(nDIs, len(nDIs)) if pmagplotlib.verbose: 
print "mode ", mode for key in kpars.keys(): if key != "n" and pmagplotlib.verbose: print " ", key, "%7.1f" % (kpars[key]) if key == "n" and pmagplotlib.verbose: print " ", key, " %i" % (kpars[key]) mode += 1 npars.append(kpars["dec"]) npars.append(kpars["inc"]) npars.append(kpars["Zeta"]) npars.append(kpars["Zdec"]) npars.append(kpars["Zinc"]) npars.append(kpars["Eta"]) npars.append(kpars["Edec"]) npars.append(kpars["Einc"]) if len(rDIs) > 3: kpars = pmag.dokent(rDIs, len(rDIs)) if pmagplotlib.verbose: print "mode ", mode for key in kpars.keys(): if key != "n" and pmagplotlib.verbose: print " ", key, "%7.1f" % (kpars[key]) if key == "n" and pmagplotlib.verbose: print " ", key, " %i" % (kpars[key]) mode += 1 rpars.append(kpars["dec"]) rpars.append(kpars["inc"]) rpars.append(kpars["Zeta"]) rpars.append(kpars["Zdec"]) rpars.append(kpars["Zinc"]) rpars.append(kpars["Eta"]) rpars.append(kpars["Edec"]) rpars.append(kpars["Einc"]) else: # assume bootstrap if len(nDIs) < 10 and len(rDIs) < 10: print "too few data points for bootstrap" sys.exit() if dist == "BE": if len(nDIs) >= 10: BnDIs = pmag.di_boot(nDIs) Bkpars = pmag.dokent(BnDIs, 1.0) if pmagplotlib.verbose: print "mode ", mode for key in Bkpars.keys(): if key != "n" and pmagplotlib.verbose: print " ", key, "%7.1f" % (Bkpars[key]) if key == "n" and pmagplotlib.verbose: print " ", key, " %i" % (Bkpars[key]) mode += 1 npars.append(Bkpars["dec"]) npars.append(Bkpars["inc"]) npars.append(Bkpars["Zeta"]) npars.append(Bkpars["Zdec"]) npars.append(Bkpars["Zinc"]) npars.append(Bkpars["Eta"]) npars.append(Bkpars["Edec"]) npars.append(Bkpars["Einc"]) if len(rDIs) >= 10: BrDIs = pmag.di_boot(rDIs) Bkpars = pmag.dokent(BrDIs, 1.0) if pmagplotlib.verbose: print "mode ", mode for key in Bkpars.keys(): if key != "n" and pmagplotlib.verbose: print " ", key, "%7.1f" % (Bkpars[key]) if key == "n" and pmagplotlib.verbose: print " ", key, " %i" % (Bkpars[key]) mode += 1 rpars.append(Bkpars["dec"]) rpars.append(Bkpars["inc"]) rpars.append(Bkpars["Zeta"]) rpars.append(Bkpars["Zdec"]) rpars.append(Bkpars["Zinc"]) rpars.append(Bkpars["Eta"]) rpars.append(Bkpars["Edec"]) rpars.append(Bkpars["Einc"]) etitle = "Bootstrapped confidence ellipse" elif dist == "BV": vsym = {"lower": ["+", "k"], "upper": ["x", "k"], "size": 5} if len(nDIs) > 5: BnDIs = pmag.di_boot(nDIs) pmagplotlib.plotEQsym(FIG["bdirs"], BnDIs, "Bootstrapped Eigenvectors", vsym) if len(rDIs) > 5: BrDIs = pmag.di_boot(rDIs) if len(nDIs) > 5: # plot on existing plots pmagplotlib.plotDIsym(FIG["bdirs"], BrDIs, vsym) else: pmagplotlib.plotEQ(FIG["bdirs"], BrDIs, "Bootstrapped Eigenvectors", vsym) if dist == "B": if len(nDIs) > 3 or len(rDIs) > 3: pmagplotlib.plotCONF(FIG["eq"], etitle, [], npars, 0) elif len(nDIs) > 3 and dist != "BV": pmagplotlib.plotCONF(FIG["eq"], etitle, [], npars, 0) if len(rDIs) > 3: pmagplotlib.plotCONF(FIG["eq"], etitle, [], rpars, 0) elif len(rDIs) > 3 and dist != "BV": pmagplotlib.plotCONF(FIG["eq"], etitle, [], rpars, 0) pmagplotlib.drawFIGS(FIG) # files = {} for key in FIG.keys(): files[key] = title + "_" + key + "." + fmt if pmagplotlib.isServer: black = "#000000" purple = "#800080" titles = {} titles["eq"] = "Equal Area Plot" FIG = pmagplotlib.addBorders(FIG, titles, black, purple) pmagplotlib.saveP(FIG, files) else: ans = raw_input(" S[a]ve to save plot, [q]uit, Return to continue: ") if ans == "q": sys.exit() if ans == "a": pmagplotlib.saveP(FIG, files)
def main(): """ NAME EI.py [command line options] DESCRIPTION Finds bootstrap confidence bounds on Elongation and Inclination data SYNTAX EI.py [command line options] OPTIONS -h prints help message and quits -f FILE specifies input file -p do parametric bootstrap INPUT dec/inc pairs OUTPUT makes a plot of the E/I pair and bootstrapped confidence bounds along with the E/I trend predicted by the TK03 field model prints out: Io (mean inclination), I_lower and I_upper are 95% confidence bounds on inclination Eo (elongation), E_lower and E_upper are 95% confidence bounds on elongation Edec,Einc are the elongation direction """ par=0 if '-h' in sys.argv: print main.__doc__ sys.exit() if '-f' in sys.argv: ind=sys.argv.index('-f') file=open(sys.argv[ind+1],'rU') if '-p' in sys.argv: par=1 rseed,nb,data=10,5000,[] upper,lower=int(round(.975*nb)),int(round(.025*nb)) Es,Is=[],[] PLTS={'eq':1,'ei':2} pmagplotlib.plot_init(PLTS['eq'],5,5) pmagplotlib.plot_init(PLTS['ei'],5,5) # poly_tab= [ 3.07448925e-06, -3.49555831e-04, -1.46990847e-02, 2.90905483e+00] poly_new= [ 3.15976125e-06, -3.52459817e-04, -1.46641090e-02, 2.89538539e+00] # poly_cp88= [ 5.34558576e-06, -7.70922659e-04, 5.18529685e-03, 2.90941351e+00] # poly_qc96= [ 7.08210133e-06, -8.79536536e-04, 1.09625547e-03, 2.92513660e+00] # poly_cj98=[ 6.56675431e-06, -7.91823539e-04, -1.08211350e-03, 2.80557710e+00] # poly_tk03_g20= [ 4.96757685e-06, -6.02256097e-04, -5.96103272e-03, 2.84227449e+00] # poly_tk03_g30= [ 7.82525963e-06, -1.39781724e-03, 4.47187092e-02, 2.54637535e+00] # poly_gr99_g=[ 1.24362063e-07, -1.69383384e-04, -4.24479223e-03, 2.59257437e+00] # poly_gr99_e=[ 1.26348154e-07, 2.01691452e-04, -4.99142308e-02, 3.69461060e+00] E_EI,E_tab,E_new,E_cp88,E_cj98,E_qc96,E_tk03_g20=[],[],[],[],[],[],[] E_tk03_g30,E_gr99_g,E_gr99_e=[],[],[] I2=range(0,90,5) for inc in I2: E_new.append(EI(inc,poly_new)) # use the polynomial from Tauxe et al. 
(2008) pmagplotlib.plotEI(PLTS['ei'],E_new,I2,1) if '-f' in sys.argv: random.seed(rseed) for line in file.readlines(): rec=line.split() dec=float(rec[0]) inc=float(rec[1]) if par==1: if len(rec)==4: N=(int(rec[2])) # append n K=float(rec[3]) # append k rec=[dec,inc,N,K] data.append(rec) else: rec=[dec,inc] data.append(rec) pmagplotlib.plotEQ(PLTS['eq'],data,'Data') ppars=pmag.doprinc(data) n=ppars["N"] Io=ppars['inc'] Edec=ppars['Edir'][0] Einc=ppars['Edir'][1] Eo=(ppars['tau2']/ppars['tau3']) b=0 print 'doing bootstrap - be patient' while b<nb: bdata=[] for j in range(n): boot=random.randint(0,n-1) random.jumpahead(rseed) if par==1: DIs=[] D,I,N,K=data[boot][0],data[boot][1],data[boot][2],data[boot][3] for k in range(N): dec,inc=pmag.fshdev(K) drot,irot=pmag.dodirot(dec,inc,D,I) DIs.append([drot,irot]) fpars=pmag.fisher_mean(DIs) bdata.append([fpars['dec'],fpars['inc'],1.]) # replace data[boot] with parametric dec,inc else: bdata.append(data[boot]) ppars=pmag.doprinc(bdata) Is.append(ppars['inc']) Es.append(ppars['tau2']/ppars['tau3']) b+=1 if b%100==0:print b Is.sort() Es.sort() x,std=pmag.gausspars(Es) stderr=std/math.sqrt(len(data)) pmagplotlib.plotX(PLTS['ei'],Io,Eo,Is[lower],Is[upper],Es[lower],Es[upper],'b-') # pmagplotlib.plotX(PLTS['ei'],Io,Eo,Is[lower],Is[upper],Eo-stderr,Eo+stderr,'b-') print 'Io, Eo, I_lower, I_upper, E_lower, E_upper, Edec, Einc' print '%7.1f %4.2f %7.1f %7.1f %4.2f %4.2f %7.1f %7.1f' %(Io,Eo,Is[lower],Is[upper],Es[lower],Es[upper], Edec,Einc) # print '%7.1f %4.2f %7.1f %7.1f %4.2f %4.2f' %(Io,Eo,Is[lower],Is[upper],Eo-stderr,Eo+stderr) pmagplotlib.drawFIGS(PLTS) files,fmt={},'svg' for key in PLTS.keys(): files[key]=key+'.'+fmt ans=raw_input(" S[a]ve to save plot, [q]uit without saving: ") if ans=="a": pmagplotlib.saveP(PLTS,files)
def main(): """ NAME eqarea_magic.py DESCRIPTION makes equal area projections from declination/inclination data SYNTAX eqarea_magic.py [command line options] INPUT takes magic formatted pmag_results, pmag_sites, pmag_samples or pmag_specimens OPTIONS -h prints help message and quits -f FILE: specify input magic format file from magic,default='pmag_results.txt' supported types=[magic_measurements,pmag_specimens, pmag_samples, pmag_sites, pmag_results, magic_web] -obj OBJ: specify level of plot [all, sit, sam, spc], default is all -crd [s,g,t]: specify coordinate system, [s]pecimen, [g]eographic, [t]ilt adjusted default is geographic -fmt [svg,png,jpg] format for output plots -ell [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors -c plot as colour contour NOTE all: entire file; sit: site; sam: sample; spc: specimen """ FIG={} # plot dictionary FIG['eq']=1 # eqarea is figure 1 in_file,plot_key,coord,crd='pmag_results.txt','all',"-1",'g' fmt,dist,mode='svg','F',1 plotE,contour=0,0 dir_path='.' if '-h' in sys.argv: print main.__doc__ sys.exit() if '-WD' in sys.argv: ind=sys.argv.index('-WD') dir_path=sys.argv[ind+1] pmagplotlib.plot_init(FIG['eq'],5,5) if '-f' in sys.argv: ind=sys.argv.index("-f") in_file=dir_path+"/"+sys.argv[ind+1] if '-obj' in sys.argv: ind=sys.argv.index('-obj') plot_by=sys.argv[ind+1] if plot_by=='all':plot_key='all' if plot_by=='sit':plot_key='er_site_name' if plot_by=='sam':plot_key='er_sample_name' if plot_by=='spc':plot_key='er_specimen_name' if '-c' in sys.argv: contour=1 if '-ell' in sys.argv: plotE=1 ind=sys.argv.index('-ell') ell_type=sys.argv[ind+1] if ell_type=='F':dist='F' if ell_type=='K':dist='K' if ell_type=='B':dist='B' if ell_type=='Be':dist='BE' if ell_type=='Bv': dist='BV' FIG['bdirs']=2 pmagplotlib.plot_init(FIG['bdirs'],5,5) if '-crd' in sys.argv: ind=sys.argv.index("-crd") coord=sys.argv[ind+1] if coord=='g':coord="0" if coord=='t':coord="100" if '-fmt' in sys.argv: ind=sys.argv.index("-fmt") fmt=sys.argv[ind+1] Dec_keys=['site_dec','sample_dec','specimen_dec','measurement_dec','average_dec'] Inc_keys=['site_inc','sample_inc','specimen_inc','measurement_inc','average_inc'] Tilt_keys=['tilt_correction','site_tilt_correction','sample_tilt_correction','specimen_tilt_correction'] Dir_type_keys=['','site_direction_type','sample_direction_type','specimen_direction_type'] Name_keys=['er_specimen_name','er_sample_name','er_site_name','pmag_result_name'] data,file_type=pmag.magic_read(in_file) if file_type=='pmag_results' and plot_key!="all":plot_key=plot_key+'s' # need plural for results table if pmagplotlib.verbose: print len(data),' records read from ',in_file # # # find desired dec,inc data: # dir_type_key='' # # get plotlist if not plotting all records # plotlist=[] if plot_key!="all": for rec in data: if rec[plot_key] not in plotlist: plotlist.append(rec[plot_key]) plotlist.sort() else: plotlist.append('Whole file') for plot in plotlist: DIblock=[] GCblock=[] SLblock,SPblock=[],[] tilt_key="" mode=1 for rec in data: # find what data are available if plot_key=='all' or rec[plot_key]==plot: if plot_key!="all": title=rec[plot_key] else: title=plot if coord=='-1':title=title+' Specimen Coordinates' if coord=='0':title=title+' Geographic Coordinates' if coord=='100':title=title+' Tilt corrected Coordinates' dec_key,inc_key,tilt_key,name_key,k="","","","",0 while dec_key=="" and k<len(Dec_keys): if Dec_keys[k] in rec.keys() and rec[Dec_keys[k]]!="" and Inc_keys[k] in rec.keys() and rec[Inc_keys[k]]!="": dec_key,inc_key 
=Dec_keys[k],Inc_keys[k] k+=1 k=0 while tilt_key=="" and k<len(Tilt_keys): if Tilt_keys[k] in rec.keys():tilt_key=Tilt_keys[k] k+=1 k=0 while name_key=="" and k<len(Name_keys): if Name_keys[k] in rec.keys():name_key=Name_keys[k] k+=1 k=1 while dir_type_key=="" and k<len(Dir_type_keys): if Dir_type_keys[k] in rec.keys():dir_type_key=Dir_type_keys[k] k+=1 if dec_key!="":break if tilt_key=="":tilt_key='-1' if dir_type_key=="":dir_type_key='direction_type' for rec in data: # pick out the data if (plot_key=='all' or rec[plot_key]==plot) and rec[dec_key].strip()!="" and rec[inc_key].strip()!="": if dir_type_key not in rec.keys() or rec[dir_type_key]=="":rec[dir_type_key]='l' if tilt_key not in rec.keys():rec[tilt_key]='-1' # assume specimen coordinates unless otherwise specified if coord=='-1': DIblock.append([float(rec[dec_key]),float(rec[inc_key])]) SLblock.append([rec[name_key],rec['magic_method_codes']]) elif rec[tilt_key]==coord and rec[dir_type_key]=='l' and rec[dec_key]!="" and rec[inc_key]!="": if rec[tilt_key]==coord and rec[dir_type_key]=='l' and rec[dec_key]!="" and rec[inc_key]!="": DIblock.append([float(rec[dec_key]),float(rec[inc_key])]) SLblock.append([rec[name_key],rec['magic_method_codes']]) elif rec[tilt_key]==coord and rec[dir_type_key]!='l' and rec[dec_key]!="" and rec[inc_key]!="": GCblock.append([float(rec[dec_key]),float(rec[inc_key])]) SPblock.append([rec[name_key],rec['magic_method_codes']]) if len(DIblock)==0 and len(GCblock)==0: if pmagplotlib.verbose: print "no records for plotting" sys.exit() if pmagplotlib.verbose: for k in range(len(SLblock)): print '%s %s %7.1f %7.1f'%(SLblock[k][0],SLblock[k][1],DIblock[k][0],DIblock[k][1]) for k in range(len(SPblock)): print '%s %s %7.1f %7.1f'%(SPblock[k][0],SPblock[k][1],GCblock[k][0],GCblock[k][1]) if len(DIblock)>0: if contour==0: pmagplotlib.plotEQ(FIG['eq'],DIblock,title) else: pmagplotlib.plotEQcont(FIG['eq'],DIblock) else: pmagplotlib.plotNET(FIG['eq']) if len(GCblock)>0: for rec in GCblock: pmagplotlib.plotC(FIG['eq'],rec,90.,'g') if plotE==1: ppars=pmag.doprinc(DIblock) # get principal directions nDIs,rDIs,npars,rpars=[],[],[],[] for rec in DIblock: angle=pmag.angle([rec[0],rec[1]],[ppars['dec'],ppars['inc']]) if angle>90.: rDIs.append(rec) else: nDIs.append(rec) if dist=='B': # do on whole dataset etitle="Bingham confidence ellipse" bpars=pmag.dobingham(DIblock) for key in bpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(bpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(bpars[key]) npars.append(bpars['dec']) npars.append(bpars['inc']) npars.append(bpars['Zeta']) npars.append(bpars['Zdec']) npars.append(bpars['Zinc']) npars.append(bpars['Eta']) npars.append(bpars['Edec']) npars.append(bpars['Einc']) if dist=='F': etitle="Fisher confidence cone" if len(nDIs)>2: fpars=pmag.fisher_mean(nDIs) for key in fpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(fpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(fpars[key]) mode+=1 npars.append(fpars['dec']) npars.append(fpars['inc']) npars.append(fpars['alpha95']) # Beta npars.append(fpars['dec']) isign=abs(fpars['inc'])/fpars['inc'] npars.append(fpars['inc']-isign*90.) #Beta inc npars.append(fpars['alpha95']) # gamma npars.append(fpars['dec']+90.) # Beta dec npars.append(0.) 
#Beta inc if len(rDIs)>2: fpars=pmag.fisher_mean(rDIs) if pmagplotlib.verbose:print "mode ",mode for key in fpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(fpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(fpars[key]) mode+=1 rpars.append(fpars['dec']) rpars.append(fpars['inc']) rpars.append(fpars['alpha95']) # Beta rpars.append(fpars['dec']) isign=abs(fpars['inc'])/fpars['inc'] rpars.append(fpars['inc']-isign*90.) #Beta inc rpars.append(fpars['alpha95']) # gamma rpars.append(fpars['dec']+90.) # Beta dec rpars.append(0.) #Beta inc if dist=='K': etitle="Kent confidence ellipse" if len(nDIs)>3: kpars=pmag.dokent(nDIs,len(nDIs)) if pmagplotlib.verbose:print "mode ",mode for key in kpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(kpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(kpars[key]) mode+=1 npars.append(kpars['dec']) npars.append(kpars['inc']) npars.append(kpars['Zeta']) npars.append(kpars['Zdec']) npars.append(kpars['Zinc']) npars.append(kpars['Eta']) npars.append(kpars['Edec']) npars.append(kpars['Einc']) if len(rDIs)>3: kpars=pmag.dokent(rDIs,len(rDIs)) if pmagplotlib.verbose:print "mode ",mode for key in kpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(kpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(kpars[key]) mode+=1 rpars.append(kpars['dec']) rpars.append(kpars['inc']) rpars.append(kpars['Zeta']) rpars.append(kpars['Zdec']) rpars.append(kpars['Zinc']) rpars.append(kpars['Eta']) rpars.append(kpars['Edec']) rpars.append(kpars['Einc']) else: # assume bootstrap if dist=='BE': if len(nDIs)>5: BnDIs=pmag.di_boot(nDIs) Bkpars=pmag.dokent(BnDIs,1.) if pmagplotlib.verbose:print "mode ",mode for key in Bkpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(Bkpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(Bkpars[key]) mode+=1 npars.append(Bkpars['dec']) npars.append(Bkpars['inc']) npars.append(Bkpars['Zeta']) npars.append(Bkpars['Zdec']) npars.append(Bkpars['Zinc']) npars.append(Bkpars['Eta']) npars.append(Bkpars['Edec']) npars.append(Bkpars['Einc']) if len(rDIs)>5: BrDIs=pmag.di_boot(rDIs) Bkpars=pmag.dokent(BrDIs,1.) 
if pmagplotlib.verbose:print "mode ",mode for key in Bkpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(Bkpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(Bkpars[key]) mode+=1 rpars.append(Bkpars['dec']) rpars.append(Bkpars['inc']) rpars.append(Bkpars['Zeta']) rpars.append(Bkpars['Zdec']) rpars.append(Bkpars['Zinc']) rpars.append(Bkpars['Eta']) rpars.append(Bkpars['Edec']) rpars.append(Bkpars['Einc']) etitle="Bootstrapped confidence ellipse" elif dist=='BV': if len(nDIs)>5: BnDIs=pmag.di_boot(nDIs) pmagplotlib.plotEQ(FIG['bdirs'],BnDIs,'Bootstrapped Eigenvectors') if len(rDIs)>5: BrDIs=pmag.di_boot(rDIs) if len(nDIs)>5: # plot on existing plots pmagplotlib.plotDI(FIG['bdirs'],BrDIs) else: pmagplotlib.plotEQ(FIG['bdirs'],BrDIs,'Bootstrapped Eigenvectors') if dist=='B': if len(nDIs)> 3 or len(rDIs)>3: pmagplotlib.plotCONF(FIG['eq'],etitle,[],npars,0) elif len(nDIs)>3 and dist!='BV': pmagplotlib.plotCONF(FIG['eq'],etitle,[],npars,0) if len(rDIs)>3: pmagplotlib.plotCONF(FIG['eq'],etitle,[],rpars,0) elif len(rDIs)>3 and dist!='BV': pmagplotlib.plotCONF(FIG['eq'],etitle,[],rpars,0) pmagplotlib.drawFIGS(FIG) # files={} for key in FIG.keys(): files[key]=title.replace(" ","_")+'_'+'eqarea'+'.'+fmt if pmagplotlib.isServer: black = '#000000' purple = '#800080' titles={} titles['eq']='Equal Area Plot' FIG = pmagplotlib.addBorders(FIG,titles,black,purple) pmagplotlib.saveP(FIG,files) else: ans=raw_input(" S[a]ve to save plot, [q]uit, Return to continue: ") if ans=="q": sys.exit() if ans=="a": pmagplotlib.saveP(FIG,files)
def main(): """ NAME eqarea_ell.py DESCRIPTION makes equal area projections from declination/inclination data and plot ellipses SYNTAX eqarea_ell.py -h [command line options] INPUT takes space delimited Dec/Inc data OPTIONS -h prints help message and quits -f FILE -fmt [svg,png,jpg] format for output plots -sav saves figures and quits -ell [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors """ FIG={} # plot dictionary FIG['eq']=1 # eqarea is figure 1 fmt,dist,mode,plot='svg','F',1,0 sym={'lower':['o','r'],'upper':['o','w'],'size':10} plotE=0 if '-h' in sys.argv: print main.__doc__ sys.exit() pmagplotlib.plot_init(FIG['eq'],5,5) if '-sav' in sys.argv:plot=1 if '-f' in sys.argv: ind=sys.argv.index("-f") title=sys.argv[ind+1] data=numpy.loadtxt(title).transpose() if '-ell' in sys.argv: plotE=1 ind=sys.argv.index('-ell') ell_type=sys.argv[ind+1] if ell_type=='F':dist='F' if ell_type=='K':dist='K' if ell_type=='B':dist='B' if ell_type=='Be':dist='BE' if ell_type=='Bv': dist='BV' FIG['bdirs']=2 pmagplotlib.plot_init(FIG['bdirs'],5,5) if '-fmt' in sys.argv: ind=sys.argv.index("-fmt") fmt=sys.argv[ind+1] DIblock=numpy.array([data[0],data[1]]).transpose() if len(DIblock)>0: pmagplotlib.plotEQsym(FIG['eq'],DIblock,title,sym) if plot==0:pmagplotlib.drawFIGS(FIG) else: print "no data to plot" sys.exit() if plotE==1: ppars=pmag.doprinc(DIblock) # get principal directions nDIs,rDIs,npars,rpars=[],[],[],[] for rec in DIblock: angle=pmag.angle([rec[0],rec[1]],[ppars['dec'],ppars['inc']]) if angle>90.: rDIs.append(rec) else: nDIs.append(rec) if dist=='B': # do on whole dataset etitle="Bingham confidence ellipse" bpars=pmag.dobingham(DIblock) for key in bpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(bpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(bpars[key]) npars.append(bpars['dec']) npars.append(bpars['inc']) npars.append(bpars['Zeta']) npars.append(bpars['Zdec']) npars.append(bpars['Zinc']) npars.append(bpars['Eta']) npars.append(bpars['Edec']) npars.append(bpars['Einc']) if dist=='F': etitle="Fisher confidence cone" if len(nDIs)>3: fpars=pmag.fisher_mean(nDIs) for key in fpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(fpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(fpars[key]) mode+=1 npars.append(fpars['dec']) npars.append(fpars['inc']) npars.append(fpars['alpha95']) # Beta npars.append(fpars['dec']) isign=abs(fpars['inc'])/fpars['inc'] npars.append(fpars['inc']-isign*90.) #Beta inc npars.append(fpars['alpha95']) # gamma npars.append(fpars['dec']+90.) # Beta dec npars.append(0.) #Beta inc if len(rDIs)>3: fpars=pmag.fisher_mean(rDIs) if pmagplotlib.verbose:print "mode ",mode for key in fpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(fpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(fpars[key]) mode+=1 rpars.append(fpars['dec']) rpars.append(fpars['inc']) rpars.append(fpars['alpha95']) # Beta rpars.append(fpars['dec']) isign=abs(fpars['inc'])/fpars['inc'] rpars.append(fpars['inc']-isign*90.) #Beta inc rpars.append(fpars['alpha95']) # gamma rpars.append(fpars['dec']+90.) # Beta dec rpars.append(0.) 
#Beta inc if dist=='K': etitle="Kent confidence ellipse" if len(nDIs)>3: kpars=pmag.dokent(nDIs,len(nDIs)) if pmagplotlib.verbose:print "mode ",mode for key in kpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(kpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(kpars[key]) mode+=1 npars.append(kpars['dec']) npars.append(kpars['inc']) npars.append(kpars['Zeta']) npars.append(kpars['Zdec']) npars.append(kpars['Zinc']) npars.append(kpars['Eta']) npars.append(kpars['Edec']) npars.append(kpars['Einc']) if len(rDIs)>3: kpars=pmag.dokent(rDIs,len(rDIs)) if pmagplotlib.verbose:print "mode ",mode for key in kpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(kpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(kpars[key]) mode+=1 rpars.append(kpars['dec']) rpars.append(kpars['inc']) rpars.append(kpars['Zeta']) rpars.append(kpars['Zdec']) rpars.append(kpars['Zinc']) rpars.append(kpars['Eta']) rpars.append(kpars['Edec']) rpars.append(kpars['Einc']) else: # assume bootstrap if len(nDIs)<10 and len(rDIs)<10: print 'too few data points for bootstrap' sys.exit() if dist=='BE': print 'Be patient for bootstrap...' if len(nDIs)>=10: BnDIs=pmag.di_boot(nDIs) Bkpars=pmag.dokent(BnDIs,1.) if pmagplotlib.verbose:print "mode ",mode for key in Bkpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(Bkpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(Bkpars[key]) mode+=1 npars.append(Bkpars['dec']) npars.append(Bkpars['inc']) npars.append(Bkpars['Zeta']) npars.append(Bkpars['Zdec']) npars.append(Bkpars['Zinc']) npars.append(Bkpars['Eta']) npars.append(Bkpars['Edec']) npars.append(Bkpars['Einc']) if len(rDIs)>=10: BrDIs=pmag.di_boot(rDIs) Bkpars=pmag.dokent(BrDIs,1.) if pmagplotlib.verbose:print "mode ",mode for key in Bkpars.keys(): if key!='n' and pmagplotlib.verbose:print " ",key, '%7.1f'%(Bkpars[key]) if key=='n' and pmagplotlib.verbose:print " ",key, ' %i'%(Bkpars[key]) mode+=1 rpars.append(Bkpars['dec']) rpars.append(Bkpars['inc']) rpars.append(Bkpars['Zeta']) rpars.append(Bkpars['Zdec']) rpars.append(Bkpars['Zinc']) rpars.append(Bkpars['Eta']) rpars.append(Bkpars['Edec']) rpars.append(Bkpars['Einc']) etitle="Bootstrapped confidence ellipse" elif dist=='BV': print 'Be patient for bootstrap...' vsym={'lower':['+','k'],'upper':['x','k'],'size':5} if len(nDIs)>5: BnDIs=pmag.di_boot(nDIs) pmagplotlib.plotEQsym(FIG['bdirs'],BnDIs,'Bootstrapped Eigenvectors',vsym) if len(rDIs)>5: BrDIs=pmag.di_boot(rDIs) if len(nDIs)>5: # plot on existing plots pmagplotlib.plotDIsym(FIG['bdirs'],BrDIs,vsym) else: pmagplotlib.plotEQ(FIG['bdirs'],BrDIs,'Bootstrapped Eigenvectors',vsym) if dist=='B': if len(nDIs)> 3 or len(rDIs)>3: pmagplotlib.plotCONF(FIG['eq'],etitle,[],npars,0) elif len(nDIs)>3 and dist!='BV': pmagplotlib.plotCONF(FIG['eq'],etitle,[],npars,0) if len(rDIs)>3: pmagplotlib.plotCONF(FIG['eq'],etitle,[],rpars,0) elif len(rDIs)>3 and dist!='BV': pmagplotlib.plotCONF(FIG['eq'],etitle,[],rpars,0) if plot==0:pmagplotlib.drawFIGS(FIG) if plot==0:pmagplotlib.drawFIGS(FIG) # files={} for key in FIG.keys(): files[key]=title+'_'+key+'.'+fmt if pmagplotlib.isServer: black = '#000000' purple = '#800080' titles={} titles['eq']='Equal Area Plot' FIG = pmagplotlib.addBorders(FIG,titles,black,purple) pmagplotlib.saveP(FIG,files) elif plot==0: ans=raw_input(" S[a]ve to save plot, [q]uit, Return to continue: ") if ans=="q": sys.exit() if ans=="a": pmagplotlib.saveP(FIG,files) else: pmagplotlib.saveP(FIG,files)
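Before any ellipse is fit, eqarea_ell.py splits the data into "normal" and "reverse" modes by measuring the angle of each direction from the principal direction of the whole data set. A minimal sketch of that step on synthetic Fisher-distributed directions (the kappa value and sample size are arbitrary):

import pmag

# synthetic test data: one mode drawn from a Fisher distribution (kappa = 20)
DIblock = [list(pmag.fshdev(20.)) for i in range(50)]

ppars = pmag.doprinc(DIblock)          # principal direction of the whole data set
nDIs, rDIs = [], []
for rec in DIblock:
    # directions more than 90 degrees from the principal direction go to the
    # 'reverse' mode, the rest to the 'normal' mode (as in the listing above)
    if pmag.angle([rec[0], rec[1]], [ppars['dec'], ppars['inc']])[0] > 90.:
        rDIs.append(rec)
    else:
        nDIs.append(rec)

if len(nDIs) > 3:
    fpars = pmag.fisher_mean(nDIs)      # e.g. Fisher confidence cone for the normal mode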
def main(): """ NAME watsonsV.py DESCRIPTION calculates Watson's V statistic from input files INPUT FORMAT takes dec/inc as first two columns in two space delimited files SYNTAX watsonsV.py [command line options] OPTIONS -h prints help message and quits -f FILE (with optional second) -f2 FILE (second file) -ant, flip antipodal directions in FILE to opposite direction -P (don't plot) OUTPUT Watson's V and the Monte Carlo Critical Value Vc. in plot, V is solid and Vc is dashed. """ D1,D2=[],[] Flip=0 plot=1 if '-h' in sys.argv: # check if help is needed print main.__doc__ sys.exit() # graceful quit if '-ant' in sys.argv: Flip=1 if '-P' in sys.argv: plot=0 if '-f' in sys.argv: ind=sys.argv.index('-f') file1=sys.argv[ind+1] f=open(file1,'rU') for line in f.readlines(): rec=line.split() Dec,Inc=float(rec[0]),float(rec[1]) D1.append([Dec,Inc,1.]) f.close() if '-f2' in sys.argv: ind=sys.argv.index('-f2') file2=sys.argv[ind+1] f=open(file2,'rU') for line in f.readlines(): if '\t' in line: rec=line.split('\t') # split each line on space to get records else: rec=line.split() # split each line on space to get records Dec,Inc=float(rec[0]),float(rec[1]) if Flip==0: D2.append([Dec,Inc,1.]) else: D1.append([Dec,Inc,1.]) f.close() if Flip==1: D1,D2=pmag.flip(D1) # counter,NumSims=0,5000 # # first calculate the fisher means and cartesian coordinates of each set of Directions # pars_1=pmag.fisher_mean(D1) pars_2=pmag.fisher_mean(D2) # # get V statistic for these # V=pmag.vfunc(pars_1,pars_2) # # do monte carlo simulation of datasets with same kappas, but common mean # Vp=[] # set of Vs from simulations if plot==1:print "Doing ",NumSims," simulations" for k in range(NumSims): counter+=1 if counter==50: if plot==1:print k+1 counter=0 Dirp=[] # get a set of N1 fisher distributed vectors with k1, calculate fisher stats for i in range(pars_1["n"]): Dirp.append(pmag.fshdev(pars_1["k"])) pars_p1=pmag.fisher_mean(Dirp) # get a set of N2 fisher distributed vectors with k2, calculate fisher stats Dirp=[] for i in range(pars_2["n"]): Dirp.append(pmag.fshdev(pars_2["k"])) pars_p2=pmag.fisher_mean(Dirp) # get the V for these Vk=pmag.vfunc(pars_p1,pars_p2) Vp.append(Vk) # # sort the Vs, get Vcrit (95th one) # Vp.sort() k=int(.95*NumSims) print "Watson's V, Vcrit: " print ' %10.1f %10.1f'%(V,Vp[k]) if plot==1: CDF={'cdf':1} pmagplotlib.plot_init(CDF['cdf'],5,5) pmagplotlib.plotCDF(CDF['cdf'],Vp,"Watson's V",'r',"") pmagplotlib.plotVs(CDF['cdf'],[V],'g','-') pmagplotlib.plotVs(CDF['cdf'],[Vp[k]],'b','--') pmagplotlib.drawFIGS(CDF) files,fmt={},'svg' if file2!="": files['cdf']='WatsonsV_'+file1+'_'+file2+'.'+fmt else: files['cdf']='WatsonsV_'+file1+'.'+fmt if pmagplotlib.isServer: black = '#000000' purple = '#800080' titles={} titles['cdf']='Cumulative Distribution' CDF = pmagplotlib.addBorders(CDF,titles,black,purple) pmagplotlib.saveP(CDF,files) else: ans=raw_input(" S[a]ve to save plot, [q]uit without saving: ") if ans=="a": pmagplotlib.saveP(CDF,files)
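watsonsV.py leans on pmag.vfunc for the statistic itself. The quantity being computed is Watson's V = 2(Sw - Rw), where Sw = k1*R1 + k2*R2 and Rw is the length of the kappa-weighted vector sum of the two resultants; the same arithmetic is written out explicitly in common_dir_MM90 and watson_common_mean further down. A standalone sketch of that calculation (watson_v is an illustrative name, not a PmagPy function):

import numpy as np
import pmag

def watson_v(pars_1, pars_2):
    # pars_1/pars_2 are pmag.fisher_mean dictionaries with 'dec', 'inc', 'r', 'k'
    cart_1 = pmag.dir2cart([pars_1['dec'], pars_1['inc'], pars_1['r']])
    cart_2 = pmag.dir2cart([pars_2['dec'], pars_2['inc'], pars_2['r']])
    Sw = pars_1['k'] * pars_1['r'] + pars_2['k'] * pars_2['r']   # k1*R1 + k2*R2
    xhat = [pars_1['k'] * cart_1[i] + pars_2['k'] * cart_2[i] for i in range(3)]
    Rw = np.sqrt(xhat[0]**2 + xhat[1]**2 + xhat[2]**2)
    return 2. * (Sw - Rw)

# usage with two synthetic data sets
D1 = [list(pmag.fshdev(25.)) for i in range(20)]
D2 = [list(pmag.fshdev(25.)) for i in range(20)]
V = watson_v(pmag.fisher_mean(D1), pmag.fisher_mean(D2))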
def main(): """ NAME orientation_magic.py DESCRIPTION takes tab delimited field notebook information and converts to MagIC formatted tables SYNTAX orientation_magic.py [command line options] OPTIONS -f FILE: specify input file, default is: orient.txt -Fsa FILE: specify output file, default is: er_samples.txt -Fsi FILE: specify output site location file, default is: er_sites.txt -app append/update these data in existing er_samples.txt, er_sites.txt files -ocn OCON: specify orientation convention, default is #1 below -dcn DCON [DEC]: specify declination convention, default is #1 below if DCON = 2, you must supply the declination correction -BCN don't correct bedding_dip_dir for magnetic declination -already corrected -ncn NCON: specify naming convention: default is #1 below -a: averages all bedding poles and uses average for all samples: default is NO -gmt HRS: specify hours to subtract from local time to get GMT: default is 0 -mcd: specify sampling method codes as a colon delimited string: [default is: FS-FD:SO-POM] FS-FD field sampling done with a drill FS-H field sampling done with hand samples FS-LOC-GPS field location done with GPS FS-LOC-MAP field location done with map SO-POM a Pomeroy orientation device was used SO-ASC an ASC orientation device was used INPUT FORMAT Input files must be tab delimited and have in the first line: tab location_name Note: The "location_name" will facilitate searching in the MagIC database. Data from different "locations" should be put in separate files. The definition of a "location" is rather loose. Also this is the word 'tab' not a tab, which will be indicated by '\t'. The second line has the names of the columns (tab delimited), e.g.: site_name sample_name mag_azimuth field_dip date lat long sample_lithology sample_type sample_class shadow_angle hhmm stratigraphic_height bedding_dip_direction bedding_dip GPS_baseline image_name image_look image_photographer participants method_codes site_description sample_description GPS_Az, sample_igsn, sample_texture, sample_cooling_rate, cooling_rate_corr, cooling_rate_mcd Notes: 1) column order doesn't matter but the NAMES do. 2) sample_name, sample_lithology, sample_type, sample_class, lat and long are required. all others are optional. 3) If subsequent data are the same (e.g., date, bedding orientation, participants, stratigraphic_height), you can leave the field blank and the program will fill in the last recorded information. BUT if you really want a blank stratigraphic_height, enter a '-1'. These will not be inherited and must be specified for each entry: image_name, look, photographer or method_codes 4) hhmm must be in the format: hh:mm and the hh must be in 24 hour time. date must be mm/dd/yy (years < 50 will be converted to 20yy and >50 will be assumed 19yy) 5) image_name, image_look and image_photographer are colon delimited lists of file name (e.g., IMG_001.jpg) image look direction and the name of the photographer respectively. If all images had same look and photographer, just enter info once. The images will be assigned to the site for which they were taken - not at the sample level. 6) participants: Names of who helped take the samples. These must be a colon delimited list. 7) method_codes: Special method codes on a sample level, e.g., SO-GT5 which means the orientation is has an uncertainty of >5 degrees for example if it broke off before orienting.... 8) GPS_Az is the place to put directly determined GPS Azimuths, using, e.g., points along the drill direction. 
9) sample_cooling_rate is the cooling rate in K per Ma 10) int_corr_cooling_rate 11) cooling_rate_mcd: data adjustment method code for cooling rate correction; DA-CR-EG is educated guess; DA-CR-PS is percent estimated from pilot samples; DA-CR-TRM is comparison between 2 TRMs acquired with slow and rapid cooling rates. is the percent cooling rate factor to apply to specimens from this sample, DA-CR-XX is the method code Orientation convention: Samples are oriented in the field with a "field arrow" and measured in the laboratory with a "lab arrow". The lab arrow is the positive X direction of the right handed coordinate system of the specimen measurements. The lab and field arrows may not be the same. In the MagIC database, we require the orientation (azimuth and plunge) of the X direction of the measurements (lab arrow). Here are some popular conventions that convert the field arrow azimuth (mag_azimuth in the orient.txt file) and dip (field_dip in orient.txt) to the azimuth and plunge of the laboratory arrow (sample_azimuth and sample_dip in er_samples.txt). The two angles, mag_azimuth and field_dip are explained below. [1] Standard Pomeroy convention of azimuth and hade (degrees from vertical down) of the drill direction (field arrow). lab arrow azimuth= sample_azimuth = mag_azimuth; lab arrow dip = sample_dip =-field_dip. i.e. the lab arrow dip is minus the hade. [2] Field arrow is the strike of the plane orthogonal to the drill direction, Field dip is the hade of the drill direction. Lab arrow azimuth = mag_azimuth-90 Lab arrow dip = -field_dip [3] Lab arrow is the same as the drill direction; hade was measured in the field. Lab arrow azimuth = mag_azimuth; Lab arrow dip = 90-field_dip [4] lab azimuth and dip are same as mag_azimuth, field_dip : use this for unoriented samples too [5] Same as AZDIP convention explained below - azimuth and inclination of the drill direction are mag_azimuth and field_dip; lab arrow is as in [1] above. lab azimuth is same as mag_azimuth,lab arrow dip=field_dip-90 [6] Lab arrow azimuth = mag_azimuth-90; Lab arrow dip = 90-field_dip [7] all others you will have to either customize your self or e-mail [email protected] for help. Magnetic declination convention: [1] Use the IGRF value at the lat/long and date supplied [default] [2] Will supply declination correction [3] mag_az is already corrected in file [4] Correct mag_az but not bedding_dip_dir Sample naming convention: [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default] [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX [5] site name = sample name [6] site name entered in site_name column in the orient.txt format input file [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY NB: all others you will have to either customize your self or e-mail [email protected] for help. 
OUTPUT output saved in er_samples.txt and er_sites.txt - will overwrite any existing files """ # # initialize variables # stratpos="" args=sys.argv date,lat,lon="","","" # date of sampling, latitude (pos North), longitude (pos East) bed_dip,bed_dip_dir="","" participantlist="" Lats,Lons=[],[] # list of latitudes and longitudes SampOuts,SiteOuts,ImageOuts=[],[],[] # lists of Sample records and Site records samplelist,sitelist,imagelist=[],[],[] samp_con,Z,average_bedding,DecCorr="1",1,"0",0. newbaseline,newbeddir,newbeddip="","","" meths='' delta_u="0" sclass,lithology,type="","","" newclass,newlith,newtype='','','' user="" BPs=[]# bedding pole declinations, bedding pole inclinations # # dir_path,AddTo='.',0 if "-WD" in args: ind=args.index("-WD") dir_path=sys.argv[ind+1] orient_file,samp_file,or_con,corr = dir_path+"/orient.txt",dir_path+"/er_samples.txt","1","1" site_file=dir_path+"/er_sites.txt" image_file=dir_path+"/er_images.txt" SampRecs,SiteRecs,ImageRecs=[],[],[] if "-h" in args: print main.__doc__ sys.exit() if "-f" in args: ind=args.index("-f") orient_file=dir_path+'/'+sys.argv[ind+1] if "-Fsa" in args: ind=args.index("-Fsa") samp_file=dir_path+'/'+sys.argv[ind+1] if "-Fsi" in args: ind=args.index("-Fsi") site_file=dir_path+'/'+sys.argv[ind+1] if '-app' in args: AddTo=1 try: SampRecs,file_type=pmag.magic_read(samp_file) print 'sample data to be appended to: ', samp_file except: print 'problem with existing file: ',samp_file, ' will create new.' try: SiteRecs,file_type=pmag.magic_read(site_file) print 'site data to be appended to: ',site_file except: print 'problem with existing file: ',site_file,' will create new.' try: ImageRecs,file_type=pmag.magic_read(image_file) print 'image data to be appended to: ',image_file except: print 'problem with existing file: ',image_file,' will create new.' if "-ocn" in args: ind=args.index("-ocn") or_con=sys.argv[ind+1] if "-dcn" in args: ind=args.index("-dcn") corr=sys.argv[ind+1] if corr=="2": DecCorr=float(sys.argv[ind+2]) elif corr=="3": DecCorr=0. 
if '-BCN' in args: BedCorr=0 else: BedCorr=1 if "-ncn" in args: ind=args.index("-ncn") samp_con=sys.argv[ind+1] if "4" in samp_con: if "-" not in samp_con: print "option [4] must be in form 4-Z where Z is an integer" sys.exit() else: Z=samp_con.split("-")[1] samp_con="4" if "7" in samp_con: if "-" not in samp_con: print "option [7] must be in form 7-Z where Z is an integer" sys.exit() else: Z=samp_con.split("-")[1] samp_con="7" if "-gmt" in args: ind=args.index("-gmt") delta_u=(sys.argv[ind+1]) if "-mcd" in args: ind=args.index("-mcd") meths=(sys.argv[ind+1]) if "-a" in args: average_bedding="1" # # read in file to convert # OrData,location_name=pmag.magic_read(orient_file) # # step through the data sample by sample # for OrRec in OrData: if 'mag_azimuth' not in OrRec.keys():OrRec['mag_azimuth']="" if 'field_dip' not in OrRec.keys():OrRec['field_dip']="" if OrRec['mag_azimuth']==" ":OrRec["mag_azimuth"]="" if OrRec['field_dip']==" ":OrRec["field_dip"]="" if 'sample_description' in OrRec.keys(): sample_description=OrRec['sample_description'] else: sample_description="" if 'sample_igsn' in OrRec.keys(): sample_igsn=OrRec['sample_igsn'] else: sample_igsn="" if 'sample_texture' in OrRec.keys(): sample_texture=OrRec['sample_texture'] else: sample_texture="" if 'sample_cooling_rate' in OrRec.keys(): sample_cooling_rate=OrRec['sample_cooling_rate'] else: sample_cooling_rate="" if 'cooling_rate_corr' in OrRec.keys(): cooling_rate_corr=OrRec['cooling_rate_corr'] if 'cooling_rate_mcd' in OrRec.keys(): cooling_rate_mcd=OrRec['cooling_rate_mcd'] else: cooling_rate_mcd='DA-CR' else: cooling_rate_corr="" cooling_rate_mcd="" sample_orientation_flag='g' if 'sample_orientation_flag' in OrRec.keys(): if OrRec['sample_orientation_flag']=='b' or OrRec["mag_azimuth"]=="": sample_orientation_flag='b' methcodes=meths # initialize method codes if meths!='': if 'method_codes' in OrRec.keys() and OrRec['method_codes'].strip()!="":methcodes=methcodes+":"+OrRec['method_codes'] # add notes else: if 'method_codes' in OrRec.keys() and OrRec['method_codes'].strip()!="":methcodes=OrRec['method_codes'] # add notes codes=methcodes.replace(" ","").split(":") MagRec={} MagRec["er_location_name"]=location_name MagRec["er_citation_names"]="This study" MagRec['sample_orientation_flag']=sample_orientation_flag MagRec['sample_igsn']=sample_igsn MagRec['sample_texture']=sample_texture MagRec['sample_cooling_rate']=sample_cooling_rate MagRec['cooling_rate_corr']=cooling_rate_corr MagRec['cooling_rate_mcd']=cooling_rate_mcd # # parse information common to all orientation methods # MagRec["er_sample_name"]=OrRec["sample_name"] if "IGSN" in OrRec.keys(): MagRec["sample_igsn"]=OrRec["IGSN"] else: MagRec["sample_igsn"]="" MagRec["sample_height"],MagRec["sample_bed_dip_direction"],MagRec["sample_bed_dip"]="","","" if "er_sample_alternatives" in OrRec.keys():MagRec["er_sample_alternatives"]=OrRec["sample_alternatives"] sample=OrRec["sample_name"] if OrRec['mag_azimuth']=="" and OrRec['field_dip']!="": OrRec['mag_azimuth']='999' if OrRec["mag_azimuth"]!="": labaz,labdip=pmag.orient(float(OrRec["mag_azimuth"]),float(OrRec["field_dip"]),or_con) if labaz<0:labaz+=360. 
else: labaz,labdip="","" if OrRec['mag_azimuth']=='999':labaz="" if "GPS_baseline" in OrRec.keys() and OrRec['GPS_baseline']!="":newbaseline=OrRec["GPS_baseline"] if newbaseline!="":baseline=float(newbaseline) if 'participants' in OrRec.keys() and OrRec['participants']!="" and OrRec['participants']!=participantlist: participantlist=OrRec['participants'] MagRec['er_scientist_mail_names']=participantlist newlat=OrRec["lat"] if newlat!="":lat=float(newlat) if lat=="": print "No latitude specified for ! ",sample sys.exit() MagRec["sample_lat"]='%11.5f'%(lat) newlon=OrRec["long"] if newlon!="":lon=float(newlon) if lon=="": print "No longitude specified for ! ",sample sys.exit() MagRec["sample_lon"]='%11.5f'%(lon) if 'bedding_dip_direction' in OrRec.keys(): newbeddir=OrRec["bedding_dip_direction"] if newbeddir!="":bed_dip_dir=OrRec['bedding_dip_direction'] if 'bedding_dip' in OrRec.keys(): newbeddip=OrRec["bedding_dip"] if newbeddip!="":bed_dip=OrRec['bedding_dip'] MagRec["sample_bed_dip"]=bed_dip MagRec["sample_bed_dip_direction"]=bed_dip_dir if "sample_class" in OrRec.keys():newclass=OrRec["sample_class"] if newclass!="":sclass=newclass if sclass=="": sclass="Not Specified" MagRec["sample_class"]=sclass if "sample_lithology" in OrRec.keys():newlith=OrRec["sample_lithology"] if newlith!="":lithology=newlith if lithology=="": lithology="Not Specified" MagRec["sample_lithology"]=lithology if "sample_type" in OrRec.keys():newtype=OrRec["sample_type"] if newtype!="":type=newtype if type=="": type="Not Specified" MagRec["sample_type"]=type if labdip!="": MagRec["sample_dip"]='%7.1f'%labdip else: MagRec["sample_dip"]="" if "date" in OrRec.keys(): newdate=OrRec["date"] if newdate!="":date=newdate mmddyy=date.split('/') yy=int(mmddyy[2]) if yy>50: yy=1900+yy else: yy=2000+yy decimal_year=yy+float(mmddyy[0])/12 sample_date='%i:%s:%s'%(yy,mmddyy[0],mmddyy[1]) MagRec["sample_date"]=sample_date if labaz!="": MagRec["sample_azimuth"]='%7.1f'%(labaz) else: MagRec["sample_azimuth"]="" if "stratigraphic_height" in OrRec.keys(): if OrRec["stratigraphic_height"]!="": MagRec["sample_height"]=OrRec["stratigraphic_height"] stratpos=OrRec["stratigraphic_height"] elif OrRec["stratigraphic_height"]=='-1': MagRec["sample_height"]="" # make empty else: MagRec["sample_height"]=stratpos # keep last record if blank # if corr=="1" and MagRec['sample_azimuth']!="": # get magnetic declination (corrected with igrf value) x,y,z,f=pmag.doigrf(lon,lat,0,decimal_year) Dir=pmag.cart2dir( (x,y,z)) DecCorr=Dir[0] if "bedding_dip" in OrRec.keys(): if OrRec["bedding_dip"]!="": MagRec["sample_bed_dip"]=OrRec["bedding_dip"] bed_dip=OrRec["bedding_dip"] else: MagRec["sample_bed_dip"]=bed_dip else: MagRec["sample_bed_dip"]='0' if "bedding_dip_direction" in OrRec.keys(): if OrRec["bedding_dip_direction"]!="" and BedCorr==1: dd=float(OrRec["bedding_dip_direction"])+DecCorr if dd>360.:dd=dd-360. 
MagRec["sample_bed_dip_direction"]='%7.1f'%(dd) dip_dir=MagRec["sample_bed_dip_direction"] else: MagRec["sample_bed_dip_direction"]=OrRec['bedding_dip_direction'] else: MagRec["sample_bed_dip_direction"]='0' if average_bedding!="0": BPs.append([float(MagRec["sample_bed_dip_direction"]),float(MagRec["sample_bed_dip"])-90.,1.]) if MagRec['sample_azimuth']=="" and MagRec['sample_dip']=="": MagRec["sample_declination_correction"]='' methcodes=methcodes+':SO-NO' MagRec["magic_method_codes"]=methcodes MagRec['sample_description']=sample_description # # work on the site stuff too if 'site_name' in OrRec.keys(): site=OrRec['site_name'] else: site=pmag.parse_site(OrRec["sample_name"],samp_con,Z) # parse out the site name MagRec["er_site_name"]=site site_description="" # overwrite any prior description if 'site_description' in OrRec.keys() and OrRec['site_description']!="": site_description=OrRec['site_description'].replace(",",";") if "image_name" in OrRec.keys(): images=OrRec["image_name"].split(":") if "image_look" in OrRec.keys(): looks=OrRec['image_look'].split(":") else: looks=[] if "image_photographer" in OrRec.keys(): photographers=OrRec['image_photographer'].split(":") else: photographers=[] for image in images: if image !="" and image not in imagelist: imagelist.append(image) ImageRec={} ImageRec['er_image_name']=image ImageRec['image_type']="outcrop" ImageRec['image_date']=sample_date ImageRec['er_citation_names']="This study" ImageRec['er_location_name']=location_name ImageRec['er_site_name']=MagRec['er_site_name'] k=images.index(image) if len(looks)>k: ImageRec['er_image_description']="Look direction: "+looks[k] elif len(looks)>=1: ImageRec['er_image_description']="Look direction: "+looks[-1] else: ImageRec['er_image_description']="Look direction: unknown" if len(photographers)>k: ImageRec['er_photographer_mail_names']=photographers[k] elif len(photographers)>=1: ImageRec['er_photographer_mail_names']=photographers[-1] else: ImageRec['er_photographer_mail_names']="unknown" ImageOuts.append(ImageRec) if site not in sitelist: sitelist.append(site) # collect unique site names SiteRec={} SiteRec["er_site_name"]=site SiteRec["site_definition"]="s" SiteRec["er_location_name"]=location_name SiteRec["er_citation_names"]="This study" SiteRec["site_lat"]=MagRec["sample_lat"] SiteRec["site_lon"]=MagRec["sample_lon"] SiteRec["site_height"]=MagRec["sample_height"] SiteRec["site_class"]=MagRec["sample_class"] SiteRec["site_lithology"]=MagRec["sample_lithology"] SiteRec["site_type"]=MagRec["sample_type"] SiteRec["site_description"]=site_description SiteOuts.append(SiteRec) if sample not in samplelist: samplelist.append(sample) if MagRec['sample_azimuth']!="": # assume magnetic compass only MagRec['magic_method_codes']=MagRec['magic_method_codes']+':SO-MAG' MagRec['magic_method_codes']=MagRec['magic_method_codes'].strip(":") SampOuts.append(MagRec) if MagRec['sample_azimuth']!="" and corr!='3': az=labaz+DecCorr if az>360.:az=az-360. 
CMDRec={} for key in MagRec.keys(): CMDRec[key]=MagRec[key] # make a copy of MagRec CMDRec["sample_azimuth"]='%7.1f'%(az) CMDRec["magic_method_codes"]=methcodes+':SO-CMD-NORTH' CMDRec["magic_method_codes"]=CMDRec['magic_method_codes'].strip(':') CMDRec["sample_declination_correction"]='%7.1f'%(DecCorr) if corr=='1': CMDRec['sample_description']=sample_description+':Declination correction calculated from IGRF' else: CMDRec['sample_description']=sample_description+':Declination correction supplied by user' CMDRec["sample_description"]=CMDRec['sample_description'].strip(':') SampOuts.append(CMDRec) if "mag_az_bs" in OrRec.keys() and OrRec["mag_az_bs"] !="" and OrRec["mag_az_bs"]!=" ": SRec={} for key in MagRec.keys(): SRec[key]=MagRec[key] # make a copy of MagRec labaz=float(OrRec["mag_az_bs"]) az=labaz+DecCorr if az>360.:az=az-360. SRec["sample_azimuth"]='%7.1f'%(az) SRec["sample_declination_correction"]='%7.1f'%(DecCorr) SRec["magic_method_codes"]=methcodes+':SO-SIGHT-BACK:SO-CMD-NORTH' SampOuts.append(SRec) # # check for suncompass data # if "shadow_angle" in OrRec.keys() and OrRec["shadow_angle"]!="": # there are sun compass data if delta_u=="": delta_u=raw_input("Enter hours to SUBTRACT from time for GMT: [0] ") if delta_u=="":delta_u="0" SunRec,sundata={},{} shad_az=float(OrRec["shadow_angle"]) sundata["date"]='%i:%s:%s:%s'%(yy,mmddyy[0],mmddyy[1],OrRec["hhmm"]) # if eval(delta_u)<0: # MagRec["sample_time_zone"]='GMT'+delta_u+' hours' # else: # MagRec["sample_time_zone"]='GMT+'+delta_u+' hours' sundata["delta_u"]=delta_u sundata["lon"]='%7.1f'%(lon) sundata["lat"]='%7.1f'%(lat) sundata["shadow_angle"]=OrRec["shadow_angle"] sundec=pmag.dosundec(sundata) for key in MagRec.keys(): SunRec[key]=MagRec[key] # make a copy of MagRec SunRec["sample_azimuth"]='%7.1f'%(sundec) SunRec["sample_declination_correction"]='' SunRec["magic_method_codes"]=methcodes+':SO-SUN' SunRec["magic_method_codes"]=SunRec['magic_method_codes'].strip(':') SampOuts.append(SunRec) # # check for differential GPS data # if "prism_angle" in OrRec.keys() and OrRec["prism_angle"]!="": # there are diff GPS data GPSRec={} for key in MagRec.keys(): GPSRec[key]=MagRec[key] # make a copy of MagRec prism_angle=float(OrRec["prism_angle"]) laser_angle=float(OrRec["laser_angle"]) if OrRec["GPS_baseline"]!="": baseline=float(OrRec["GPS_baseline"]) # new baseline gps_dec=baseline+laser_angle+prism_angle-90. while gps_dec>360.: gps_dec=gps_dec-360. while gps_dec<0: gps_dec=gps_dec+360. for key in MagRec.keys(): GPSRec[key]=MagRec[key] # make a copy of MagRec GPSRec["sample_azimuth"]='%7.1f'%(gps_dec) GPSRec["sample_declination_correction"]='' GPSRec["magic_method_codes"]=methcodes+':SO-GPS-DIFF' SampOuts.append(GPSRec) if "GPS_Az" in OrRec.keys() and OrRec["GPS_Az"]!="": # there are differential GPS Azimuth data GPSRec={} for key in MagRec.keys(): GPSRec[key]=MagRec[key] # make a copy of MagRec GPSRec["sample_azimuth"]='%7.1f'%(float(OrRec["GPS_Az"])) GPSRec["sample_declination_correction"]='' GPSRec["magic_method_codes"]=methcodes+':SO-GPS-DIFF' SampOuts.append(GPSRec) if average_bedding!="0": fpars=pmag.fisher_mean(BPs) print 'over-writing all bedding with average ' Samps=[] for rec in SampOuts: if average_bedding!="0": rec['sample_bed_dip_direction']='%7.1f'%(fpars['dec']) rec['sample_bed_dip']='%7.1f'%(fpars['inc']+90.) 
Samps.append(rec) else: Samps.append(rec) for rec in SampRecs: if rec['er_sample_name'] not in samplelist: # overwrite prior for this sample Samps.append(rec) for rec in SiteRecs: if rec['er_site_name'] not in sitelist: # overwrite prior for this sample SiteOuts.append(rec) for rec in ImageRecs: if rec['er_image_name'] not in imagelist: # overwrite prior for this sample ImageOuts.append(rec) print 'saving data...' SampsOut,keys=pmag.fillkeys(Samps) Sites,keys=pmag.fillkeys(SiteOuts) pmag.magic_write(samp_file,SampsOut,"er_samples") pmag.magic_write(site_file,Sites,"er_sites") print "Data saved in ", samp_file,' and ',site_file if len(ImageOuts)>0: Images,keys=pmag.fillkeys(ImageOuts) pmag.magic_write(image_file,Images,"er_images") print "Image info saved in ",image_file
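The core geometric step in orientation_magic.py is turning the field readings into a lab-arrow azimuth and dip (via pmag.orient) and then, for declination convention [1], correcting the azimuth with the IGRF declination at the site and sampling date. A condensed sketch of that pipeline for the default Pomeroy convention; all numeric inputs here are made up for illustration.

import pmag

mag_azimuth, field_dip = 147.0, 62.0         # field arrow readings (illustrative)
lat, lon, decimal_year = 33.1, -117.3, 2004.5

# orientation convention [1]: lab azimuth = mag_azimuth, lab dip = -field_dip
labaz, labdip = pmag.orient(mag_azimuth, field_dip, "1")

# declination convention [1]: look up the IGRF field at the site and date
x, y, z, f = pmag.doigrf(lon, lat, 0, decimal_year)
DecCorr = pmag.cart2dir((x, y, z))[0]        # IGRF declination

az = (labaz + DecCorr) % 360.                # corrected sample_azimuth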
def dokent(data, NN): #From PmagPy (Tauxe et al., 2016) """ gets Kent parameters for data ([D,I],N) """ X, kpars = [], {} N = len(data) if N < 2: return kpars # # get fisher mean and convert to co-inclination (theta)/dec (phi) in radians # fpars = pmag.fisher_mean(data) pbar = fpars["dec"] * np.pi / 180. tbar = (90. - fpars["inc"]) * np.pi / 180. # # initialize matrices # H = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]] w = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]] b = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]] gam = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]] xg = [] # # set up rotation matrix H # H = [[ np.cos(tbar) * np.cos(pbar), -np.sin(pbar), np.sin(tbar) * np.cos(pbar) ], [ np.cos(tbar) * np.sin(pbar), np.cos(pbar), np.sin(pbar) * np.sin(tbar) ], [-np.sin(tbar), 0., np.cos(tbar)]] # # get cartesian coordinates of data # for rec in data: X.append(pmag.dir2cart([rec[0], rec[1], 1.])) # # put in T matrix # T = pmag.Tmatrix(X) for i in range(3): for j in range(3): T[i][j] = old_div(T[i][j], float(NN)) # # compute B=H'TH # for i in range(3): for j in range(3): for k in range(3): w[i][j] += T[i][k] * H[k][j] for i in range(3): for j in range(3): for k in range(3): b[i][j] += H[k][i] * w[k][j] # # choose a rotation w about North pole to diagonalize upper part of B # psi = 0.5 * np.arctan(2. * b[0][1] / (b[0][0] - b[1][1])) w = [[np.cos(psi), -np.sin(psi), 0], [np.sin(psi), np.cos(psi), 0], [0., 0., 1.]] for i in range(3): for j in range(3): gamtmp = 0. for k in range(3): gamtmp += H[i][k] * w[k][j] gam[i][j] = gamtmp for i in range(N): xg.append([0., 0., 0.]) for k in range(3): xgtmp = 0. for j in range(3): xgtmp += gam[j][k] * X[i][j] xg[i][k] = xgtmp # compute asymptotic ellipse parameters # xmu, sigma1, sigma2 = 0., 0., 0. for i in range(N): xmu += xg[i][2] sigma1 = sigma1 + xg[i][0]**2 sigma2 = sigma2 + xg[i][1]**2 xmu = old_div(xmu, float(N)) sigma1 = old_div(sigma1, float(N)) sigma2 = old_div(sigma2, float(N)) g = -2.0 * np.log(0.05) / (float(NN) * xmu**2) if np.sqrt(sigma1 * g) < 1: eta = np.arcsin(np.sqrt(sigma1 * g)) if np.sqrt(sigma2 * g) < 1: zeta = np.arcsin(np.sqrt(sigma2 * g)) if np.sqrt(sigma1 * g) >= 1.: eta = old_div(np.pi, 2.) if np.sqrt(sigma2 * g) >= 1.: zeta = old_div(np.pi, 2.) # # convert Kent parameters to directions,angles # kpars["dec"] = fpars["dec"] kpars["inc"] = fpars["inc"] kpars["n"] = NN ZDir = pmag.cart2dir([gam[0][1], gam[1][1], gam[2][1]]) EDir = pmag.cart2dir([gam[0][0], gam[1][0], gam[2][0]]) kpars["Zdec"] = ZDir[0] kpars["Zinc"] = ZDir[1] kpars["Edec"] = EDir[0] kpars["Einc"] = EDir[1] if kpars["Zinc"] < 0: kpars["Zinc"] = -kpars["Zinc"] kpars["Zdec"] = (kpars["Zdec"] + 180.) % 360. if kpars["Einc"] < 0: kpars["Einc"] = -kpars["Einc"] kpars["Edec"] = (kpars["Edec"] + 180.) % 360. kpars["Zeta"] = zeta * 180. / np.pi kpars["Eta"] = eta * 180. / np.pi return kpars
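Two conventions for the second argument of dokent appear in these listings: for raw directions NN is the number of observations (len(data)), while for bootstrapped means NN is set to 1 so the ellipse describes the spread of the bootstrap distribution itself rather than a standard error. A short usage sketch of both calls on synthetic data:

import pmag

DIs = [list(pmag.fshdev(30.)) for i in range(40)]   # synthetic Fisher-distributed directions

# Kent ellipse of the data themselves: NN = number of directions
kpars = pmag.dokent(DIs, len(DIs))

# Kent ellipse of the bootstrapped means: NN = 1 (as in the 'BE' branches above)
Bkpars = pmag.dokent(pmag.di_boot(DIs), 1.)

# both return dec/inc plus the semi-angles and axis directions
# (Zeta, Zdec, Zinc, Eta, Edec, Einc) consumed by the plotting routines
print(kpars['Zeta'])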
def main(): """ NAME watsonsF.py DESCRIPTION calculates Watson's F statistic from input files INPUT FORMAT takes dec/inc as first two columns in two space delimited files SYNTAX watsonsF.py [command line options] OPTIONS -h prints help message and quits -f FILE (with optional second) -f2 FILE (second file) -ant, flip antipodal directions in FILE to opposite direction OUTPUT Watson's F, critical value from F-tables for 2, 2(N-2) degrees of freedom """ D,D1,D2=[],[],[] Flip=0 if '-h' in sys.argv: # check if help is needed print main.__doc__ sys.exit() # graceful quit if '-ant' in sys.argv: Flip=1 if '-f' in sys.argv: ind=sys.argv.index('-f') file1=sys.argv[ind+1] if '-f2' in sys.argv: ind=sys.argv.index('-f2') file2=sys.argv[ind+1] f=open(file1,'rU') for line in f.readlines(): if '\t' in line: rec=line.split('\t') # split each line on space to get records else: rec=line.split() # split each line on space to get records Dec,Inc=float(rec[0]),float(rec[1]) D1.append([Dec,Inc,1.]) D.append([Dec,Inc,1.]) f.close() if Flip==0: f=open(file2,'rU') for line in f.readlines(): rec=line.split() Dec,Inc=float(rec[0]),float(rec[1]) D2.append([Dec,Inc,1.]) D.append([Dec,Inc,1.]) f.close() else: D1,D2=pmag.flip(D1) for d in D2: D.append(d) # # first calculate the fisher means and cartesian coordinates of each set of Directions # pars_0=pmag.fisher_mean(D) pars_1=pmag.fisher_mean(D1) pars_2=pmag.fisher_mean(D2) # # get F statistic for these # N= len(D) R=pars_0['r'] R1=pars_1['r'] R2=pars_2['r'] F=(N-2)*((R1+R2-R)/(N-R1-R2)) Fcrit=pmag.fcalc(2,2*(N-2)) print F,Fcrit
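The statistic here is Watson's F for a common mean, F = (N-2)(R1+R2-R)/(N-R1-R2), compared against the F distribution with 2 and 2(N-2) degrees of freedom. A compact sketch of the same calculation on two synthetic data sets (watsons_f is an illustrative helper name):

import pmag

def watsons_f(D1, D2):
    # D1, D2: lists of [dec, inc] pairs, as in gofish.py above
    D = D1 + D2
    R = pmag.fisher_mean(D)['r']
    R1 = pmag.fisher_mean(D1)['r']
    R2 = pmag.fisher_mean(D2)['r']
    N = len(D)
    F = (N - 2.) * (R1 + R2 - R) / (N - R1 - R2)
    Fcrit = pmag.fcalc(2, 2 * (N - 2))   # critical value for 2, 2(N-2) degrees of freedom
    return F, Fcrit

D1 = [list(pmag.fshdev(30.)) for i in range(15)]
D2 = [list(pmag.fshdev(30.)) for i in range(15)]
F, Fcrit = watsons_f(D1, D2)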
def common_dir_MM90(dir1,dir2,NumSims=5000,plot='no'): dir1['r']=get_R(dir1) dir2['r']=get_R(dir2) #largely based on iWatsonV routine of Swanson-Hyell in IPMag cart_1=pmag.dir2cart([dir1["dec"],dir1["inc"],dir1["r"]]) cart_2=pmag.dir2cart([dir2['dec'],dir2['inc'],dir2["r"]]) Sw=dir1['k']*dir1['r']+dir2['k']*dir2['r'] # k1*r1+k2*r2 xhat_1=dir1['k']*cart_1[0]+dir2['k']*cart_2[0] # k1*x1+k2*x2 xhat_2=dir1['k']*cart_1[1]+dir2['k']*cart_2[1] # k1*y1+k2*y2 xhat_3=dir1['k']*cart_1[2]+dir2['k']*cart_2[2] # k1*z1+k2*z2 Rw=np.sqrt(xhat_1**2+xhat_2**2+xhat_3**2) V=2*(Sw-Rw) # keep weighted sum for later when determining the "critical angle" # let's save it as Sr (notation of McFadden and McElhinny, 1990) Sr=Sw # do monte carlo simulation of datasets with same kappas as data, # but a common mean counter=0 Vp=[] # set of Vs from simulations for k in range(NumSims): # get a set of N1 fisher distributed vectors with k1, # calculate fisher stats Dirp=[] for i in range(int(dir1["n"])): Dirp.append(pmag.fshdev(dir1["k"])) pars_p1=pmag.fisher_mean(Dirp) # get a set of N2 fisher distributed vectors with k2, # calculate fisher stats Dirp=[] for i in range(int(dir2["n"])): Dirp.append(pmag.fshdev(dir2["k"])) pars_p2=pmag.fisher_mean(Dirp) # get the V for these Vk=pmag.vfunc(pars_p1,pars_p2) Vp.append(Vk) # sort the Vs, get Vcrit (95th percentile one) Vp.sort() k=int(.95*NumSims) Vcrit=Vp[k] # equation 18 of McFadden and McElhinny, 1990 calculates the critical # value of R (Rwc) Rwc=Sr-(Vcrit/2) # following equation 19 of McFadden and McElhinny (1990) the critical # angle is calculated. If the observed angle (also calculated below) # between the data set means exceeds the critical angle the hypothesis # of a common mean direction may be rejected at the 95% confidence # level. The critical angle is simply a different way to present # Watson's V parameter so it makes sense to use the Watson V parameter # in comparison with the critical value of V for considering the test # results. What calculating the critical angle allows for is the # classification of McFadden and McElhinny (1990) to be made # for data sets that are consistent with sharing a common mean. k1=dir1['k'] k2=dir2['k'] R1=dir1['r'] R2=dir2['r'] critical_angle=np.degrees(np.arccos(((Rwc**2)-((k1*R1)**2) -((k2*R2)**2))/ (2*k1*R1*k2*R2))) D1=(dir1['dec'],dir1['inc']) D2=(dir2['dec'],dir2['inc']) angle=pmag.angle(D1,D2) if V<Vcrit: outcome='Pass' if critical_angle<5: MM90class='A' elif critical_angle<10: MM90class='B' elif critical_angle<20: MM90class='C' else: MM90class='INDETERMINATE' else: outcome='Fail' MM90class='FAIL' result=pd.Series([outcome,V,Vcrit,angle[0],critical_angle,MM90class], index=['Outcome','VWatson','Vcrit','angle','critangle','MM90class']) if plot=='yes': CDF={'cdf':1} #pmagplotlib.plot_init(CDF['cdf'],5,5) p1 = pmagplotlib.plotCDF(CDF['cdf'],Vp,"Watson's V",'r',"") p2 = pmagplotlib.plotVs(CDF['cdf'],[V],'g','-') p3 = pmagplotlib.plotVs(CDF['cdf'],[Vp[k]],'b','--') pmagplotlib.drawFIGS(CDF) return result
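common_dir_MM90 takes published Fisher means rather than raw directions, so it needs n and k as well as dec/inc, and it reconstructs R with a helper get_R that is not shown in these listings; presumably it inverts the usual Fisher approximation k = (N-1)/(N-R). A hedged sketch of that assumed helper and of a call with pandas Series inputs (the numbers are illustrative only):

import pandas as pd

def get_R(d):
    # assumed behaviour of the get_R helper used above: recover the resultant
    # length R from n and k via the Fisher estimate k = (n - 1) / (n - R)
    return d['n'] - (d['n'] - 1.) / d['k']

# two published site means (illustrative values)
dir1 = pd.Series({'dec': 351.0, 'inc': 42.0, 'n': 18, 'k': 45.0})
dir2 = pd.Series({'dec': 3.5, 'inc': 47.5, 'n': 22, 'k': 38.0})

result = common_dir_MM90(dir1, dir2, NumSims=1000)
# result is a pandas Series: Outcome, VWatson, Vcrit, angle, critangle, MM90class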
def main(): """ NAME eqarea_magic.py DESCRIPTION makes equal area projections from declination/inclination data SYNTAX eqarea_magic.py [command line options] INPUT takes magic formatted pmag_results, pmag_sites, pmag_samples or pmag_specimens OPTIONS -h prints help message and quits -f FILE: specify input magic format file from magic,default='pmag_results.txt' supported types=[magic_measurements,pmag_specimens, pmag_samples, pmag_sites, pmag_results, magic_web] -obj OBJ: specify level of plot [all, sit, sam, spc], default is all -crd [s,g,t]: specify coordinate system, [s]pecimen, [g]eographic, [t]ilt adjusted default is geographic, unspecified assumed geographic -fmt [svg,png,jpg] format for output plots -ell [F,K,B,Be,Bv] plot Fisher, Kent, Bingham, Bootstrap ellipses or Boostrap eigenvectors -c plot as colour contour -sav save plot and quit quietly NOTE all: entire file; sit: site; sam: sample; spc: specimen """ FIG = {} # plot dictionary FIG['eqarea'] = 1 # eqarea is figure 1 in_file, plot_key, coord, crd = 'pmag_results.txt', 'all', "0", 'g' plotE, contour = 0, 0 dir_path = '.' fmt = 'svg' verbose = pmagplotlib.verbose if '-h' in sys.argv: print main.__doc__ sys.exit() if '-WD' in sys.argv: ind = sys.argv.index('-WD') dir_path = sys.argv[ind + 1] pmagplotlib.plot_init(FIG['eqarea'], 5, 5) if '-f' in sys.argv: ind = sys.argv.index("-f") in_file = dir_path + "/" + sys.argv[ind + 1] if '-obj' in sys.argv: ind = sys.argv.index('-obj') plot_by = sys.argv[ind + 1] if plot_by == 'all': plot_key = 'all' if plot_by == 'sit': plot_key = 'er_site_name' if plot_by == 'sam': plot_key = 'er_sample_name' if plot_by == 'spc': plot_key = 'er_specimen_name' if '-c' in sys.argv: contour = 1 plots = 0 if '-sav' in sys.argv: plots = 1 verbose = 0 if '-ell' in sys.argv: plotE = 1 ind = sys.argv.index('-ell') ell_type = sys.argv[ind + 1] if ell_type == 'F': dist = 'F' if ell_type == 'K': dist = 'K' if ell_type == 'B': dist = 'B' if ell_type == 'Be': dist = 'BE' if ell_type == 'Bv': dist = 'BV' FIG['bdirs'] = 2 pmagplotlib.plot_init(FIG['bdirs'], 5, 5) if '-crd' in sys.argv: ind = sys.argv.index("-crd") crd = sys.argv[ind + 1] if crd == 's': coord = "-1" if crd == 'g': coord = "0" if crd == 't': coord = "100" if '-fmt' in sys.argv: ind = sys.argv.index("-fmt") fmt = sys.argv[ind + 1] Dec_keys = ['site_dec', 'sample_dec', 'specimen_dec', 'measurement_dec', 'average_dec', 'none'] Inc_keys = ['site_inc', 'sample_inc', 'specimen_inc', 'measurement_inc', 'average_inc', 'none'] Tilt_keys = ['tilt_correction', 'site_tilt_correction', 'sample_tilt_correction', 'specimen_tilt_correction', 'none'] Dir_type_keys = ['', 'site_direction_type', 'sample_direction_type', 'specimen_direction_type'] Name_keys = ['er_specimen_name', 'er_sample_name', 'er_site_name', 'pmag_result_name'] data, file_type = pmag.magic_read(in_file) if file_type == 'pmag_results' and plot_key != "all": plot_key = plot_key + 's' # need plural for results table if verbose: print len(data), ' records read from ', in_file # # # find desired dec,inc data: # dir_type_key = '' # # get plotlist if not plotting all records # plotlist = [] if plot_key != "all": plots = pmag.get_dictitem(data, plot_key, '', 'F') for rec in plots: if rec[plot_key] not in plotlist: plotlist.append(rec[plot_key]) plotlist.sort() else: plotlist.append('All') for plot in plotlist: if verbose: print plot DIblock = [] GCblock = [] SLblock, SPblock = [], [] title = plot mode = 1 dec_key, inc_key, tilt_key, name_key, k = "", "", "", "", 0 if plot != "All": odata = 
pmag.get_dictitem(data, plot_key, plot, 'T') else: odata = data # data for this obj for dec_key in Dec_keys: Decs = pmag.get_dictitem(odata, dec_key, '', 'F') # get all records with this dec_key not blank if len(Decs) > 0: break for inc_key in Inc_keys: Incs = pmag.get_dictitem(Decs, inc_key, '', 'F') # get all records with this inc_key not blank if len(Incs) > 0: break for tilt_key in Tilt_keys: if tilt_key in Incs[0].keys(): break # find the tilt_key for these records if tilt_key == 'none': # no tilt key in data, need to fix this with fake data which will be unknown tilt tilt_key = 'tilt_correction' for rec in Incs: rec[tilt_key] = '' cdata = pmag.get_dictitem(Incs, tilt_key, coord, 'T') # get all records matching specified coordinate system if coord == '0': # geographic udata = pmag.get_dictitem(Incs, tilt_key, '', 'T') # get all the blank records - assume geographic if len(cdata) == 0: crd = '' if len(udata) > 0: for d in udata: cdata.append(d) crd = crd + 'u' for name_key in Name_keys: Names = pmag.get_dictitem(cdata, name_key, '', 'F') # get all records with this name_key not blank if len(Names) > 0: break for dir_type_key in Dir_type_keys: Dirs = pmag.get_dictitem(cdata, dir_type_key, '', 'F') # get all records with this direction type if len(Dirs) > 0: break if dir_type_key == "": dir_type_key = 'direction_type' locations, site, sample, specimen = "", "", "", "" for rec in cdata: # pick out the data if 'er_location_name' in rec.keys() and rec['er_location_name'] != "" and rec[ 'er_location_name'] not in locations: locations = locations + rec['er_location_name'].replace("/", "") + "_" if 'er_location_names' in rec.keys() and rec['er_location_names'] != "": locs = rec['er_location_names'].split(':') for loc in locs: if loc not in locations: locations = locations + loc.replace("/", "") + '_' if plot_key == 'er_site_name' or plot_key == 'er_sample_name' or plot_key == 'er_specimen_name': site = rec['er_site_name'] if plot_key == 'er_sample_name' or plot_key == 'er_specimen_name': sample = rec['er_sample_name'] if plot_key == 'er_specimen_name': specimen = rec['er_specimen_name'] if plot_key == 'er_site_names' or plot_key == 'er_sample_names' or plot_key == 'er_specimen_names': site = rec['er_site_names'] if plot_key == 'er_sample_names' or plot_key == 'er_specimen_names': sample = rec['er_sample_names'] if plot_key == 'er_specimen_names': specimen = rec['er_specimen_names'] if dir_type_key not in rec.keys() or rec[dir_type_key] == "": rec[dir_type_key] = 'l' if 'magic_method_codes' not in rec.keys(): rec['magic_method_codes'] = "" DIblock.append([float(rec[dec_key]), float(rec[inc_key])]) SLblock.append([rec[name_key], rec['magic_method_codes']]) if rec[tilt_key] == coord and rec[dir_type_key] != 'l' and rec[dec_key] != "" and rec[inc_key] != "": GCblock.append([float(rec[dec_key]), float(rec[inc_key])]) SPblock.append([rec[name_key], rec['magic_method_codes']]) if len(DIblock) == 0 and len(GCblock) == 0: if verbose: print "no records for plotting" sys.exit() if verbose: for k in range(len(SLblock)): print '%s %s %7.1f %7.1f' % (SLblock[k][0], SLblock[k][1], DIblock[k][0], DIblock[k][1]) for k in range(len(SPblock)): print '%s %s %7.1f %7.1f' % (SPblock[k][0], SPblock[k][1], GCblock[k][0], GCblock[k][1]) if len(DIblock) > 0: if contour == 0: pmagplotlib.plotEQ(FIG['eqarea'], DIblock, title) else: pmagplotlib.plotEQcont(FIG['eqarea'], DIblock) else: pmagplotlib.plotNET(FIG['eqarea']) if len(GCblock) > 0: for rec in GCblock: pmagplotlib.plotC(FIG['eqarea'], rec, 90., 'g') if plotE == 1: 
ppars = pmag.doprinc(DIblock) # get principal directions nDIs, rDIs, npars, rpars = [], [], [], [] for rec in DIblock: angle = pmag.angle([rec[0], rec[1]], [ppars['dec'], ppars['inc']]) if angle > 90.: rDIs.append(rec) else: nDIs.append(rec) if dist == 'B': # do on whole dataset etitle = "Bingham confidence ellipse" bpars = pmag.dobingham(DIblock) for key in bpars.keys(): if key != 'n' and verbose: print " ", key, '%7.1f' % (bpars[key]) if key == 'n' and verbose: print " ", key, ' %i' % (bpars[key]) npars.append(bpars['dec']) npars.append(bpars['inc']) npars.append(bpars['Zeta']) npars.append(bpars['Zdec']) npars.append(bpars['Zinc']) npars.append(bpars['Eta']) npars.append(bpars['Edec']) npars.append(bpars['Einc']) if dist == 'F': etitle = "Fisher confidence cone" if len(nDIs) > 2: fpars = pmag.fisher_mean(nDIs) for key in fpars.keys(): if key != 'n' and verbose: print " ", key, '%7.1f' % (fpars[key]) if key == 'n' and verbose: print " ", key, ' %i' % (fpars[key]) mode += 1 npars.append(fpars['dec']) npars.append(fpars['inc']) npars.append(fpars['alpha95']) # Beta npars.append(fpars['dec']) isign = abs(fpars['inc']) / fpars['inc'] npars.append(fpars['inc'] - isign * 90.) #Beta inc npars.append(fpars['alpha95']) # gamma npars.append(fpars['dec'] + 90.) # Beta dec npars.append(0.) #Beta inc if len(rDIs) > 2: fpars = pmag.fisher_mean(rDIs) if verbose: print "mode ", mode for key in fpars.keys(): if key != 'n' and verbose: print " ", key, '%7.1f' % (fpars[key]) if key == 'n' and verbose: print " ", key, ' %i' % (fpars[key]) mode += 1 rpars.append(fpars['dec']) rpars.append(fpars['inc']) rpars.append(fpars['alpha95']) # Beta rpars.append(fpars['dec']) isign = abs(fpars['inc']) / fpars['inc'] rpars.append(fpars['inc'] - isign * 90.) #Beta inc rpars.append(fpars['alpha95']) # gamma rpars.append(fpars['dec'] + 90.) # Beta dec rpars.append(0.) #Beta inc if dist == 'K': etitle = "Kent confidence ellipse" if len(nDIs) > 3: kpars = pmag.dokent(nDIs, len(nDIs)) if verbose: print "mode ", mode for key in kpars.keys(): if key != 'n' and verbose: print " ", key, '%7.1f' % (kpars[key]) if key == 'n' and verbose: print " ", key, ' %i' % (kpars[key]) mode += 1 npars.append(kpars['dec']) npars.append(kpars['inc']) npars.append(kpars['Zeta']) npars.append(kpars['Zdec']) npars.append(kpars['Zinc']) npars.append(kpars['Eta']) npars.append(kpars['Edec']) npars.append(kpars['Einc']) if len(rDIs) > 3: kpars = pmag.dokent(rDIs, len(rDIs)) if verbose: print "mode ", mode for key in kpars.keys(): if key != 'n' and verbose: print " ", key, '%7.1f' % (kpars[key]) if key == 'n' and verbose: print " ", key, ' %i' % (kpars[key]) mode += 1 rpars.append(kpars['dec']) rpars.append(kpars['inc']) rpars.append(kpars['Zeta']) rpars.append(kpars['Zdec']) rpars.append(kpars['Zinc']) rpars.append(kpars['Eta']) rpars.append(kpars['Edec']) rpars.append(kpars['Einc']) else: # assume bootstrap if dist == 'BE': if len(nDIs) > 5: BnDIs = pmag.di_boot(nDIs) Bkpars = pmag.dokent(BnDIs, 1.) if verbose: print "mode ", mode for key in Bkpars.keys(): if key != 'n' and verbose: print " ", key, '%7.1f' % (Bkpars[key]) if key == 'n' and verbose: print " ", key, ' %i' % (Bkpars[key]) mode += 1 npars.append(Bkpars['dec']) npars.append(Bkpars['inc']) npars.append(Bkpars['Zeta']) npars.append(Bkpars['Zdec']) npars.append(Bkpars['Zinc']) npars.append(Bkpars['Eta']) npars.append(Bkpars['Edec']) npars.append(Bkpars['Einc']) if len(rDIs) > 5: BrDIs = pmag.di_boot(rDIs) Bkpars = pmag.dokent(BrDIs, 1.) 
if verbose: print "mode ", mode for key in Bkpars.keys(): if key != 'n' and verbose: print " ", key, '%7.1f' % (Bkpars[key]) if key == 'n' and verbose: print " ", key, ' %i' % (Bkpars[key]) mode += 1 rpars.append(Bkpars['dec']) rpars.append(Bkpars['inc']) rpars.append(Bkpars['Zeta']) rpars.append(Bkpars['Zdec']) rpars.append(Bkpars['Zinc']) rpars.append(Bkpars['Eta']) rpars.append(Bkpars['Edec']) rpars.append(Bkpars['Einc']) etitle = "Bootstrapped confidence ellipse" elif dist == 'BV': sym = {'lower': ['o', 'c'], 'upper': ['o', 'g'], 'size': 3, 'edgecolor': 'face'} if len(nDIs) > 5: BnDIs = pmag.di_boot(nDIs) pmagplotlib.plotEQsym(FIG['bdirs'], BnDIs, 'Bootstrapped Eigenvectors', sym) if len(rDIs) > 5: BrDIs = pmag.di_boot(rDIs) if len(nDIs) > 5: # plot on existing plots pmagplotlib.plotDIsym(FIG['bdirs'], BrDIs, sym) else: pmagplotlib.plotEQ(FIG['bdirs'], BrDIs, 'Bootstrapped Eigenvectors') if dist == 'B': if len(nDIs) > 3 or len(rDIs) > 3: pmagplotlib.plotCONF(FIG['eqarea'], etitle, [], npars, 0) elif len(nDIs) > 3 and dist != 'BV': pmagplotlib.plotCONF(FIG['eqarea'], etitle, [], npars, 0) if len(rDIs) > 3: pmagplotlib.plotCONF(FIG['eqarea'], etitle, [], rpars, 0) elif len(rDIs) > 3 and dist != 'BV': pmagplotlib.plotCONF(FIG['eqarea'], etitle, [], rpars, 0) if verbose: pmagplotlib.drawFIGS(FIG) # files = {} locations = locations[:-1] for key in FIG.keys(): filename = 'LO:_' + locations + '_SI:_' + site + '_SA:_' + sample + '_SP:_' + specimen + '_CO:_' + crd + '_TY:_' + key + '_.' + fmt files[key] = filename if pmagplotlib.isServer: black = '#000000' purple = '#800080' titles = {} titles['eq'] = 'Equal Area Plot' FIG = pmagplotlib.addBorders(FIG, titles, black, purple) pmagplotlib.saveP(FIG, files) elif verbose: ans = raw_input(" S[a]ve to save plot, [q]uit, Return to continue: ") if ans == "q": sys.exit() if ans == "a": pmagplotlib.saveP(FIG, files) if plots: pmagplotlib.saveP(FIG, files)
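Most of the bookkeeping in eqarea_magic.py is done with pmag.get_dictitem, which filters a list of MagIC records on one column: the 'F' flag keeps records where the column is not blank, and 'T' keeps records where it equals the given value. A minimal illustration of the coordinate-system filtering used above, on hypothetical pmag_sites-style records:

import pmag

data = [
    {'er_site_name': 'TG001', 'site_dec': '11.2',  'site_inc': '44.1', 'site_tilt_correction': '0'},
    {'er_site_name': 'TG002', 'site_dec': '355.9', 'site_inc': '51.3', 'site_tilt_correction': '100'},
    {'er_site_name': 'TG003', 'site_dec': '',      'site_inc': '',     'site_tilt_correction': ''},
]

Decs = pmag.get_dictitem(data, 'site_dec', '', 'F')                    # records with a declination
geo = pmag.get_dictitem(Decs, 'site_tilt_correction', '0', 'T')        # geographic coordinates only
DIblock = [[float(r['site_dec']), float(r['site_inc'])] for r in geo]  # ready for plotEQ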
def sb_vgp_calc(dataframe): """ This function calculates the angular dispersion of VGPs and corrects for within site dispersion to return a value S_b. The input data needs to be within a pandas Dataframe. The function also plots the VGPs and their associated Fisher mean on a projection centered on the mean. Parameters ----------- dataframe : the name of the pandas.DataFrame containing the data the data frame needs to contain these columns: dataframe['site_lat'] : latitude of the site dataframe['site_lon'] : longitude of the site ---- changed to site_long so that it works for a given DF dataframe['inc_tc'] : tilt-corrected inclination dataframe['dec_tc'] : tilt-corrected declination dataframe['k'] : fisher precision parameter for directions dataframe['vgp_lat'] : VGP latitude ---- changed to pole_lat so that it works for a given DF dataframe['vgp_lon'] : VGP longitude ---- changed to pole_long so that it works for a given DF """ # calculate the mean from the directional data dataframe_dirs = [] for n in range(0, len(dataframe)): dataframe_dirs.append( [dataframe['dec_tc'][n], dataframe['inc_tc'][n], 1.]) dataframe_dir_mean = pmag.fisher_mean(dataframe_dirs) # calculate the mean from the vgp data dataframe_poles = [] dataframe_pole_lats = [] dataframe_pole_lons = [] for n in range(0, len(dataframe)): dataframe_poles.append( [dataframe['pole_long'][n], dataframe['pole_lat'][n], 1.]) dataframe_pole_lats.append(dataframe['pole_lat'][n]) dataframe_pole_lons.append(dataframe['pole_long'][n]) dataframe_pole_mean = pmag.fisher_mean(dataframe_poles) # calculate mean paleolatitude from the directional data dataframe['paleolatitude'] = lat_from_inc(dataframe_dir_mean['inc']) angle_list = [] for n in range(0, len(dataframe)): angle = pmag.angle( [dataframe['pole_long'][n], dataframe['pole_lat'][n]], [dataframe_pole_mean['dec'], dataframe_pole_mean['inc']]) angle_list.append(angle[0]) dataframe['delta_mean-pole'] = angle_list # use eq. 2 of Cox (1970) to translate the directional precision parameter # into pole coordinates using the assumption of a Fisherian distribution in # directional coordinates and the paleolatitude as calculated from mean # inclination using the dipole equation dataframe['K'] = dataframe['k'] / ( 0.125 * (5 + 18 * np.sin(np.deg2rad(dataframe['paleolatitude']))**2 + 9 * np.sin(np.deg2rad(dataframe['paleolatitude']))**4)) dataframe['Sw'] = 81 / (dataframe['K']**0.5) summation = 0 N = 0 for n in range(0, len(dataframe)): quantity = dataframe['delta_mean-pole'][ n]**2 - dataframe['Sw'][n]**2 / np.float(dataframe['n'][n]) summation += quantity N += 1 Sb = ((1.0 / (N - 1.0)) * summation)**0.5 plt.figure(figsize=(6, 6)) m = Basemap(projection='ortho', lat_0=dataframe_pole_mean['inc'], lon_0=dataframe_pole_mean['dec'], resolution='c', area_thresh=50000) m.drawcoastlines(linewidth=0.25) m.fillcontinents(color='bisque', lake_color='white', zorder=1) m.drawmapboundary(fill_color='white') m.drawmeridians(np.arange(0, 360, 30)) m.drawparallels(np.arange(-90, 90, 30)) plot_vgp(m, dataframe_pole_lons, dataframe_pole_lats, dataframe_pole_lons) plot_pole(m, dataframe_pole_mean['dec'], dataframe_pole_mean['inc'], dataframe_pole_mean['alpha95'], color='r', marker='s') return Sb
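The within-site correction in sb_vgp_calc rests on two pieces of arithmetic coded above: Cox (1970) maps the directional precision k to a pole-space precision K using the paleolatitude, the within-site VGP scatter is then Sw = 81/sqrt(K), and the corrected between-site scatter is Sb^2 = (1/(N-1)) * sum(delta_i^2 - Sw_i^2/n_i). A small numeric sketch of those formulas; every value below is illustrative.

import numpy as np

def k_to_K(k, paleolat):
    # Cox (1970) conversion from directional to pole-space precision,
    # as used in sb_vgp_calc above
    lam = np.deg2rad(paleolat)
    return k / (0.125 * (5. + 18. * np.sin(lam)**2 + 9. * np.sin(lam)**4))

k, paleolat, n = 60., 35., 8              # illustrative site-level values
K = k_to_K(k, paleolat)
Sw = 81. / np.sqrt(K)                     # within-site VGP scatter (degrees)

# between-site scatter corrected for within-site dispersion
deltas = np.array([12.3, 8.7, 15.1])      # angles of each VGP from the mean pole (illustrative)
Sws = np.array([Sw, Sw, Sw])
ns = np.array([n, n, n], dtype=float)
N = len(deltas)
Sb = np.sqrt(np.sum(deltas**2 - Sws**2 / ns) / (N - 1.))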
def watson_common_mean(Data1, Data2, NumSims=5000, plot='no'): """ Conduct a Watson V test for a common mean on two declination, inclination data sets This function calculates Watson's V statistic from input files through Monte Carlo simulation in order to test whether two populations of directional data could have been drawn from a common mean. The critical angle between the two sample mean directions and the corresponding McFadden and McElhinny (1990) classification is printed. Required Arguments ---------- Data1 : a list of directional data [dec,inc] Data2 : a list of directional data [dec,inc] Optional Arguments ---------- NumSims : number of Monte Carlo simulations (default is 5000) plot : the default is no plot ('no'). Putting 'yes' will the plot the CDF from the Monte Carlo simulations. """ pars_1 = pmag.fisher_mean(Data1) pars_2 = pmag.fisher_mean(Data2) cart_1 = pmag.dir2cart([pars_1["dec"], pars_1["inc"], pars_1["r"]]) cart_2 = pmag.dir2cart([pars_2['dec'], pars_2['inc'], pars_2["r"]]) Sw = pars_1['k'] * pars_1['r'] + pars_2['k'] * pars_2['r'] # k1*r1+k2*r2 xhat_1 = pars_1['k'] * cart_1[0] + pars_2['k'] * cart_2[0] # k1*x1+k2*x2 xhat_2 = pars_1['k'] * cart_1[1] + pars_2['k'] * cart_2[1] # k1*y1+k2*y2 xhat_3 = pars_1['k'] * cart_1[2] + pars_2['k'] * cart_2[2] # k1*z1+k2*z2 Rw = np.sqrt(xhat_1**2 + xhat_2**2 + xhat_3**2) V = 2 * (Sw - Rw) # keep weighted sum for later when determining the "critical angle" # let's save it as Sr (notation of McFadden and McElhinny, 1990) Sr = Sw # do monte carlo simulation of datasets with same kappas as data, # but a common mean counter = 0 Vp = [] # set of Vs from simulations for k in range(NumSims): # get a set of N1 fisher distributed vectors with k1, # calculate fisher stats Dirp = [] for i in range(pars_1["n"]): Dirp.append(pmag.fshdev(pars_1["k"])) pars_p1 = pmag.fisher_mean(Dirp) # get a set of N2 fisher distributed vectors with k2, # calculate fisher stats Dirp = [] for i in range(pars_2["n"]): Dirp.append(pmag.fshdev(pars_2["k"])) pars_p2 = pmag.fisher_mean(Dirp) # get the V for these Vk = pmag.vfunc(pars_p1, pars_p2) Vp.append(Vk) # sort the Vs, get Vcrit (95th percentile one) Vp.sort() k = int(.95 * NumSims) Vcrit = Vp[k] # equation 18 of McFadden and McElhinny, 1990 calculates the critical # value of R (Rwc) Rwc = Sr - (Vcrit / 2) # following equation 19 of McFadden and McElhinny (1990) the critical # angle is calculated. If the observed angle (also calculated below) # between the data set means exceeds the critical angle the hypothesis # of a common mean direction may be rejected at the 95% confidence # level. The critical angle is simply a different way to present # Watson's V parameter so it makes sense to use the Watson V parameter # in comparison with the critical value of V for considering the test # results. What calculating the critical angle allows for is the # classification of McFadden and McElhinny (1990) to be made # for data sets that are consistent with sharing a common mean. 
k1 = pars_1['k'] k2 = pars_2['k'] R1 = pars_1['r'] R2 = pars_2['r'] critical_angle = np.degrees( np.arccos(((Rwc**2) - ((k1 * R1)**2) - ((k2 * R2)**2)) / (2 * k1 * R1 * k2 * R2))) D1 = (pars_1['dec'], pars_1['inc']) D2 = (pars_2['dec'], pars_2['inc']) angle = pmag.angle(D1, D2) print "Results of Watson V test: " print "" print "Watson's V: " '%.1f' % (V) print "Critical value of V: " '%.1f' % (Vcrit) if V < Vcrit: print '"Pass": Since V is less than Vcrit, the null hypothesis' print 'that the two populations are drawn from distributions' print 'that share a common mean direction can not be rejected.' elif V > Vcrit: print '"Fail": Since V is greater than Vcrit, the two means can' print 'be distinguished at the 95% confidence level.' print "" print "M&M1990 classification:" print "" print "Angle between data set means: " '%.1f' % (angle) print "Critical angle for M&M1990: " '%.1f' % (critical_angle) if V > Vcrit: print "" elif V < Vcrit: if critical_angle < 5: print "The McFadden and McElhinny (1990) classification for" print "this test is: 'A'" elif critical_angle < 10: print "The McFadden and McElhinny (1990) classification for" print "this test is: 'B'" elif critical_angle < 20: print "The McFadden and McElhinny (1990) classification for" print "this test is: 'C'" else: print "The McFadden and McElhinny (1990) classification for" print "this test is: 'INDETERMINATE;" if plot == 'yes': CDF = {'cdf': 1} #pmagplotlib.plot_init(CDF['cdf'],5,5) p1 = pmagplotlib.plotCDF(CDF['cdf'], Vp, "Watson's V", 'r', "") p2 = pmagplotlib.plotVs(CDF['cdf'], [V], 'g', '-') p3 = pmagplotlib.plotVs(CDF['cdf'], [Vp[k]], 'b', '--') pmagplotlib.drawFIGS(CDF)
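A quick way to exercise watson_common_mean is to feed it two synthetic data sets: drawn about a common mean they should usually pass, and with one set rotated to a distinctly different mean they should usually fail. A usage sketch under those assumptions; it relies on watson_common_mean as defined above, and pmag.dodirot is assumed to rotate a vertical-mean deviate to a specified mean direction, as it is used in PmagPy's fishrot.py.

import pmag

# pmag.fshdev draws directions about a vertical mean, so these two sets
# share a common mean and should usually pass
Data1 = [list(pmag.fshdev(30.)) for i in range(20)]
Data2 = [list(pmag.fshdev(30.)) for i in range(20)]
watson_common_mean(Data1, Data2, NumSims=1000, plot='no')

# a second set rotated to a mean of dec=40, inc=50 (assumed dodirot behaviour);
# this pair should usually fail the test
Data3 = [list(pmag.dodirot(d, i, 40., 50.)) for d, i in Data2]
watson_common_mean(Data1, Data3, NumSims=1000, plot='no')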
#!/usr/bin/env python
import pmag, matplotlib, math
matplotlib.use("TkAgg")
import pylab
k = 29.2
Dels, A95s, CSDs, Ns = [], [], [], range(4, 31)
DIs = []
# draw 31 directions from a Fisher distribution with kappa = 29.2
for i in range(31):
    dec, inc = pmag.fshdev(k)
    DIs.append([dec, inc])
pars = pmag.fisher_mean(DIs)  # Fisher statistics of the full sample
# build progressively larger subsamples and track alpha95, the circular
# standard deviation (CSD) and the angular dispersion estimate delta = acos(R/N)
pDIs = []
for i in range(3):
    pDIs.append(DIs[i])
for n in Ns:
    pDIs.append(DIs[n])
    fpars = pmag.fisher_mean(pDIs)
    A95s.append(fpars['alpha95'])
    CSDs.append(fpars['csd'])
    Dels.append((180. / math.pi) * math.acos(fpars['r'] / float(n)))
# plot the three estimates against N; the horizontal line marks the CSD
# of the full 31-direction sample
pylab.plot(Ns, A95s, 'ro')
pylab.plot(Ns, A95s, 'r-')
pylab.plot(Ns, CSDs, 'bs')
pylab.plot(Ns, CSDs, 'b-')
pylab.plot(Ns, Dels, 'g^')
pylab.plot(Ns, Dels, 'g-')
pylab.axhline(pars['csd'], color='k')
pylab.show()
raw_input()
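For reference, the quantities plotted above have simple expectations: the circular standard deviation levels off near 81/sqrt(k) (the horizontal line from the full sample approximates this), while alpha95 shrinks roughly as 140/sqrt(kN). A short sketch of those reference curves, which could be overlaid on the same axes; the 81 and 140 coefficients are the standard rules of thumb, not outputs of the script above.

import math

k = 29.2
Ns = range(4, 31)
csd_expected = 81. / math.sqrt(k)                       # ~15 degrees for k = 29.2
a95_expected = [140. / math.sqrt(k * n) for n in Ns]    # approximate alpha95 vs N

# e.g. pylab.plot(Ns, [csd_expected] * len(Ns), 'k--')
# e.g. pylab.plot(Ns, a95_expected, 'k:')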