def snr_mat_f(mchvec, reds, lum_dist, fmin, fmax, fvec, finteg, tobs, sn_f):
    '''Compute the S/N on a grid of redshift and chirp mass, maximised over frequency.'''
    mch_fmat=np.transpose(np.tile(mchvec, (len(reds), len(fvec), finteg, 1)), axes=(0,3,1,2))
    z_fmat=np.transpose(np.tile(reds, (len(mchvec), len(fvec), finteg, 1)), axes=(3,0,1,2))
    f_fmat=np.transpose(np.tile(fvec, (len(reds), len(mchvec), finteg, 1)), axes=(0,1,3,2))
    finteg_fmat=np.tile(np.arange(finteg), (len(reds), len(mchvec), len(fvec), 1))
    stshape=np.shape(z_fmat) #Standard shape of all the matrices used below.
    DL_fmat=np.transpose(np.tile(lum_dist, (len(mchvec), len(fvec), finteg, 1)), axes=(3,0,1,2)) #Luminosity distance in Mpc.
    flim_fmat=A8.f_cut(1./4., 2.*mch_fmat*2.**(1./5.))*1./(1.+z_fmat) #The symmetric mass ratio is 1/4, since I assume equal masses.
    flim_det=np.maximum(np.minimum(fmax, flim_fmat), fmin) #The cut-off frequency limited to the detector window.
    tlim_fmat=CM.tafter(mch_fmat, f_fmat, flim_fmat, z_fmat) #By construction, f_fmat cannot be smaller than fmin or larger than fmax (which are the limits imposed by the detector).
    fmin_fmat=np.minimum(f_fmat, flim_det) #I impose that the minimum frequency cannot be larger than the cut-off frequency.
    fmaxobs_fmat=flim_det.copy()
    fmaxobs_fmat[tobs<tlim_fmat]=CM.fafter(mch_fmat[tobs<tlim_fmat], z_fmat[tobs<tlim_fmat], f_fmat[tobs<tlim_fmat], tobs)
    fmax_fmat=np.minimum(fmaxobs_fmat, flim_det) #The maximum frequency (after an observation tobs) cannot exceed the cut-off or the maximum frequency of the detector.
    integconst=(np.log10(fmax_fmat)-np.log10(fmin_fmat))*1./(finteg-1)
    finteg_fmat=fmin_fmat*10**(integconst*finteg_fmat) #Logarithmically spaced integration frequencies between fmin_fmat and fmax_fmat.
    sn_fmat=sn_f(finteg_fmat) #Noise power spectral density.
    htilde_fmat=A8.htilde_f(1./4., 2.*mch_fmat*2**(1./5.), z_fmat, DL_fmat, finteg_fmat)
    #Debugging plots (left commented out):
    #py.loglog(finteg_fmat[0,0,:,0], htilde_fmat[0,0,:,0]**2.)
    #py.loglog(finteg_fmat[0,0,:,0], sn_fmat[0,0,:,0])
    snrsq_int_fmat=4.*htilde_fmat**2./sn_fmat #Integrand of the S/N squared.
    snrsq_int_m_fmat=0.5*(snrsq_int_fmat[:,:,:,1:]+snrsq_int_fmat[:,:,:,:-1]) #Integrand at the arithmetic mean of the infinitesimal intervals.
    df_fmat=np.diff(finteg_fmat, axis=3) #Infinitesimal intervals.
    snr_full_fmat=np.sqrt(np.sum(snrsq_int_m_fmat*df_fmat, axis=3)) #S/N as a function of redshift, mass and frequency.
    fopt=fvec[np.argmax(snr_full_fmat, axis=2)] #Frequency at which the S/N is maximum, for each pixel of redshift and mass.
    snr_opt=np.amax(snr_full_fmat, axis=2) #Maximum S/N at each pixel of redshift and mass.
    return snr_opt
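#A minimal usage sketch for snr_mat_f, with coarse placeholder grids (not the
#production values); it assumes COMMON (CM), Formulas_AjithEtAl2008 (A8) and the
#detector files are available, as in the scripts elsewhere in this repository.
import numpy as np
from scipy import interpolate
import COMMON as CM
from COMMON import yr

fvecd, sn=CM.detector_f('ALIGO', 1.) #Detector sensitivity curve.
sn_f=interpolate.interp1d(fvecd, sn) #Interpolated noise power spectral density.
fmin, fmax=min(fvecd)*1.000001, max(fvecd)*0.99999
fvec=np.logspace(np.log10(fmin), np.log10(fmax), 50) #Coarse frequency grid.
mchvec=np.logspace(0., 2., 20) #Chirp masses in msun (placeholder range).
reds=np.logspace(-2., 1., 20) #Redshifts (placeholder range).
lum_dist=CM.comdist(reds)*(1.+reds) #Luminosity distance in Mpc.
snr_opt=snr_mat_f(mchvec, reds, lum_dist, fmin, fmax, fvec, 100, 10.*yr, sn_f)
print np.shape(snr_opt) #(len(reds), len(mchvec))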
def simulate_model(self):
    # simulate the model
    # run simulation and determine the temp_in and temp_mat
    self.__dict__.update(COMMON.simulate_model(**self.__dict__))
    # update data for peers and history records
    self.__dict__.update(COMMON.broadcast(**self.__dict__))
    # update counter, which counts how long the device has been at a certain status
    self.__dict__.update(COMMON.adjust_counter(**self.__dict__))
    return self.temp_in, self.temp_mat
def decide(self):
    # decide whether to approve or deny the proposed status
    # get device priority
    self.__dict__.update(COMMON.adjust_priority(**self.__dict__))
    # interpret ldc_signal
    self.__dict__.update(COMMON.interpret_signal(**self.__dict__))
    # adjust local limit
    self.__dict__.update(COMMON.adjust_limit(**self.__dict__))
    # save old status
    self.old_status = self.a_status
    # decide on the next status
    self.__dict__.update(COMMON.get_a_status(**self.__dict__))
    return self.a_status
def get_gridNode(self):
    """
    Get the grid machine or node.
    :return: a webdriver for the configured Host
    """
    filepath = os.getcwd()
    fig = FIG.ReadConfig(filepath)
    Host = fig.getConfigValue('Host')
    driver = CM.get_girdDriver(Host)
    return driver
def htilde_f(nu, mtot, z, lumdist, fvec):
    '''Piecewise effective amplitude of the waveform: inspiral below fmer, merger up to frin, ringdown up to fcut.'''
    mch=nu**(3./5.)*mtot
    fmer=A8.f_mer(nu, mtot)*1./(1.+z)
    frin=A8.f_rin(nu, mtot)*1./(1.+z)
    fcut=A8.f_cut(nu, mtot)*1./(1.+z)
    fsig=A8.f_sig(nu, mtot)*1./(1.+z)
    camp=CM.htilde_f(mch, z, lumdist, fmer)
    aeff=np.zeros(np.shape(fvec))
    selecti=(fvec<fmer)
    aeff[selecti]+=A8.aeff_low(fvec[selecti], fmer, camp)
    selecti=(fvec>=fmer)&(fvec<frin)
    aeff[selecti]+=A8.aeff_mid(fvec[selecti], fmer, camp)
    selecti=(fvec>=frin)&(fvec<fcut)
    aeff[selecti]+=A8.aeff_upp(fvec[selecti], fmer, frin, fsig, camp)
    return aeff
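#Sketch of evaluating the piecewise amplitude above on a frequency grid; the
#parameter values mirror the test script further below (mch=10 msun, z=10,
#equal masses), while the frequency range and grid size are arbitrary choices.
import numpy as np
import pylab as py
import COMMON as CM

nu=1./4. #Equal masses.
mch=10. #Chirp mass in msun.
mtot=2.*mch*2.**(1./5.) #Total mass for equal masses.
z=np.array([10.])
DL=CM.comdist(z)*(1.+z) #Luminosity distance in Mpc.
fvec=np.logspace(0., 4., 1000) #Frequency grid in Hz (arbitrary range).
aeff=htilde_f(nu, mtot, z, DL, fvec)
py.loglog(fvec, aeff) #The three regimes join at fmer and frin.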
import COMMON

class Solution:
    # @param root, a tree node
    # @param sum, an integer
    # @return a boolean
    def hasPathSum(self, root, sum):
        if root==None:
            return False
        if root.left==None and root.right==None and root.val==sum:
            return True
        if self.hasPathSum(root.left, sum-root.val):
            return True
        if self.hasPathSum(root.right, sum-root.val):
            return True
        return False

s=Solution()
print s.hasPathSum(COMMON.build_tree("{5,4,8,11,#,13,4,7,2,#,#,#,1}"), 22)
print s.hasPathSum(COMMON.build_tree("{5,4,8,11,#,13,4,7,2,#,#,#,1}"), 21)
def Keyboard(key, x, y):
    global TOGGLE_FULLSCREEN,TOGGLE_LIGHTING,TOGGLE_GRID,TOGGLE_WIREFRAME,TOGGLE_BONES,TOGGLE_3D,TOGGLE_ORTHO
    global _mode,DIR
    global __MODEL_DATA,__BONE_DATA
    #//--// need a GUI handler for these
    if key == chr(9): #import model
        typenames,modules,modnames,ihandlers,decmpr = [],[],[],[],[]
        iftypes,isupport = [],[]
        for M,D,I in COMMON.__Scripts[0][0]: #get model import scripts
            if D[1] != ('',['']): #script has model info (not sure if it's safe to remove this yet)
                iftypes+=[(D[1][0],tuple(["*.%s"%T for T in D[1][1]]))]
                for T in D[1][1]:
                    try: isupport.index("*.%s"%T) #is this file type already supported?
                    except: isupport+=["*.%s"%T] #add the current file type to the supported types list
                    modnames+=[D[1][0]] #displayed in the GUI or Tk filter
                    typenames+=[T] #filetype
                    modules+=[M] #current script
                    ihandlers+=[I] #included image handlers
        #----- Tkinter dialog (will be replaced)
        _in=askopenfilename(title='Import Model', filetypes=[('Supported', " ".join(isupport))]+iftypes)
        #-----
        if _in=='': pass #action cancelled
        else:
            COMMON.__functions=[0,0,0,0] #prevent unwanted initialization
            #this block will change once I use my own dialog
            #Tkinter doesn't return the filter ID
            #-----
            it = _in.split('.')[-1]
            if typenames.count(it)>1:
                print '\nThis filetype is used by multiple scripts:\n'
                scr = []
                for idx,ft in enumerate(typenames):
                    if ft==it: scr+=[[modnames[idx],modules[idx]]]
                for I,NM in enumerate(scr):
                    print ' %i - %s'%(I,NM[0])
                print
                sid=input('Please enter the script ID here: ')
                i=__import__(scr[sid][1])
            else:
                ti=typenames.index(it)
                i=__import__(modules[ti])
            COMMON.__ReloadScripts() #check for valid changes to the scripts
            #-----
            try: #can we get our hands on the file?
                COMMON.ImportFile(_in,1) #set the file data
                global Libs; __Libs=Libs #remember last session in case of a script error
                Libs=[[],[],[],[],[["Def_Scene",[]]],[]] #reset the data for importing
                print 'Converting from import format...'
                try: #does the script contain any unfound errors?
                    __LOG('-- importing %s --\n'%_in.split('/')[-1])
                    i.ImportModel(it,None)
                    print 'Verifying data...'
                    glNewList(__MODEL_DATA, GL_COMPILE); __M(); glEndList()
                    glNewList(__BONE_DATA, GL_COMPILE); __B(); glEndList()
                    print 'Updating Viewer\n'
                    glutSetWindowTitle("Universal Model Converter v3.0a (dev5) - %s" % _in.split('/')[-1])
                    #export UMC session data
                    l=open('session.ses','w')
                    l.write(str([1,Libs]))
                    l.close()
                    COMMON.__ClearFiles() #clear the file data to be used for writing
                except:
                    Libs=__Libs
                    print "Error! Check 'session-info.log' for more details.\n"
                    import traceback
                    typ,val,tb=sys.exc_info()
                    traceback.print_exception(typ,val,tb)
                    print
                __Libs=[] #save memory usage
            except: pass #an error should already be thrown
        __WLOG(0) #write log
        COMMON.__CleanScripts() #remove pyc files
    if key == chr(5): #export model
        COMMON.__ClearFiles() #clear the file data again... just in case
        etypenames,emodules,emodnames,ehandlers = [],[],[],[]
        eftypes = []
        for M,D,I in COMMON.__Scripts[0][1]:
            if D[1] != ('',['']): #has model info
                eftypes+=[(D[1][0],tuple(["*.%s"%T for T in D[1][1]]))]
                for T in D[1][1]:
                    emodnames+=[D[1][0]]
                    etypenames+=[T]
                    emodules+=[M]
                    ehandlers+=[I]
        #Tkinter dialog (will be replaced)
        #-----
        _en=asksaveasfilename(title='Export Model', filetypes=eftypes, defaultextension='.ses')
        #-----
        if _en=='': pass
        else:
            COMMON.__functions=[0,0,0,0] #prevent unwanted initialization
            #this block will change once I use my own dialog
            #Tkinter doesn't return the filter ID
            #-----
            et = _en.split('.')[-1]
            if etypenames.count(et)>1:
                print '\nThis filetype is used by multiple scripts:\n'
                scr = []
                for idx,ft in enumerate(etypenames):
                    if ft==et: scr+=[[emodnames[idx],emodules[idx]]]
                for I,NM in enumerate(scr):
                    print ' %i - %s'%(I,NM[0])
                print
                sid=input('Please enter the script ID here: ')
                e=__import__(scr[sid][1])
            else:
                e=__import__(emodules[etypenames.index(et)])
            COMMON.__ReloadScripts() #check for valid changes to the scripts
            #-----
            '''
            try:
                COMMON.ExportFile(_en) #add the file to the data space
                print 'converting to export format...'
                e.ExportModel(et,None)
                COMMON.__WriteFiles()
                print 'Done!'
            except:
                print "Error! Check 'session-info.log' for details.\n"
            '''
            COMMON.ExportFile(_en) #add the file to the data space
            print 'converting to export format...'
            e.ExportModel(et,None)
            COMMON.__WriteFiles()
            print 'Refreshing Viewer\n'
        __WLOG(_mode) #write log
        COMMON.__CleanScripts() #remove pyc files
def propose_demand(self):
    # This function proposes the demand and status of the device for the next time step.
    try:
        # Determine if the device is connected:
        # update n_usage, used as an index for list_starts and list_ends (the schedule)
        self.__dict__.update(COMMON.get_n_usage(**self.__dict__))
        # get the data about when the device should start and end
        self.__dict__.update(COMMON.get_hour(**self.__dict__))
        # convert schedules from hours of the day to unix time
        self.__dict__.update(COMMON.get_unix(**self.__dict__))
        # based on unix_start and unix_end, determine if the device is connected at the current unixtime
        self.__dict__.update(COMMON.is_connected(**self.__dict__))

        # Determine device status:
        # get flexibility
        self.__dict__.update(COMMON.get_flexibility(**self.__dict__))
        # get state of charge
        self.__dict__.update(COMMON.get_soc(**self.__dict__))
        # get job status
        self.__dict__.update(COMMON.get_job_status(**self.__dict__))
        # determine mode of the device
        self.__dict__.update(COMMON.get_mode(**self.__dict__))
        # propose a status for the device (i.e., on or off)
        self.__dict__.update(COMMON.get_p_status(**self.__dict__))

        # Determine device demand for this timestep:
        # propose a power demand for the device
        self.__dict__.update(COMMON.get_p_demand(**self.__dict__))

        # Determine ramping and shedding potential:
        # determine if the device can ramp or shed
        self.__dict__.update(COMMON.check_ramp_shed(**self.__dict__))
        # calculate ramping power
        self.__dict__.update(COMMON.get_ramp_power(**self.__dict__))
        # calculate shedding power
        self.__dict__.update(COMMON.get_shed_power(**self.__dict__))
    except Exception as e:
        print("Error propose_demand:", e)
        return 0
import numpy as np
import pylab as py
from COMMON import nanosec,yr,week,grav,msun,light,mpc,hub0,h0,omm,omv
import COMMON as CM
import USEFUL as UF
from matplotlib import colors
from scipy import integrate, interpolate
import Formulas_AjithEtAl2008 as A8

#Input parameters:
from INPUT_PARAMETERS import *

outputdir='../data/output/'
if cluster=='1':
    outputdir='/lustre/projects/p002_swin/prosado/horizon/data/output/'
outputfile=outputdir+'snr_z_vs_mz_'+detector+'_'+flim_model+'_mat' #File to save data.
overwrite=CM.z_vs_mc_check(outputdir, outputfile, overwrite) #Check if the output directory and files already exist.

#Load the detector's sensitivity.
fvecd, sn=CM.detector_f(detector, factor)
sn_f=interpolate.interp1d(fvecd,sn)
fmin, fmax=min(fvecd), max(fvecd)
fmin*=1.000001
fmax*=0.99999 #I reduce the upper frequency slightly, so that there are no issues when extrapolating the sensitivity curve close to this value (the issue appears only when evaluating the sensitivity exactly at fmax).
if detector in ['EPTA', 'PPTA']:
    if (1./tobs)>fmin:
        fmin=1./tobs
    else:
        print 'The minimum frequency of the sensitivity curve is larger than 1/T! The noise cannot be extrapolated to lower frequencies. Exiting...'
        exit()
fvec=np.logspace(np.log10(fmin),np.log10(fmax),fbins)
#fmin=min(fvecd)
fmax=max(fvecd)
fvec=np.logspace(np.log10(fmin),np.log10(fmax),fbins)
#fvec=np.arange(fmin,fmax,fbin)
svec=np.interp(fvec,fvecd,sn) #Power spectral density interpolated.
fvec_m=0.5*(fvec[1:]+fvec[:-1]) #Vector of frequencies centred at the arithmetic mean of the bin.
svec_m=np.interp(fvec_m,fvecd,sn) #Power spectral density interpolated at the arithmetic mean of the bin.

#Create vector of chirp mass.
mchvec=np.logspace(minmch, maxmch, mchbins)

#Calculate luminosity distance and similar functions.
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbins) #Vector of redshifts logarithmically spaced.
reds_m=0.5*(reds[1:]+reds[:-1]) #Vector of redshifts at the arithmetic mean of the bin.
lum_dist=CM.comdist(reds_m)*(1.+reds_m) #Luminosity distance in Mpc.

#Choose plotting options that look optimal for the paper.
fig_width = 3.4039
goldenmean=(np.sqrt(5.)-1.0)/2.0
fig_height = fig_width * goldenmean
sizepoints=8
legendsizepoints=4.5
py.rcParams.update({
    'backend': 'ps',
    'ps.usedistiller': 'xpdf',
    'text.usetex': True,
    'figure.figsize': [fig_width, fig_height],
    'axes.titlesize': sizepoints,
    'axes.labelsize': sizepoints,
    'text.fontsize': sizepoints,
    mchfiles.append(plotfiles[mi])
indisort=np.array(mchvec).argsort()
mchvec=np.array(mchvec)[indisort]
mchfiles=np.array(mchfiles)[indisort]

#Optimal plotting options.
import PARAMETER_PLOTS
left, right, top, bottom, cb_fraction=0.13, 0.94, 0.96, 0.16, 0.08 #Borders of the plot.

for mi in xrange(len(mchfiles)):
    mchi=mchvec[mi]
    print 'Plot for chirp mass 10^{%e} msun.' %mchi
    print
    mch=10**(mchi) #Chirp mass of the BH binary.
    m=mch*2**(1./5.) #Mass of each individual BH, assuming equal masses.
    nu=CM.nu_f(m,m) #Symmetric mass ratio.
    mtot=2.*m #Total mass.
    data=np.load(inputdatadir+mchfiles[mi])[()]
    snr_final_mat=data['snr_mat']
    f_mat=data['f_mat']
    z_mat=data['z_mat']
    z_app=data['z_app']
    xmin,xmax=np.amin(f_mat),np.amax(f_mat) #Edges of the x-axis.
    ymin,ymax=np.amin(z_mat),np.amax(z_mat) #Edges of the y-axis.
    lso_mat=np.zeros(np.shape(z_mat))
    if flim_model=='ISCO':
        flim_mat=CM.felso(m, m)*1./(1.+z_mat)
    elif flim_model=='A8':
from INPUT_PARAMETERS import *

fmin=1./tobs
fmax=1./cad
fmin=1 #Overridden for this test.
fmax=1000 #Overridden for this test.
fvec=np.logspace(np.log10(fmin), np.log10(fmax), fbins)
m1=10**(1.)
m2=m1
dist=1000.*mpc
nu=A8.nu_f(m1,m2)
mtot=m1+m2
mch=CM.mchirp(m1, m2)
print np.log10(mch)
fmer=A8.f_mer(nu, mtot)
frin=A8.f_rin(nu, mtot)
fcut=A8.f_cut(nu, mtot)
fsig=A8.f_sig(nu, mtot)
flso=CM.felso(m1,m2)
py.ion()
fvec_low=np.zeros(len(fvec))
fvec_mid=np.zeros(len(fvec))
fvec_upp=np.zeros(len(fvec))
fvec_low[fvec<fmer]=fvec[fvec<fmer]
fvec_mid[(fmer<=fvec)&(fvec<frin)]=fvec[(fmer<=fvec)&(fvec<frin)]
import numpy as np
import COMMON as CM
from COMMON import yr
from Formulas_AjithEtAl2008 import apar, bpar, cpar, xpar, ypar, zpar, kpar
import Formulas_AjithEtAl2008 as A8
from scipy import interpolate

factor=1.
detector='ALIGO'
tobs=10.*yr
mch=10.
m=mch*2.**(1./5.)
nu=1./4.
mtot=2.*m
fbins=1000
finteg=100
z=np.array([10.])
DL=CM.comdist(z)*(1.+z) #Luminosity distance in Mpc.
fvecd, sn=CM.detector_f(detector, factor)
sn_f=interpolate.interp1d(fvecd,sn)
fmin=min(fvecd)
fmax=max(fvecd)
fvec=np.logspace(np.log10(fmin), np.log10(fmax), fbins)
sred=sn_f(fvec)

def htilde_f(nu, mtot, z, lumdist, fvec):
    '''Piecewise effective amplitude of the waveform.'''
    mch=nu**(3./5.)*mtot
    fmer=A8.f_mer(nu, mtot)*1./(1.+z)
    frin=A8.f_rin(nu, mtot)*1./(1.+z)
    fcut=A8.f_cut(nu, mtot)*1./(1.+z)
        if cell==None:
            dCount=self.cur.execute("select * from %s"%table)
        else:
            dCount=self.cur.execute("select * from %s where interfaceId='%s' "%(table,cell))
        dDatas=self.cur.fetchmany(dCount)
        for i in dDatas:
            resultlist.append(list(i))
        return resultlist

    def update_data(self,table,data,interfaceid,TCNO):
        Sqlit=self.cur.execute("update %s set Parameter='%s' where interfaceId='%s' and TCNO=%d" %(table,data,interfaceid,TCNO))
        self.cur.close()
        self.conn.commit()
        self.conn.close()

# for i in MySql().get_data('vq_testcase','Login'):
#     print i
# MySql().get_data('VQ_login')
# print aa
# for i in aa:
#     for j in i:
#         print j

import CONFIG as CFG
import COMMON as cn
import os

dataPath=os.path.join(CFG.prjDir,'BaseCase','GJB','data','10')
b=cn.get_xls_sql('testData.xlsx',dataPath)
for i in b:
    print i
import numpy as np
import pylab as py
from COMMON import grav, light, hub0, yr, mpc, gpc, msun, week, h0, omm, omv
import COMMON as CM
from Formulas_AjithEtAl2008 import apar, bpar, cpar, xpar, ypar, zpar, kpar
import Formulas_AjithEtAl2008 as A8
from scipy import interpolate

mch=10**(10.)
flow=1.5e-8
fupp=0.
zlim=0.2
zmax=1
m=mch*2**(1./5.)
print CM.felso(m,m)*1./(1.+zmax)
py.ion()
#zvecall=np.linspace(0.,1.,1000)
#comdistall=CM.comdist(zvecall)
zvec1=np.linspace(0.,zlim,10000)
comdist1=CM.comdist(zvec1)
zvec2=np.linspace(zlim,zmax,10000)
comdist2=CM.comdist(zvec2)

def te(mch,flow,fupp,z):
    '''Time for a binary at redshift z to sweep from observed frequency flow to fupp.'''
    return 5.*light**5./(256.*np.pi**(8./3.)*(grav*msun*mch)**(5./3.)*(1.+z)**(8./3.))*(flow**(-8./3.)-fupp**(-8./3.))
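#Quick sketch of calling te above: the elapsed time, in years, for a binary of
#the chirp mass defined above to sweep from flow up to an assumed upper
#frequency. fupp_example is illustrative; note that the fupp=0. defined above
#would make the fupp**(-8./3.) term diverge, so a finite value is used here.
fupp_example=1.e-7 #Assumed upper frequency in Hz.
print te(mch, flow, fupp_example, zlim)/yr #Elapsed time in years.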
#antiSD_Ecoli = "UGGA UCACCUCCUU"
# antiSD_Ecoli = "CUGCGGUUGGA UCACCUCCUU A"
# In this code, the anti-SD sequence (including positions and lengths)
# is determined by the sequence above.
#UTR_LEN=200
#CDS_LEN=300
#RDS_LEN = 37

# Import this py file and use CalculateRBSScore(utr, cds); that's all.

config = COMMON.CONFIG()

# Parameters for kinetics.
GENE_COPY_NUMBER = 100.0
RNA_SYNTHESIS_RATE = 20.0
RIBOSOMES_PER_RNA = 20.0
FREE_RIBOSOME_NUMBER = 57000.0
RNA_HALF_LIFE = 2.0 # /min

def findSD(seq, atg_index, aSD):
    '''Finds the index of the SD sequence.'''
    sff = SDFINDER.SDFINDER()
    sff.setAll(seq, aSD, atg_index, 37, 1)
    sff.setPerl(True)
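# A hypothetical call to the entry point named in the header comment; the
# sequences below are made-up toys, and CalculateRBSScore itself is defined
# elsewhere in this module.
utr = "A"*170 + "UGGAGG" + "A"*24 # Toy 200-nt UTR containing an SD-like motif.
cds = "AUG" + "GCU"*99 # Toy 300-nt coding sequence.
print CalculateRBSScore(utr, cds)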
import numpy as np
import COMMON as CM
from Formulas_AjithEtAl2008 import apar, bpar, cpar, xpar, ypar, zpar, kpar
import Formulas_AjithEtAl2008 as A8

#ifile='../data/Eric/pablo_10_10.dat.txt'
#ofile='../data/Eric/pablo_10_10'
#eric=np.loadtxt(ifile)
#tvec=eric[:,0]
#hpvec=eric[:,1]
#hcvec=eric[:,2]
#dicti={'tvec':tvec, 'hpvec':hpvec, 'hcvec':hcvec}
#np.save(ofile, dicti)

m1=10.
m2=10.
nu=CM.nu_f(m1,m2)
mtot=m1+m2
mch=CM.mchirp(m1,m2)
reds=0.
dl=1.
phic=0.
inc=0.
ifile='../data/Eric/pablo_10_10.npy'
eric=np.load(ifile)[()]
tcoal=eric['tvec']
hpvec=eric['hpvec']
hcvec=eric['hcvec']
tvec=tcoal-tcoal[0]
hpinsp=CM.hp(inc, CM.hamp_t(mch, reds, dl, tvec[-1], tvec[:-1]), CM.phase_t(mch, reds, phic, tvec[-1],tvec[:-1]))
#fvec=np.arange(fmin,fmax,fbin)
svec=np.interp(fvec,fvecd,sn) #Power spectral density interpolated.
fvec_m=0.5*(fvec[1:]+fvec[:-1]) #Vector of frequencies centred at the arithmetic mean of the bin.
svec_m=np.interp(fvec_m,fvecd,sn) #Power spectral density interpolated at the arithmetic mean of the bin.
#I take the minimum of S_n as a first approach to have the optimal result assuming white noise.
s0=min(svec_m)

#Create vector of chirp mass.
mchvec=np.logspace(minmch, maxmch, mchbins)

#Calculate luminosity distance and similar functions.
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbins) #Vector of redshifts logarithmically spaced.
reds_m=0.5*(reds[1:]+reds[:-1]) #Vector of redshifts at the arithmetic mean of the bin.
lum_dist=CM.comdist(reds_m)*(1.+reds_m) #Luminosity distance in Mpc.

#Choose plotting options that look optimal for the paper.
fig_width = 3.4039
goldenmean=(np.sqrt(5.)-1.0)/2.0
fig_height = fig_width * goldenmean
sizepoints=8
legendsizepoints=4.5
py.rcParams.update({
    'backend': 'ps',
    'ps.usedistiller': 'xpdf',
    'text.usetex': True,
    'figure.figsize': [fig_width, fig_height],
    'axes.titlesize': sizepoints,
    'axes.labelsize': sizepoints,
    'text.fontsize': sizepoints,
def update_demand(self):
    # Update the heat contribution of the TCL, considering the aggregator's command:
    # recalculate demand for the next timestep.
    self.__dict__.update(COMMON.get_a_demand(**self.__dict__))
    return self.a_demand
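# A sketch of the per-timestep sequence implied by the four methods above.
# 'Device' and its argument-free constructor are assumptions for illustration;
# the real class and its initialization live elsewhere in this codebase.
device = Device()
for t in range(96):                  # e.g., one day at 15-minute steps
    device.propose_demand()          # propose a status and power demand
    device.decide()                  # approve or deny the proposed status
    device.update_demand()           # recalculate demand given the decision
    device.simulate_model()          # advance the thermal model one step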
import numpy as np
import pylab as py
from COMMON import nanosec,yr,week,grav,msun,light,mpc,hub0,h0,omm,omv
import COMMON as CM
from USEFUL import time_estimate
from matplotlib import colors
from scipy import interpolate
import Formulas_AjithEtAl2008 as A8

#Input parameters:
from INPUT_PARAMETERS import *

outputfile='../data/output/snr_z_vs_mc_'+detector+'_'+flim_model+'_iter' #File to save data.
#-----------------------------------------------------------------
#Load the detector's sensitivity.
fvecd, sn=CM.detector_f(detector, factor)
sn_f=interpolate.interp1d(fvecd,sn)
fmin, fmax=min(fvecd), max(fvecd)
if detector in ['EPTA', 'PPTA']:
    fmin=1./tobs
    fmax*=0.99999 #I reduce the upper frequency slightly, so that there are no issues when extrapolating the sensitivity curve close to this value (the issue appears only when evaluating the sensitivity exactly at fmax).
fvec=np.logspace(np.log10(fmin),np.log10(fmax),fbins)
mchvec=np.logspace(minmch, maxmch, mchbins) #Vector of physical chirp mass.
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbins) #Vector of redshifts logarithmically spaced.
lum_dist=CM.comdist(reds)*(1.+reds) #Luminosity distance in Mpc.

#Calculate S/N for a given physical chirp mass.
z_mat=np.tile(reds,(len(mchvec),1)).T #Matrix with z.
DL_mat=np.tile(lum_dist,(len(mchvec),1)).T #Matrix with luminosity distance.
mch_mat=np.tile(mchvec,(len(reds),1)) #Matrix with chirp mass.
m_mat=mch_mat*2**(1./5.)
outputdir='/lustre/projects/p002_swin/prosado/horizon/data/output/SNR_z_vs_mz_PPTA_model_'+flim_model+'/'
if len(sys.argv)<2:
    print 'Input the number of the pulsar to consider!'
    exit()
pulsi=int(sys.argv[1]) #Number of the pulsar, from 0 to 19.

#Obtain noise curves for the different PPTA pulsars.
fmini=1./tobs #Minimum frequency.
fmaxi=1./cad #Maximum frequency.
fvec=np.logspace(np.log10(fmini), np.log10(fmaxi), fbins)
Snf=CM.PPTA_red_noise(fvec, cad) #Matrix with the noise of each pulsar.
numpul=np.shape(Snf)[0] #Number of pulsars.
mchvec=np.logspace(minmch, maxmch, mchbins) #Vector of physical chirp mass.
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbins) #Vector of redshifts logarithmically spaced.
lum_dist=CM.comdist(reds)*(1.+reds) #Luminosity distance in Mpc.
print 'Obtaining S/N for pulsar %i / %i .' %(pulsi+1, numpul)
print
outputfile=outputdir+'snr_pulsar'+'%i' %(pulsi+1)
#overwrite=CM.z_vs_mc_check(outputdir, outputfile, overwrite) #Check if output directory and files already exist.

#Obtain the pulsar red-noise curve.
sn_f=interpolate.interp1d(fvec, Snf[pulsi,:])
fmin=min(fvec)
fmax=0.99999*max(fvec) #I reduce the upper frequency slightly, so that there are no issues when extrapolating the sensitivity curve close to this value (the issue appears only when evaluating the sensitivity exactly at fmax).

#Calculate S/N for a given physical chirp mass.
mch_mat=np.zeros((len(reds), len(mchvec)))
z_mat=np.zeros(np.shape(mch_mat))
import COMMON

class Solution:
    # @param head, a ListNode
    # @return a ListNode
    def insertionSortList(self, head):
        if head==None:
            return None
        result=head
        p1=head
        while(True):
            # From the start to p1, the list is sorted.
            p2=p1.next
            if p2==None:
                break
            if p2.val>=p1.val:
                p1=p2
                continue
            # Insert p2 into the sorted part of the list.
            p1.next=p2.next
            if p2.val<result.val:
                p2.next=result
                result=p2
            else:
                pointer=result
                while pointer.next.val<p2.val:
                    pointer=pointer.next
                p2.next=pointer.next
                pointer.next=p2
        return result

s=Solution()
COMMON.print_list(s.insertionSortList(COMMON.build_list([1,5,2,4,3])))
raw_input('enter')

#Load SBHB candidates.
candi=np.load('../data/Graham/Candidates.npy')[()]
candi_z=candi['z']
candi_mtot=candi['t_mass']
candi_fgw=candi['f_gw_obs']
candi_mch=candi_mtot*2**(-1./5.)*0.5
candi_snr=np.load('../data/Graham/Cand_snr_PPTA_model_'+flim_model+'.npy')[()]['snr']

#Derive some quantities.
reds=z_mat[:,0]
mchvec=mch_mat[0,:]
if flim_model=='ISCO':
    flim_mat=CM.felso(mch_mat*2.**(1./5.),mch_mat*2.**(1./5.))*1./(1.+z_mat)
elif flim_model=='A8':
    flim_mat=A8.f_cut(1./4., 2.*mch_mat*2.**(1./5.))*1./(1.+z_mat)
zlim_mat=np.ones(np.shape(z_mat))*np.nan
zlim_mat[flim_mat<fmin]=1. #This matrix shows the region of z-Mc that cannot be seen, because the ISCO has already been reached below the minimum observable frequency.
#snr_mat[snr_mat<=0]=np.nan
snr_mat[snr_mat<=0]=-90.
snr_mat[snr_mat>0.]=np.log10(snr_mat[snr_mat>0.])

#Optimal plotting options.
#import PARAMETER_PLOTS
#Choose plotting options that look optimal for the paper.
fig_width = 3.4039*2./3.
#goldenmean=(np.sqrt(5.)-1.0)/2.0
#Input parameters:
from INPUT_PARAMETERS import *

cb_width=0.014
cb_height=0.765
left, right, top, bottom, cb_fraction=0.063, 0.905, 0.97, 0.205, 0.08 #Borders of the plot.
maxstrainlevel=-11
minstrainlevel=-15
strainlevels=10
zbins=1000
fbins=1000
mchvec=np.array([9.8,10.,10.2]) #Array of values of log10(chirp mass/msun).
outputplot='../plots/z_vs_f_EPTA_strain_ALL.png'
#-----------------------------------------------------------------
#Load the detector's sensitivity.
fvecd, sn=CM.detector_f(detector, factor)
sn_f=interpolate.interp1d(fvecd,sn)
fmin, fmax=min(fvecd), max(fvecd)
if detector in ['EPTA', 'PPTA']:
    fmin=1./tobs

#Load EPTA upper-limit data.
ifile2='../data/EPTA/upper_fixed_at_maxL_Steve.txt' #Fp with the noise fixed at the maximum-likelihood values (Fp_ML).
ul2=np.array(np.loadtxt(ifile2))
fvecd,hvecd=ul2[:,0],ul2[:,1]
#fmin=1.02623486508e-09 #In order to have the same limits as the EPTA plots.
#fmax=3.15323154356e-07 #In order to have the same limits as the EPTA plots.
fmin=min(fvecd)
fmax=max(fvecd)
fvec=np.logspace(np.log10(fmin),np.log10(fmax),fbins)
hvec=np.interp(fvec, fvecd, hvecd)
#freq=7e-8 #Where you do not see the turnover, because it lies beyond the ISCO.
plotxmin=1e-2
plotxmax=1e2
plotymin=1
plotymax=1e3
outputdir='../plots/'
snrt=8.
snoise=1.*10**(-23) #Noise power spectral density (assumed white).
limit_by='isco' #To stop the S/N at the ISCO.
#limit_by='coal' #To stop the S/N at the coalescence.
#-----------------------------------------------------------------
#Calculate luminosity distance and similar functions.
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbins) #Vector of redshifts logarithmically spaced.
reds_m=0.5*(reds[1:]+reds[:-1]) #Vector of redshifts at the arithmetic mean of the bin.
com_dist=CM.comdist(reds_m) #Vector of comoving distance in Mpc.
lum_dist=(1.+reds_m)*com_dist #Vector of luminosity distance in Mpc.

#Calculate strains.
hamp=CM.hamp_f(mch, reds_m, lum_dist, freq)

#Calculate the true S/N.
m1=mch*2**(1./5.)
m2=m1
fisco=CM.felso(m1, m2)*1./(1.+reds_m)
tisco=CM.tafter(mch, freq, fisco, reds_m)
if limit_by=='isco':
    zisco=reds_m[abs(tobs-tisco).argmin()]
    fmaxobs=np.ones(len(reds_m))*fisco
    fmaxobs[reds_m<zisco]=CM.fafter(mch, reds_m[reds_m<zisco], freq, tobs)
    snrvec=CM.snr_white(mch, reds_m, lum_dist, freq, fisco, fmaxobs, snoise)
outputdir='../data/output/'
if cluster=='1':
    datafile='/lustre/projects/p002_swin/prosado/horizon/data/output/COMBINED_SNR_'+detector+'_'+flim_model+'.npy'
    outputdir='/lustre/projects/p002_swin/prosado/horizon/data/output/'
outputfile=outputdir+'DP_'+detector+'_'+flim_model
#-----------------------------------------------------------------
#Get data.
data=np.load(datafile)[()]
snr=data['combined_max_snr']
mch_mat=data['mch_mat']
z_mat=data['z_mat']
f_mat=data['f_max_mat']
tobs, mchbins, fbins, finteg=data['tobs_yr'], data['mchbins'], data['fbins'], data['finteg']
f0t=CM.f0thres(numtem, fap) #Threshold in the Fe-statistic.
snrshape=np.shape(snr)
dpvec=np.zeros(snrshape)
t=time_estimate(snr.size) #A class that prints the estimated computation time.
for row in xrange(snrshape[0]):
    for column in xrange(snrshape[1]):
        if snr[row,column]<=0.:
            continue
        t.display() #Shows the remaining computation time.
        t.increase() #Needed to calculate the remaining computation time.
        dpvec[row,column]=CM.dpfun(f0t, snr[row,column], minrange, intpoints)

#Save combined data (only frequencies and S/N at the optimal frequencies).
dicti={'FAP':fap, 'numtem':numtem, 'minrange':minrange, 'dpinteg':intpoints, 'DP':dpvec, 'combined_max_snr':snr, 'f_max_mat':f_mat, 'mch_mat':mch_mat, 'z_mat':z_mat, 'tobs_yr':tobs, 'mchbins':mchbins, 'fbins':fbins, 'finteg':finteg}
np.save(outputfile,dicti)
mchvec=np.array([9.8,10.]) #Array of values of log10(chirp mass/msun). There will be a plot for each value of mch.

#Obtain noise curves for the different PPTA pulsars.
fmini=1./tobs #Minimum frequency.
fmaxi=1./cad #Maximum frequency.
#fmini=1.02623486508e-09 #In order to have the same limits as the EPTA plots.
#fmaxi=3.15323154356e-07 #In order to have the same limits as the EPTA plots.
fvec=np.logspace(np.log10(fmini), np.log10(fmaxi), fbins)
fmin=min(fvec)
fmax=0.99999*max(fvec) #I reduce the upper frequency slightly, so that there are no issues when extrapolating the sensitivity curve close to this value (the issue appears only when evaluating the sensitivity exactly at fmax).
Snf=CM.PPTA_red_noise(fvec, cad) #Matrix with the noise of each pulsar.
numpul=np.shape(Snf)[0] #Number of pulsars.
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbins) #Vector of redshifts logarithmically spaced.
lum_dist=CM.comdist(reds)*(1.+reds) #Luminosity distance in Mpc.

#Calculate S/N for a given physical chirp mass.
z_mat=np.tile(reds,(len(fvec),1)).T #Matrix with z.
DL_mat=np.tile(lum_dist,(len(fvec),1)).T #Matrix with luminosity distance.
f_mat=np.tile(fvec,(len(reds),1)) #Matrix with (minimum) frequencies.
fmaxdet_mat=np.ones(np.shape(f_mat))*fmax #Matrix with the maximum frequencies allowed by the detector.
for mi in xrange(len(mchvec)):
    print 'Plot for chirp mass 10^{%e} msun.' %mchvec[mi]
    print
    mch=10**(mchvec[mi]) #Chirp mass of the BH binary.
    m=mch*2**(1./5.) #Mass of each individual BH, assuming equal masses.
    nu=CM.nu_f(m,m) #Symmetric mass ratio.
    mtot=2.*m #Total mass.
    outputfile=outputdir+'mch%.3e' %mch
import numpy as np
import pylab as py
from scipy import integrate, interpolate
import COMMON as CM
import Formulas_AjithEtAl2008 as A8
from INPUT_PARAMETERS import *

outputfile='../data/Graham/Cand_snr_PPTA_model_'+flim_model

#Load candidate data.
cand=np.load('../data/Graham/Candidates.npy')[()]
cand_z=cand['z'] #Redshift.
cand_mtot=cand['t_mass'] #Total mass.
cand_f=cand['f_gw_obs'] #Observed GW frequency.

#Derive some other quantities.
cand_m=cand_mtot*0.5 #Mass of each BH.
cand_mch=CM.mchirp(cand_m, cand_m) #Chirp mass.
cand_DL=CM.comdist(cand_z)*(1.+cand_z)

#Obtain noise curves for the different PPTA pulsars.
fmini=1./tobs #Minimum frequency.
fmaxi=1./cad #Maximum frequency.
fvec=np.logspace(np.log10(fmini), np.log10(fmaxi), fbins)
fmin=min(fvec)
fmax=0.99999*max(fvec) #I reduce the upper frequency slightly, so that there are no issues when extrapolating the sensitivity curve close to this value (the issue appears only when evaluating the sensitivity exactly at fmax).
Snf=CM.PPTA_red_noise(fvec, cad) #Matrix with the noise of each pulsar.
numpul=np.shape(Snf)[0] #Number of pulsars.
#py.ion()
#py.loglog(cand_f,'o')
#py.ylim(fmini, fmaxi)
#raw_input('enter')
    mchfiles.append(plotfiles[mi])
indisort=np.array(mchvec).argsort()
mchvec=np.array(mchvec)[indisort]
mchfiles=np.array(mchfiles)[indisort]

#Optimal plotting options.
import PARAMETER_PLOTS
left, right, top, bottom, cb_fraction=0.125, 0.945, 0.96, 0.17, 0.08 #Borders of the plot.

for mi in xrange(len(mchfiles)):
    mchi=mchvec[mi]
    print 'Plot for chirp mass 10^{%e} msun.' %mchi
    print
    mch=10**(mchi) #Chirp mass of the BH binary.
    m=mch*2**(1./5.) #Mass of each individual BH, assuming equal masses.
    nu=CM.nu_f(m,m) #Symmetric mass ratio.
    mtot=2.*m #Total mass.
    data=np.load(inputdatadir+mchfiles[mi])[()]
    snr_final_mat=data['snr_mat']
    f_mat=data['f_mat']
    z_mat=data['z_mat']
    xmin,xmax=np.amin(f_mat),np.amax(f_mat) #Edges of the x-axis.
    ymin,ymax=np.amin(z_mat),np.amax(z_mat) #Edges of the y-axis.
    #z_app=np.zeros(np.shape(z_mat))
    lso_mat=np.zeros(np.shape(z_mat))
    if flim_model=='ISCO':
        flim_mat=CM.felso(m, m)*1./(1.+z_mat)
import COMMON

class Solution:
    # @param root, a tree node
    # @return a list of lists of integers
    def levelOrderBottom(self, root):
        if root==None:
            return []
        data=[[root]]
        while(True):
            next=[]
            for node in data[len(data)-1]:
                if node.left!=None:
                    next+=[node.left]
                if node.right!=None:
                    next+=[node.right]
            if next!=[]:
                data+=[next]
            else:
                break
        for arr in data:
            size=0
            while(size<len(arr)):
                arr[size]=arr[size].val
                size+=1
        data.reverse()
        return data

s=Solution()
print s.levelOrderBottom(COMMON.build_tree("{3,9,20,#,#,15,7}"))
fbin=1./tobs
fmin=1./tobs
#fmin=min(fvecd)
fmax=max(fvecd)
fvec=np.logspace(np.log10(fmin),np.log10(fmax),fbins)
#fvec=np.arange(fmin,fmax,fbin)
svec=np.interp(fvec,fvecd,sn) #Power spectral density interpolated.
fvec_m=0.5*(fvec[1:]+fvec[:-1]) #Vector of frequencies centred at the arithmetic mean of the bin.
svec_m=np.interp(fvec_m,fvecd,sn) #Power spectral density interpolated at the arithmetic mean of the bin.

#Calculate luminosity distance and similar functions.
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbins) #Vector of redshifts logarithmically spaced.
reds_m=0.5*(reds[1:]+reds[:-1]) #Vector of redshifts at the arithmetic mean of the bin.
lum_dist=CM.comdist(reds_m)*(1.+reds_m) #Luminosity distance in Mpc.

#Choose plotting options that look optimal for the paper.
fig_width = 3.4039
goldenmean=(np.sqrt(5.)-1.0)/2.0
fig_height = fig_width * goldenmean
sizepoints=8
legendsizepoints=4.5
py.rcParams.update({
    'backend': 'ps',
    'ps.usedistiller': 'xpdf',
    'text.usetex': True,
    'figure.figsize': [fig_width, fig_height],
    'axes.titlesize': sizepoints,
    'axes.labelsize': sizepoints,
    'text.fontsize': sizepoints,
fbin=1./tobs
fmin=1./tobs
#fmin=min(fvecd)
fmax=max(fvecd)
fvec=np.logspace(np.log10(fmin),np.log10(fmax),fbins)
#fvec=np.arange(fmin,fmax,fbin)
svec=np.interp(fvec,fvecd,sn) #Power spectral density interpolated.
fvec_m=0.5*(fvec[1:]+fvec[:-1]) #Vector of frequencies centred at the arithmetic mean of the bin.
svec_m=np.interp(fvec_m,fvecd,sn) #Power spectral density interpolated at the arithmetic mean of the bin.

#Calculate luminosity distance and similar functions.
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbins) #Vector of redshifts logarithmically spaced.
reds_m=0.5*(reds[1:]+reds[:-1]) #Vector of redshifts at the arithmetic mean of the bin.
lum_dist=CM.comdist(reds_m)*(1.+reds_m) #Luminosity distance in Mpc.

#I assume white noise, i.e. the noise has a particular constant value:
sn=np.ones(len(sn))*sn[0]

mchvec=np.array([8.5, 9., 9.5, 10., 10.5, 11.])
py.ion()
for mch in xrange(len(mchvec)):
    mchi=10**(mchvec[mch])
    m1i=mchi*2**(1./5.)
    m2i=m1i
    redsi=0.5
    lumdisti=lum_dist[abs(redsi-reds_m).argmin()]
    feisco=CM.felso(m1i, m2i)
    fisci=feisco*1./(1.+redsi)
reds_text=3. #Redshift at which the text with the frequency should appear.
factor_right=5.5
factor_left=0.2
flevels=5 #Number of frequency contours.
maxsnrlevel=7. #Maximum S/N level in the colorbar.
minsnrlevel=np.log10(snrt) #Minimum S/N level in the colorbar.
#datafile='../data/output/snr_SENSIT_red_'+detector+'_mat.npy'
#datafile='../data/output/snr_z_vs_mc_'+detector+'_'+flim_model+'_iter.npy'
#datafile='../data/output/snr_z_vs_mc_'+detector+'_'+flim_model+'_mat.npy'
datafile='../data/output/COMBINED_SNR_'+detector+'_'+flim_model+'.npy'
if cluster=='1':
    datafile='/lustre/projects/p002_swin/prosado/horizon/data/output/COMBINED_SNR_'+detector+'_'+flim_model+'.npy'
outputplot='../plots/z_vs_mc_'+detector+'_'+flim_model
#-----------------------------------------------------------------
#Load detector data.
fvecd, sn=CM.detector_f(detector, factor)
fmin, fmax=min(fvecd), max(fvecd)
if detector in ['EPTA', 'PPTA']:
    fmax*=0.99999 #I reduce the upper frequency slightly, so that there are no issues when extrapolating the sensitivity curve close to this value (the issue appears only when evaluating the sensitivity exactly at fmax).
    if (1./tobs)>fmin:
        fmin=1./tobs
    else:
        print 'The minimum frequency of the sensitivity curve is larger than 1/T! The noise cannot be extrapolated to lower frequencies. Exiting...'
        exit()

#Load S/N data.
data=np.load(datafile)[()]
snr_mat=data['combined_max_snr']
mch_mat=data['mch_mat']
z_mat=data['z_mat']
f_mat=data['f_max_mat']
import COMMON

class Solution:
    def helper(self,root):
        # Returns (is_valid, leftmost node, rightmost node) of the subtree.
        if root==None:
            return (True,None,None)
        l=self.helper(root.left)
        r=self.helper(root.right)
        if not (l[0] and r[0]):
            return (False,None,None)
        # The largest value in the left subtree must be smaller than root,
        # and the smallest value in the right subtree must be larger than root.
        if l[2]!=None and l[2].val>=root.val:
            return (False,None,None)
        if r[1]!=None and r[1].val<=root.val:
            return (False,None,None)
        # Propagate the extremes of this subtree upwards (the original code
        # only updated these on failure, which missed violations deeper down).
        res_l=l[1] if l[1]!=None else root
        res_r=r[2] if r[2]!=None else root
        return (True,res_l,res_r)

    # @param root, a tree node
    # @return a boolean
    def isValidBST(self, root):
        return self.helper(root)[0]

s=Solution()
print s.isValidBST(COMMON.build_tree("{1,#,2,#,3}"))
print s.isValidBST(COMMON.build_tree("{1,2,3}"))