Example #1
def read_posterior_samples(f,injrow):
    """Returns a bppu posterior sample object
    """
    peparser=bppu.PEOutputParser('common')
    commonResultsObj=peparser.parse(open(f,'r'))
    data = bppu.Posterior(commonResultsObj,SimInspiralTableEntry=injrow,injFref=100.0)
    # Derive additional parameters (tilts, component masses, tidal parameters, ...)
    try:
      data.extend_posterior()
    except Exception:
      # not every sample set contains the inputs needed for the extra parameters
      pass
    return data
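A minimal usage sketch (the file name is a placeholder; assumes "from lalinference import bayespputils as bppu" at module level, and that a SimInspiral table row is passed as injrow when an injection is available):

# hypothetical ASCII posterior file in the LALInference "common" format
pos = read_posterior_samples('posterior_samples.dat', injrow=None)
print(pos.names)              # parameter names found in the file
print(pos['mchirp'].median)   # summary statistic for one parameter, if present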
Example #2
def load_data(filename, header=None):
    peparser = bppu.PEOutputParser('common')
    commonObj = peparser.parse(open(filename, 'r'), info=[header, None])
    pos = bppu.Posterior(commonObj)
    return pos
Example #3
def downsample_and_evidence(data_hdf5, deltaLogP=None, fixedBurnin=None, nDownsample=None, verbose=False):
	"""
	Remove burn-in from the beginning of the MCMC chains, downsample each chain
	by its respective autocorrelation length, and compute the evidence for the
	set of parallel-tempered chains through a thermodynamic integral.
	"""
	if not data_hdf5.lower().endswith(('.hdf5', '.hdf', '.h5')):
		print('cbcBayesMCMC2pos only supports HDF5 input; for older file formats please revert to cbcBayesThermoInt and cbcBayesPostProc')
		sys.exit(1)

	peparser = bppu.PEOutputParser('hdf5')

	ps, samps = peparser.parse(data_hdf5, deltaLogP=deltaLogP, fixedBurnins=fixedBurnin,
		nDownsample=nDownsample, tablename=None)
	posterior_samples = apt.Table(samps, names=ps)

	highTchains = []
	for i in range(1,int(posterior_samples['nTemps'][0])):
		ps, samps = peparser.parse(data_hdf5, deltaLogP=deltaLogP, fixedBurnins=fixedBurnin,
			nDownsample=nDownsample, tablename='chain_'+str('%02.f' %i))
		highTchains.append(apt.Table(samps, names=ps))
		if verbose: print('chain_'+str('%02.f' %i)+' at a temperature '+str(highTchains[i-1]['temperature'].mean()))

	betas = np.zeros(len(highTchains)+1)
	logls = np.zeros_like(betas)

	betas[0] = 1./np.median(posterior_samples['temperature'])
	logls[0] = np.median(posterior_samples['logl'])

	for i in range(len(highTchains)):
		betas[i+1] = 1./np.median(highTchains[i]['temperature'])
		logls[i+1] = np.median(highTchains[i]['logl'])

	inds = np.argsort(betas)[::-1]

	betas = betas[inds]
	logls = logls[inds]

	# Now extend to infinite temperature by copying the last <log(L)>.
	# This works as long as the chains have extended to high enough
	# temperature to sample the prior.
	# If infinite temperature is already included, this 'duplicate'
	# will not change the final evidence.
	ebetas = np.concatenate((betas, [0.0]))
	elogls = np.concatenate((logls, [logls[-1]]))

	ebetas2 = np.concatenate((betas[::2], [0.0]))
	elogls2 = np.concatenate((logls[::2], [logls[::2][-1]]))

	evidence = -si.trapz(elogls, ebetas)
	evidence2 = -si.trapz(elogls2, ebetas2)

	posterior_samples['chain_log_evidence'] = evidence
	posterior_samples['chain_delta_log_evidence'] = np.absolute(evidence - evidence2)
	posterior_samples['chain_log_noise_evidence'] = posterior_samples['nullLogL']
	posterior_samples['chain_log_bayes_factor'] = posterior_samples['chain_log_evidence'] - posterior_samples['chain_log_noise_evidence']

	if verbose:
		print('logZ = '+str(posterior_samples['chain_log_evidence'][0])+'+-'+str(posterior_samples['chain_delta_log_evidence'][0]))
		print('logB_SN = '+str(posterior_samples['chain_log_bayes_factor'][0]))

	posterior_samples = reassign_metadata(posterior_samples, data_hdf5)

	return posterior_samples
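A minimal usage sketch (the file name is a placeholder; assumes the module-level imports used above, e.g. numpy as np, astropy.table as apt, scipy.integrate as si, sys, lalinference.bayespputils as bppu, and the reassign_metadata helper from the same script):

# hypothetical parallel-tempered MCMC output file from lalinference_mcmc
samples = downsample_and_evidence('lalinference_mcmc_output.hdf5',
                                  deltaLogP=None, fixedBurnin=None,
                                  nDownsample=1000, verbose=True)
print(samples['chain_log_evidence'][0],
      samples['chain_delta_log_evidence'][0])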
Example #4
                      metavar="N")
    parser.add_option("--bootstrap",
                      dest='bootstrap',
                      action='store',
                      default=1,
                      type='int',
                      metavar='N')
    parser.add_option('--output',
                      dest='output',
                      action='store',
                      default=None,
                      metavar='FILE')

    (opts, args) = parser.parse_args()

    pos_parser = bp.PEOutputParser('common')

    f = open(opts.data, "r")
    try:
        pos = bp.Posterior(pos_parser.parse(f))
    finally:
        f.close()

    outfile = None
    if opts.output:
        outfile = open(opts.output, 'w')
    try:
        for i in range(opts.bootstrap):
            if i == 0:
                log_ev = pos.di_evidence(boxing=opts.Nboxing)
            else:
Example #5
def cbcBayesGraceDBinfo(gid=None,samples=None,skymap=None,analysis='LALInference', bcifile=None,bsnfile=None,email=None,message=None,server="https://gracedb.ligo.org/api/"):

  if gid is None or (samples is None and skymap is None):
    print("Must provide a graceDB id and either a posterior samples file or a skymap file\n")
    sys.exit(1)

  import ligo.gracedb.rest 
  import os
  if server is not None:
    g=ligo.gracedb.rest.GraceDb(server)
  else:
    g=ligo.gracedb.rest.GraceDb()
  if samples is not None:
    samples=os.path.realpath(samples)
    if '.hdf' in samples  or '.h5' in samples:
      peparser = bppu.PEOutputParser('hdf5')
      commonResultsObj=peparser.parse(samples)
    else:
      peparser=bppu.PEOutputParser('common')
      commonResultsObj=peparser.parse(open(samples,'r'))

    try:
      pos = bppu.BurstPosterior(commonResultsObj)
      pars=['frequency','quality','hrss']
      units={'frequency':'[Hz]','quality':'','hrss':'','loghrss':''}
    except Exception:
      pos = bppu.Posterior(commonResultsObj)
      pars=['mchirp','q','distance']
      # units are referenced when the summary table is built below; assume Msun and Mpc
      units={'mchirp':'[Msun]','q':'','distance':'[Mpc]'}
    strs=[]
    outstr='<table><tr><th colspan=2 align=center>%s PE summary</th></tr>'%analysis

    for i in pars:
      if i in pos.names:
        _,which=pos._posMap()
        if i=='hrss':
          outstr+='<tr><td align=left>%s %s</td>'%(i,units[i])
          outstr+='<td align=left>%.3e &plusmn; %.3e</td></tr>'%(pos[i].samples[which][0],pos[i].stdev)
        else:
          outstr+='<tr><td align=left>%s %s</td>'%(i,units[i])
          outstr+='<td align=left>%.3f &plusmn; %.3f</td></tr>'%(pos[i].samples[which][0],pos[i].stdev)
    if bcifile is not None and os.path.isfile(bcifile):
      bci=np.loadtxt(bcifile)	
    else: bci=None
    if bci is not None:
      outstr+='<tr><td align=left>logBCI</td>'
      outstr+='<td align=center>%.2f</td></tr>'%(bci)

    bsn=None
    if bsnfile is not None and os.path.isfile(bsnfile):
      bsn=np.loadtxt(bsnfile)
      bsn=bsn[0]
    else:
      try:
        import h5py
        with h5py.File(samples,'r') as h5grp:
          tmp=h5grp['lalinference']['lalinference_nest'].attrs
          bsn=tmp['log_bayes_factor']
      except Exception as e:
        print("Could not obtain BNS\n")
        print(e)

    if bsn is not None:
      outstr+='<tr><td align=left>logBSN</td>'
      outstr+='<td align=center>%.2f</td></tr>'%(bsn)
    outstr+='</table>'

    if email is not None and bci is not None:
      import os
      import smtplib
      import subprocess
      address=email.split(',')
      SERVER="localhost"
      USER=os.environ['USER']
      import socket
      HOST=socket.getfqdn()#socket.gethostbyaddr(socket.gethostname())[0]
      pref=""
      if bci>3 and bci<6:
        pref="A promising"
      elif bci>6 and bci<10:
        pref="A very interesting"
      elif bci>10:
        pref="A SPECTACULAR"
      FROM="salvatore.vitale@"+HOST
      SUBJECT="%s LIB result page is ready at "%pref+HOST+" for graceID %s!"%(gid)
      TEXT="LIB run for graceID %s is done on "%gid+HOST+".\nThe BCI is %lf\n"%bci
      if bci>10:
        TEXT+="RUN!!!!!!!!!!\n"
      message="From: %s\nTo: %s\nSubject: %s\n\n%s"%(FROM,', '.join(address),SUBJECT,TEXT)
      try:
        import os
        os.system('echo "%s" | mail -s "%s" "%s"'%(TEXT,SUBJECT,', '.join(address)))
        server=smtplib.SMTP(SERVER)
        server.sendmail(FROM,address,message)
        server.quit()
      except Exception:
        print("Could not send email\n")

    g.writeLog(gid,outstr,filename=None,tagname='pe')
  elif skymap is not None:
    if bcifile is not None and os.path.isfile(bcifile):
      bci=np.loadtxt(bcifile)
    else: bci=None
    if bsnfile is not None and os.path.isfile(bsnfile):
      bsn=np.loadtxt(bsnfile)
    else: bsn=None
    tag=['sky_loc']
    """
    if bci is not None and bsn is not None:
      if bsn>5. and bci>2.:
        tag.append('lvem')
    """
    g.writeLog(gid,message,filename=skymap,tagname=tag)
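A hedged invocation sketch (the GraceDB id and file names are placeholders; uploading requires valid GraceDB credentials, and the function assumes numpy as np and lalinference.bayespputils as bppu at module level):

cbcBayesGraceDBinfo(gid='G000000',
                    samples='posterior_samples.hdf5',
                    bcifile='bci.dat', bsnfile='bsn.dat',
                    analysis='LALInference',
                    server='https://gracedb.ligo.org/api/')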
Example #6
def lalinference_to_bilby_result(postfile):
    """
    Convert LALInference-derived pulsar posterior samples file, as created by
    ``lalapps_pulsar_parameter_estimation_nested``, into a
    :class:`bilby.core.result.Result` object.

    Parameters
    ----------
    postfile: str
        The path to a posterior samples file.

    Returns
    -------
    result:
        The results as a :class:`bilby.core.result.Result` object
    """

    import h5py
    import numpy as np

    from bilby.core.result import Result
    from lalinference import bayespputils as bppu
    from pandas import DataFrame

    try:
        peparser = bppu.PEOutputParser("hdf5")
        nsResultsObject = peparser.parse(postfile)
        pos = bppu.Posterior(nsResultsObject, SimInspiralTableEntry=None)
    except Exception as e:
        raise IOError(
            f"Could not import posterior samples from {postfile}: {e}")

    # remove any unchanging variables and randomly shuffle the rest
    pnames = pos.names
    nsamps = len(pos[pnames[0]].samples)
    permarr = np.arange(nsamps)
    np.random.shuffle(permarr)

    posdict = {}
    for pname in pnames:
        # ignore parameters whose samples never change (fixed or delta-function parameters)
        if not np.all(pos[pname].samples == pos[pname].samples[0]):
            # shuffle and store
            posdict[pname] = pos[pname].samples[permarr, 0]

    # get evidence values from HDF5 file
    logZ = None
    logZn = None
    logbayes = None
    try:
        with h5py.File(postfile, "r") as hdf:
            group = hdf["lalinference"]["lalinference_nest"]
            logZ = group.attrs["log_evidence"]
            logZn = group.attrs["log_noise_evidence"]
            logbayes = logZ - logZn
    except KeyError:
        pass

    return Result(
        posterior=DataFrame(posdict),
        log_evidence=logZ,
        log_noise_evidence=logZn,
        log_bayes_factor=logbayes,
    )
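A short usage sketch (the file path is a placeholder; assumes bilby is installed):

result = lalinference_to_bilby_result('pulsar_posterior_samples.hdf5')
print(result.log_evidence, result.log_bayes_factor)
print(result.posterior.describe())   # pandas summary of the sampled parameters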
Example #7
def compare_bayes(outdir,names_and_pos_folders,injection_path,eventnum,username,password,reload_flag,clf,ldg_flag,contour_figsize=(4.5,4.5),contour_dpi=250,contour_figposition=[0.15,0.15,0.5,0.75],fail_on_file_err=True,covarianceMatrices=None,meanVectors=None,Npixels2D=50):

    injection=None

    if injection_path is not None and os.path.exists(injection_path) and eventnum is not None:
        eventnum=int(eventnum)
        from glue.ligolw import ligolw, lsctables, utils
        injections = lsctables.SimInspiralTable.get_table(
                utils.load_filename(injection_path, contenthandler = lsctables.use_in(ligolw.LIGOLWContentHandler)))
        if eventnum is not None:
            if(len(injections)<=eventnum):
                print("Error: You asked for event %d, but %s contains only %d injections" %(eventnum,injection_path,len(injections)))
                sys.exit(1)
            else:
                injection=injections[eventnum]

    #Create analytic likelihood functions if covariance matrices and mean vectors were given
    analyticLikelihood = None
    if covarianceMatrices and meanVectors:
        analyticLikelihood = bppu.AnalyticLikelihood(covarianceMatrices, meanVectors)
    peparser=bppu.PEOutputParser('common')
    pos_list={}
    tp_list={}
    common_params=None
    working_folder=os.getcwd()
    for name,pos_folder in names_and_pos_folders:
        from urllib import parse as urlparse  # Python 3 replacement for the urlparse module

        pos_folder_url=urlparse.urlparse(pos_folder)
        pfu_scheme,pfu_netloc,pfu_path,pfu_params,pfu_query,pfu_fragment=pos_folder_url

        if 'http' in pfu_scheme:

            """
            Retrieve a file over http(s).
            """
            downloads_folder=os.path.join(os.getcwd(),"downloads")
            pos_folder_parse=urlparse.urlparse(pos_folder)
            pfp_scheme,pfp_netloc,pfp_path,pfp_params,pfp_query,pfp_fragment=pos_folder_parse
            head,tail=os.path.split(pfp_path)
            if tail == 'posplots.html' or tail:
                pos_file_part=head
            else:
                pos_file_part=pfp_path
            pos_file_url=urlparse.urlunsplit((pfp_scheme,pfp_netloc,os.path.join(pos_file_part,'posterior_samples.dat'),'',''))
            print(pos_file_url)
            pos_file=os.path.join(os.getcwd(),downloads_folder,"%s.dat"%name)

            if not os.path.exists(pos_file):
                reload_flag=True

            if reload_flag:
                if os.path.exists(pos_file):
                    os.remove(pos_file)
                if not os.path.exists(downloads_folder):
                    os.makedirs(downloads_folder)
                open_url_curl(pos_file_url,args=["-o","%s"%pos_file])

        elif pfu_scheme == '' or pfu_scheme == 'file':
            pos_file=os.path.join(pos_folder,'%s.dat'%name)
            # Try looking for posterior_samples.dat if name.dat doesn't exist
            if not os.path.exists(pos_file):
                print('%s does not exist, trying posterior_samples.dat'%(pos_file))
                pos_file=os.path.join(pos_folder,'posterior_samples.dat')
        else:
            print("Unknown scheme for input data url: %s\nFull URL: %s"%(pfu_scheme,str(pos_folder_url)))
            exit(0)

        print("Reading posterior samples from %s ..."%pos_file)

        try:
            common_output_table_header,common_output_table_raw=peparser.parse(open(pos_file,'r'))
        except:
            print('Unable to read file '+pos_file)
            continue

        test_and_switch_param(common_output_table_header,'distance','dist')
        test_and_switch_param(common_output_table_header,'chirpmass','mchirp')
        test_and_switch_param(common_output_table_header,'mc','mchirp')
        test_and_switch_param(common_output_table_header,'asym_massratio','q')
        test_and_switch_param(common_output_table_header,'massratio', 'eta')
        test_and_switch_param(common_output_table_header,'RA','ra')
        test_and_switch_param(common_output_table_header,'rightascension','ra')
        test_and_switch_param(common_output_table_header,'declination','dec')
        test_and_switch_param(common_output_table_header,'tilt_spin1','tilt1')
        test_and_switch_param(common_output_table_header,'tilt_spin2','tilt2')

        if 'LI_MCMC' in name or 'FU_MCMC' in name:

            try:

                idx=common_output_table_header.index('iota')
                print("Inverting iota!")

                common_output_table_raw[:,idx]= np.pi*np.ones(len(common_output_table_raw[:,0])) - common_output_table_raw[:,idx]

            except:
                pass


        # try:
        #     print "Converting phi_orb-> 2phi_orb"
        #     idx=common_output_table_header.index('phi_orb')
        #     common_output_table_header[idx]='2phi_orb'
        #     common_output_table_raw[:,idx]= 2*common_output_table_raw[:,idx]
        # except:
        #     pass

        try:
            print("Converting iota-> cos(iota)")
            idx=common_output_table_header.index('iota')
            common_output_table_header[idx]='cos(iota)'
            common_output_table_raw[:,idx]=np.cos(common_output_table_raw[:,idx])
        except:
            pass

        #try:
        #    print "Converting tilt1 -> cos(tilt1)"
        #    idx=common_output_table_header.index('tilt1')
        #    common_output_table_header[idx]='cos(tilt1)'
        #    common_output_table_raw[:,idx]=np.cos(common_output_table_raw[:,idx])
        #except:
        #    pass

        #try:
        #    print "Converting tilt2 -> cos(tilt2)"
        #    idx=common_output_table_header.index('tilt2')
        #    common_output_table_header[idx]='cos(tilt2)'
        #    common_output_table_raw[:,idx]=np.cos(common_output_table_raw[:,idx])
        #except:
        #    pass

        try:
            print("Converting thetas -> cos(thetas)")
            idx=common_output_table_header.index('thetas')
            common_output_table_header[idx]='cos(thetas)'
            common_output_table_raw[:,idx]=np.cos(common_output_table_raw[:,idx])
        except:
            pass

        try:
            print("Converting beta -> cos(beta)")
            idx=common_output_table_header.index('beta')
            common_output_table_header[idx]='cos(beta)'
            common_output_table_raw[:,idx]=np.cos(common_output_table_raw[:,idx])
        except:
            pass

        injFref = None
        try:
            idx=common_output_table_header.index('f_ref')
            injFrefs=np.unique(common_output_table_raw[:,idx])
            if len(injFrefs) == 1:
              injFref = injFrefs[0]
              print("Using f_ref in results as injected value")
        except ValueError:
            # no f_ref column in the posterior file
            pass

        pos_temp=bppu.Posterior((common_output_table_header,common_output_table_raw),SimInspiralTableEntry=injection, injFref=injFref)

        if 'a1' in pos_temp.names and min(pos_temp['a1'].samples)[0] < 0:
          pos_temp.append_mapping('spin1', lambda a:a, 'a1')
          pos_temp.pop('a1')
          pos_temp.append_mapping('a1', lambda a:np.abs(a), 'spin1')
        if 'a2' in pos_temp.names and min(pos_temp['a2'].samples)[0] < 0:
          pos_temp.append_mapping('spin2', lambda a:a, 'a2')
          pos_temp.pop('a2')
          pos_temp.append_mapping('a2', lambda a:np.abs(a), 'spin2')


        if 'm1' in pos_temp.names and 'm2' in pos_temp.names:
          print("Calculating total mass")
          pos_temp.append_mapping('mtotal', lambda m1,m2: m1+m2, ['m1','m2'])
        if 'mass1' in pos_temp.names and 'mass2' in pos_temp.names:
          print("Calculating total mass")
          pos_temp.append_mapping('mtotal', lambda m1,m2: m1+m2, ['mass1','mass2'])

        try:
            idx=common_output_table_header.index('m1')

            idx2=common_output_table_header.index('m2')

            if pos_temp['m1'].mean<pos_temp['m2'].mean:
                print("SWAPPING MASS PARAMS!")
                common_output_table_header[idx]='m2'
                common_output_table_header[idx2]='m1'
                pos_temp=bppu.Posterior((common_output_table_header,common_output_table_raw),SimInspiralTableEntry=injection)
        except:
            pass

        pos_list[name]=pos_temp

        if common_params is None:
            common_params=pos_temp.names
        else:
            set_of_pars = set(pos_temp.names)
            common_params=list(set_of_pars.intersection(common_params))

    print("Common parameters are %s"%str(common_params))

    if injection is None and injection_path is not None:
        injections = SimInspiralUtils.ReadSimInspiralFromFiles([injection_path])
        injection=bppu.get_inj_by_time(injections,pos_temp.means['time'])
    if injection is not None:
        for pos in pos_list.values():
            pos.set_injection(injection)

    set_of_pars = set(allowed_params)
    common_params=list(set_of_pars.intersection(common_params))

    print("Using parameters %s"%str(common_params))

    if not os.path.exists(os.path.join(os.getcwd(),'results')):
        os.makedirs('results')

    if not os.path.exists(outdir):
        os.makedirs(outdir)

    pdfdir=os.path.join(outdir,'pdfs')
    if not os.path.exists(pdfdir):
        os.makedirs(pdfdir)

    greedy2savepaths=[]

    if common_params is not None: #If there are common parameters....
        colorlst=bppu.__default_color_lst

        if len(common_params)>1: #If there is more than one parameter...
            temp=copy.copy(common_params)
            #Plot two param contour plots

            #Assign some colours to each different analysis result
            color_by_name={}
            hatches_by_name={}
            my_cm=mpl_cm.Dark2
            cmap_size=my_cm.N
            color_idx=0
            color_idx_max=len(names_and_pos_folders)
            cmap_array=my_cm(np.array(range(cmap_size)))
            #cmap_array=['r','g','b','c','m','k','0.5','#ffff00']
            hatches=['/','\\','|','-','+','x','o','O','.','*']
            ldg='auto'
            if not ldg_flag:
              ldg=None
            for name,infolder in names_and_pos_folders:
                #color_by_name=cmap_array[color_idx]
                color_by_name[name]=cmap_array[int(floor(color_idx*cmap_size/color_idx_max)),:]
                color_idx=(color_idx+1) % color_idx_max
                hatches_by_name[name]=hatches[color_idx]

            for i,j in all_pairs(temp):#Iterate over all unique pairs in the set of common parameters
                pplst=[i,j]
                rpplst=pplst[:]
                rpplst.reverse()

                pplst_cond=(pplst in twoDplots)
                rpplst_cond=(rpplst in twoDplots)
                if pplst_cond or rpplst_cond:#If this pair of parameters is in the plotting list...

                    try:
                        print('2d plots: building ',i,j)
                        greedy2Params={i:greedyBinSizes[i],j:greedyBinSizes[j]}
                    except KeyError:
                        continue

                    name_list=[]
                    cs_list=[]

                    slinestyles=['solid', 'dashed', 'dashdot', 'dotted']

                    fig=bppu.plot_two_param_kde_greedy_levels(pos_list,greedy2Params,TwoDconfidenceLevels,color_by_name,figsize=contour_figsize,dpi=contour_dpi,figposition=contour_figposition,legend=ldg,line_styles=slinestyles,hatches_by_name=hatches_by_name,Npixels=Npixels2D)
                    if fig is None: continue
                    #fig=bppu.plot_two_param_greedy_bins_contour(pos_list,greedy2Params,TwoDconfidenceLevels,color_by_name,figsize=contour_figsize,dpi=contour_dpi,figposition=contour_figposition)
                    greedy2savepaths.append('%s-%s.png'%(pplst[0],pplst[1]))
                    fig.savefig(os.path.join(outdir,'%s-%s.png'%(pplst[0],pplst[1])),bbox_inches='tight')
                    fig.savefig(os.path.join(pdfdir,'%s-%s.pdf'%(pplst[0],pplst[1])),bbox_inches='tight')


            plt.clf()
        oned_data={}
        #confidence_levels={}
        confidence_levels=[{},{},{},{}]
        confidence_uncertainty={}
        for param in common_params:
            print("Plotting comparison for '%s'"%param)

            cl_table_header='<table><th>Run</th>'
            cl_table={}
            save_paths=[]
            cl_table_min_max_str='<tr><td> Min | Max </td>'
            level_index=0
            for confidence_level in OneDconfidenceLevels:
                if analyticLikelihood:
                    pdf=analyticLikelihood.pdf(param)
                    cdf=analyticLikelihood.cdf(param)
                else:
                    pdf=None
                    cdf=None
                cl_table_header+='<th colspan="2">%i%% (Lower|Upper)</th>'%(int(100*confidence_level))
                hist_fig,cl_intervals=compare_plots_one_param_line_hist(pos_list,param,confidence_level,color_by_name,cl_lines_flag=clf,legend=ldg,analyticPDF=pdf)
                hist_fig2,cl_intervals=compare_plots_one_param_line_hist_cum(pos_list,param,confidence_level,color_by_name,cl_lines_flag=clf,analyticCDF=cdf,legend=ldg)

                # Save confidence levels and uncertainty
                #confidence_levels[param]=[]
                confidence_levels[level_index][param]=[]

                for name,pos in pos_list.items():
                    median=pos[param].median
                    low,high=cl_intervals[name]
                    #confidence_levels[param].append((name,low,median,high))
                    confidence_levels[level_index][param].append((name,low,median,high))

                level_index=level_index+1
                cl_bounds=[]
                poses=[]
                for name,pos in pos_list.items():
                    cl_bounds.append(cl_intervals[name])
                    poses.append(pos[param])
                confidence_uncertainty[param]=bppu.confidence_interval_uncertainty(confidence_level, cl_bounds, poses)

                save_path=''
                if hist_fig is not None:
                    save_path=os.path.join(outdir,'%s_%i.png'%(param,int(100*confidence_level)))
                    save_path_pdf=os.path.join(pdfdir,'%s_%i.pdf'%(param,int(100*confidence_level)))
                    try:
                      hist_fig.tight_layout()
                      hist_fig2.tight_layout()
                    except Exception:
                      pass
                    hist_fig.savefig(save_path,bbox_inches='tight')
                    hist_fig.savefig(save_path_pdf,bbox_inches='tight')
                    save_paths.append(save_path)
                    save_path=os.path.join(outdir,'%s_%i_cum.png'%(param,int(100*confidence_level)))
                    save_path_pdf=os.path.join(pdfdir,'%s_%i_cum.pdf'%(param,int(100*confidence_level)))
                    hist_fig2.savefig(save_path,bbox_inches='tight')
                    hist_fig2.savefig(save_path_pdf,bbox_inches='tight')
                    save_paths.append(save_path)
                min_low,max_high=list(cl_intervals.values())[0]
                for name,interval in cl_intervals.items():
                    low,high=interval
                    if low<min_low:
                        min_low=low
                    if high>max_high:
                        max_high=high
                    try:
                        cl_table[name]+='<td>%s</td><td>%s</td>'%(low,high)
                    except:
                        cl_table[name]='<td>%s</td><td>%s</td>'%(low,high)
                cl_table_min_max_str+='<td>%s</td><td>%s</td>'%(min_low,max_high)
            cl_table_str=cl_table_header
            for name,row_contents in cl_table.items():
                cl_table_str+='<tr><td>%s<font color="%s"></font></td>'%(name,str(mpl_colors.rgb2hex(color_by_name[name][0:3])))#,'&#183;'.encode('utf-8'))

                cl_table_str+=row_contents+'</tr>'
            cl_table_str+=cl_table_min_max_str+'</tr>'
            cl_table_str+='</table>'

            cl_uncer_str='<table> <th>Confidence Relative Uncertainty</th> <th>Confidence Fractional Uncertainty</th> <th>Confidence Percentile Uncertainty</th>\n'
            cl_uncer_str+='<tr> <td> %g </td> <td> %g </td> <td> %g </td> </tr> </table>'%(confidence_uncertainty[param][0], confidence_uncertainty[param][1], confidence_uncertainty[param][2])

            ks_matrix=compute_ks_pvalue_matrix(pos_list, param)

            N=ks_matrix.shape[0]+1

            # Make up KS-test table
            ks_table_str='<table><th colspan="%d"> K-S test p-value matrix </th>'%N

            # Column headers
            ks_table_str+='<tr> <td> -- </td> '
            for name,pos in pos_list.items():
                ks_table_str+='<td> %s </td>'%name
            ks_table_str+='</tr>'

            # Now plot rows of matrix
            for i in range(len(pos_list)):
                ks_table_str+='<tr> <td> %s </td>'%(list(pos_list.keys())[i])
                for j in range(len(pos_list)):
                    if i == j:
                        ks_table_str+='<td> -- </td>'
                    elif ks_matrix[i,j] < 0.05:
                        # Failing at suspiciously low p-value
                        ks_table_str+='<td> <b> %g </b> </td>'%ks_matrix[i,j]
                    else:
                        ks_table_str+='<td> %g </td>'%ks_matrix[i,j]

                ks_table_str+='</tr>'

            ks_table_str+='</table>'

            oned_data[param]=(save_paths,cl_table_str,ks_table_str,cl_uncer_str)

    # Watch out---using private variable _logL
    max_logls = [[name,max(pos._logL)] for name,pos in pos_list.items()]
    dics = [pos.DIC for name, pos in pos_list.items()]

    return greedy2savepaths,oned_data,confidence_uncertainty,confidence_levels,max_logls,dics
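A hedged invocation sketch (directory names and run labels are placeholders; the surrounding script must also define module-level settings such as allowed_params, greedyBinSizes, twoDplots and the confidence-level lists that compare_bayes relies on):

greedy2savepaths, oned_data, cl_uncertainty, cl_levels, max_logls, dics = compare_bayes(
    'comparison_results',
    [('run_A', '/path/to/runA/posteriors'), ('run_B', '/path/to/runB/posteriors')],
    injection_path=None, eventnum=None,
    username=None, password=None,
    reload_flag=False, clf=True, ldg_flag=True)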
Example #8
    import os
    import sys
    from optparse import OptionParser
    from lalinference import bayespputils as bppu

    parser = OptionParser(USAGE)
    parser.add_option("-o",
                      "--outpath",
                      dest="outpath",
                      default=None,
                      help="make page and plots in DIR",
                      metavar="DIR")
    parser.add_option(
        "-d",
        "--data",
        dest="data",
        help="Posteriors samples file (must be in common format)")
    (opts, args) = parser.parse_args()

    if opts.outpath is None:
        opts.outpath = os.getcwd()
    if not os.path.isfile(opts.data):
        print("Cannot find posterior file %s\n" % opts.data)
        sys.exit(1)
    else:
        peparser = bppu.PEOutputParser('common')
        commonResultsObj = peparser.parse(open(opts.data, 'r'),
                                          info=[None, None])
        ps, samps = commonResultsObj
        pos = bppu.Posterior(commonResultsObj)
        make_disk_plot(pos, opts.outpath)