Example #1
    if opts.no2d:
        twoDplots=[]


    greedy2savepaths,oned_data,confidence_uncertainty,confidence_levels,max_logls,dics=compare_bayes(outpath,zip(names,pos_list),opts.inj,opts.eventnum,opts.username,opts.password,opts.reload_flag,opts.clf,opts.ldg_flag,contour_figsize=(float(opts.cw),float(opts.ch)),contour_dpi=int(opts.cdpi),contour_figposition=[0.15,0.15,float(opts.cpw),float(opts.cph)],fail_on_file_err=not opts.readFileErr,covarianceMatrices=opts.covarianceMatrices,meanVectors=opts.meanVectors,Npixels2D=int(opts.npixels_2d))
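    # The names unpacked above are interpreted from the variable names and
    # their use below: paths of the saved 2D greedy-binning plots, 1D marginal
    # data, confidence-interval uncertainties and levels, (name, max log L)
    # pairs and a DIC value per analysis; compare_bayes itself is not shown in
    # this excerpt.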

    ####Print Confidence Levels######
    output_confidence_levels_tex(confidence_levels,outpath)    
    output_confidence_levels_dat(confidence_levels,outpath)
    
    ####Save confidence uncertainty#####
    output_confidence_uncertainty(confidence_uncertainty,outpath)

    ####Print HTML!#######

    compare_page=bppu.htmlPage('Compare PDFs (single event)',css=bppu.__default_css_string)

    param_section=compare_page.add_section('Meta')

    param_section_write='<div><p>This comparison was created from the following analyses</p>'
    param_section_write+='<table border="1">'
    param_section_write+='<tr><th>Analysis</th> <th> max(log(L)) </th> <th> DIC </th></tr>'
    for (name,logl_max), dic in zip(max_logls, dics):
        param_section_write+='<tr><td><a href="%s">%s</a></td> <td>%g</td> <td>%.1f</td></tr>'%(dict(zip(names,pos_list))[name],name,logl_max,dic)
    param_section_write+='</table></div>'

    param_section.write(param_section_write)
    param_section.write('<div><p><a href="confidence_table.tex">LaTeX table</a> of medians and confidence levels.</p></div>')
    if oned_data:

        param_section=compare_page.add_section('1D marginal posteriors')
def makeOutputPage(objs, params, outdir, confidencelevels):
    """
    Make a summary page with a table of results and plots for each parameter in params.
    """
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    #Bin size/resolution for binning. Need to match (converted) column names.
    GreedyRes = {
        'mc': 0.025,
        'm1': 0.1,
        'm2': 0.1,
        'mass1': 0.1,
        'mass2': 0.1,
        'mtotal': 0.1,
        'eta': 0.001,
        'iota': 0.01,
        'time': 1e-3,
        'distance': 1.0,
        'dist': 1.0,
        'mchirp': 0.025,
        'a1': 0.02,
        'a2': 0.02,
        'phi1': 0.05,
        'phi2': 0.05,
        'theta1': 0.05,
        'theta2': 0.05,
        'ra': 0.05,
        'dec': 0.05
    }

    html = bppu.htmlPage('Injection Summary', css=bppu.__default_css_string)
    html_meta = html.add_section('Summary')
    html_meta.p('Analysed %i injections.' % (len(objs)))

    # Make a directory for stdacc and errorbar plots
    accdir = os.path.join(outdir, 'stdacc')
    if not os.path.isdir(accdir):
        os.makedirs(accdir)
    errdir = os.path.join(outdir, 'errbar')
    if not os.path.isdir(errdir):
        os.makedirs(errdir)
    boxdir = os.path.join(outdir, 'boxplot')
    if not os.path.isdir(boxdir):
        os.makedirs(boxdir)
    # Calculate confidence intervals for each injection and parameter

    #for par in params:
    #    par=par.lower()
    #    print 'Binning %s to determine confidence intervals'%(par)
    #
    #    for obj in objs:
    #        try:
    #            obj[par]
    #        except KeyError:
    #            print 'No input chain for %s, skipping'%(par)
    #            continue
    #        try:
    #            GreedyRes[par]
    #        except KeyError:
    #            print 'No bin size for %s, skipping'%(par)
    #            continue
    #        binParams={par: GreedyRes[par]}

    #       toppoints,injectionconfidence,reses,injection_area=bppu.greedy_bin_one_param(obj,binParams, confidencelevels)
    #       oneDContCL, oneDContInj = bppu.contigious_interval_one_param(obj, binParams, confidencelevels)

    print 'Calculating std accuracies'
    # Calculate Std Accuracies
    stdacc = {}
    mean = {}
    std = {}
    for par in params:
        if not reduce(lambda a, b: a and b, map(lambda z: par in z.names,
                                                objs)):
            print '%s not found in all objects, skipping' % (par)
            continue
        stdacc[par] = []
        mean[par] = []
        std[par] = []
        for obj in objs:
            oneDpos = obj[par]
            stdacc[par].append(oneDpos.stacc)
            mean[par].append(oneDpos.mean)
            std[par].append(oneDpos.stdev)
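
    # Note on the statistics gathered above: 'mean' and 'std' are the usual
    # posterior sample mean and standard deviation, while 'stacc' (standard
    # accuracy) is taken here to be the RMS deviation of the samples about the
    # injected value, sqrt(mean((samples - injval)**2)), i.e. a single figure
    # of merit combining bias and spread (assumed from bayespputils; check the
    # installed version for the exact definition).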

    html_scatter = html.add_section('Standard Accuracy')
    html_scatter_content = '<table>'
    print 'Making std accuracy plots'
    # Make a scatter plot for std accuracy against each injection parameter
    for p1 in params:  # X axis, injection values
        try:
            injval = map(lambda o: o._getinjpar(p1), objs)
        except KeyError:
            print 'Error! Unable to find parameter %s in injection!' % (p1)
            continue
        html_scatter_content += '<tr><td>%s</td>' % (p1)
        for p2 in params:  # Y axis
            try:
                stdacc[p2]
            except KeyError:
                print 'No stdacc data for %s, skipping' % (p2)
                html_scatter_content += '<td>%s not found</td>' % (p2)
                continue
            fig = scatterAcc(injval, stdacc[p2], p1, p2)
            figname = p1 + '-' + p2 + '.png'
            figpath = os.path.join(accdir, figname)
            fig.savefig(figpath)
            html_scatter_content += '<td><img src="%s" alt="%s-%s" /></td>' % (
                figpath, p1, p2)
        html_scatter_content += '</tr>'
    html_scatter_content += '</table>'
    html_scatter.write(html_scatter_content)
    print 'Making errorbar plots'
    # Make an errorbar plot for each parameter against each injection parameter
    html_err = html.add_section('Parameter Estimates')
    html_err_content = '<table>'
    for p1 in params:  # X axis, injection values
        try:
            injval = map(lambda o: o._getinjpar(p1), objs)
        except KeyError:
            print 'Error! Unable to find parameter %s in injection!' % (p1)
            continue
        html_err_content += '<tr><td>%s</td>' % (p1)
        for p2 in params:
            try:
                mean[p2]
            except KeyError:
                print 'No mean for %s, skipping' % (p2)
                html_err_content += '<td>%s not found</td>' % (p2)
                continue
            yinjval = map(lambda o: o._getinjpar(p2), objs)
            fig = plt.figure()
            plt.errorbar(injval, mean[p2], std[p2], linestyle='None')
            plt.plot(injval, yinjval, 'gx')
            plt.xlabel(p1)
            plt.ylabel(p2)
            figname = p1 + '-' + p2 + '.png'
            figpath = os.path.join(errdir, figname)
            fig.savefig(figpath)
            html_err_content += '<td><img src="%s" alt="%s-%s" /></td>' % (
                figpath, p1, p2)
        html_err_content += '</tr>'
    html_err_content += '</table>'
    html_err.write(html_err_content)

    # Box and whiskers plot for each parameter pair
    html_box = html.add_section('Box plots')
    html_box_content = '<table>'
    print 'Making box plots'
    for p1 in params:
        # Error checking to be put here
        injval = map(lambda o: o._getinjpar(p1), objs)
        html_box_content += '<tr><td>%s</td>' % (p1)
        for p2 in params:
            try:
                mean[p2]
            except KeyError:
                print 'No mean for %s, skipping' % (p2)
                html_box_content += '<td>%s not found</td>' % (p2)
                continue
            posteriors = map(lambda o: o[p2].samples, objs)
            yinjval = map(lambda o: o._getinjpar(p2), objs)
            fig = plt.figure()
            upper = max(injval)
            lower = min(injval)
            boxwidth = 0.75 * (upper - lower) / len(injval)
            plt.boxplot(posteriors, positions=injval, widths=boxwidth)
            plt.plot(injval, yinjval, 'gx')
            plt.xlabel(p1)
            plt.ylabel(p2)
            plt.xlim(lower - 0.5 * boxwidth, upper + 0.5 * boxwidth)
            figname = p1 + '-' + p2 + '.png'
            figpath = os.path.join(boxdir, figname)
            fig.savefig(figpath)
            html_box_content += '<td><img src="%s" alt="%s-%s" /></td>' % (
                figpath, p1, p2)
        html_box_content += '</tr>'
    html_box_content += '</table>'
    html_box.write(html_box_content)

    html_footer = html.add_section('')
    html_footer.p('Produced using cbcBayesInjProc.py at ' +
                  strftime("%Y-%m-%d %H:%M:%S") + ' .')
    html_footer.p(git_version.verbose_msg)

    # Save page
    resultspage = open(os.path.join(outdir, 'injectionsummary.html'), 'w')
    resultspage.write(str(html))
    resultspage.close()
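
# The helper scatterAcc() called in makeOutputPage() above is not defined in
# this excerpt.  The function below is a hypothetical stand-in, not the real
# pylal helper: it scatters standard accuracy against injected value and
# returns the matplotlib Figure, assuming matplotlib.pyplot is imported as plt
# as elsewhere in this module.  It is named differently so it is not mistaken
# for the original.
def scatterAcc_sketch(injvals, accs, xname, yname):
    # One marker per injection: x = injected value, y = standard accuracy.
    fig = plt.figure()
    plt.scatter(injvals, accs, marker='x')
    plt.xlabel(xname)
    plt.ylabel('std. accuracy of %s' % yname)
    return fig
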
def cbcBayesConvergence(
        outdir,
        data,
        #Number of live points used in data
        ns_Nlive,
        #Threshold for failure for gelman-rubin test
        gelmanthresh=1.01
    #
):
    """
    Calculate convergence statistics for a set of nested sampling runs.
    """

    if data is None:
        raise RuntimeError('You must specify an input data file')
    #
    if outdir is None:
        raise RuntimeError("You must specify an output directory.")

    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    #

    import string
    from numpy import loadtxt
    pos_samples = []
    new_data = []
    for d in reversed(data):
        temp = [d]
        new_data.append(temp)

    peparser = bppu.PEOutputParser('ns')
    posfilename = os.path.join(outdir, 'posterior_samples.dat')

    for i in range(len(data)):
        # Create a posterior object (Npost=None avoids repeated samples which ruin the KS test)
        commonResultsObj = peparser.parse(new_data[i],
                                          Nlive=ns_Nlive,
                                          Npost=None)
        pos = bppu.Posterior(commonResultsObj)
        pos.write_to_file(posfilename)

        with open(posfilename) as f:
            param_arr = string.split(f.readline())
            loadfile = loadtxt(f)
            pos_samples.append(loadfile)
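
    # After this loop, param_arr holds the column names read from the header
    # of the re-written posterior_samples.dat and pos_samples holds one array
    # of samples per run.  This relies on every run producing the same set of
    # columns, since only the last header read is kept.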

    #==================================================================#
    #Create webpage with nested sampling convergence information
    #==================================================================#

    import pylal.nsconvergence as nsc
    runs = len(pos_samples)
    html_nsconvergence = bppu.htmlPage('Convergence Information',
                                       css=bppu.__default_css_string)

    convergencedir = os.path.join(outdir, 'convergence')
    if not os.path.isdir(convergencedir):
        os.makedirs(convergencedir)

    #Summary Section
    html_nsconvergence_stats = html_nsconvergence.add_section('Summary')
    max_l, l_diff = nsc.compare_maxl(pos_samples, param_arr)
    html_nsconvergence_stats.p('Max difference in loglikelihood: %f' % l_diff)
    summary_table_string = ''
    summary_table_header = '<table border="1"><tr><th>Run</th><th>maxloglikelihood</th>'
    #maxposterior column
    if param_arr.count('prior') > 0:
        max_pos, pos_diff = nsc.compare_maxposterior(pos_samples, param_arr)
        html_nsconvergence_stats.p('Max difference in posterior: %f' %
                                   pos_diff)
        summary_table_header += '<th>maxposterior</th>'

    summary_table_header += '</tr>'
    summary_table_string += summary_table_header
    for i in range(runs):
        max_l_val = max_l[i]
        summary_table_string += '<tr><td>%i</td><td>%f</td>' % (i, max_l_val)
        if param_arr.count('prior') > 0:
            max_pos_val = max_pos[i]
            summary_table_string += '<td>%f</td>' % max_pos_val
        summary_table_string += '</tr>'

    summary_table_string += '</table>'
    html_nsconvergence_stats.write(summary_table_string)

    #KS Test Section
    html_nsconvergence_ks = html_nsconvergence.add_section('KS Test')
    ks_arr = nsc.kstest(pos_samples, param_arr, convergencedir)
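    # The loop below treats ks_arr[index] as a flattened runs x runs matrix,
    # so ks_arr[index][i*runs + j] is read as the KS-test p-value between runs
    # i and j for parameter number 'index' (layout assumed from the indexing
    # here; see pylal.nsconvergence for the authoritative shape).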
    for index, p in enumerate(param_arr):
        ks_table_string = '<table><caption>%s</caption><tr><th></th>' % p
        for i in range(runs):
            ks_table_string += '<th>Run%i</th>' % i
        ks_table_string += '</tr>'
        for i in range(runs):
            ks_table_string += '<tr><th>Run%i</th>' % i
            for j in range(i * runs, (i * runs) + runs):
                pval = ks_arr[index][j]
                ks_table_string += '<td>%f</td>' % pval
            ks_table_string += '</tr>'
        ks_table_string += '</table>'
        html_nsconvergence_ks.write(ks_table_string)
    for p in param_arr:
        html_nsconvergence_ks.write(
            '<a href="./convergence/ks/' + p +
            '_ks.png" target="_blank"><img width="35%" src="./convergence/ks/'
            + p + '_ks.png"/></a>')

    #Gelman Rubin Section
    html_nsconvergence_gelman = html_nsconvergence.add_section('Gelman Rubin')

    gelmandir = os.path.join(convergencedir, 'gelmanrubin')
    if not os.path.isdir(gelmandir):
        os.makedirs(gelmandir)

    gelmanrubin = nsc.gelman_rubin(pos_samples, param_arr, gelmandir)
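    # The Gelman-Rubin statistic R compares between-run to within-run
    # variance; values close to 1 mean the independent runs agree, so any
    # parameter whose R exceeds gelmanthresh (1.01 by default) is flagged in
    # warning.txt below.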
    warn = False
    warnparams = []
    for index, g in enumerate(gelmanrubin):
        if g > gelmanthresh:
            warn = True
            warnparams.append(index)
    if warn:
        with open(outdir + '/warning.txt', 'w') as warnfile:
            warnfile.write('Gelman-Rubin threshold set to %f\n' % gelmanthresh)
            for i in warnparams:
                warnfile.write('%s has an R-value of %f\n' %
                               (param_arr[i], gelmanrubin[i]))

    colors = ['b', 'r', 'g', 'c', 'k', 'm', 'y', '0.25', '0.5', '0.75']  # grayscale levels as strings, which matplotlib accepts as colors
    for param_index, param in enumerate(param_arr):
        for i in range(runs):
            data_range = []
            for j in range(len(pos_samples[i])):
                data_range.append(j)
            col = nsc.get_data_col(pos_samples[i], param_arr, param)
            plt.figure(param_index)
            plt.scatter(data_range, col, c=colors[i], s=5, edgecolors='none')
            plt.title('R = ' + str(gelmanrubin[param_index]))
            plt.xlabel('Sample')
            plt.ylabel(param)
            plt.xlim(0, len(pos_samples[i]))
            plt.ylim(min(col), max(col))
            plt.savefig(gelmandir + '/' + param)

    for p in param_arr:
        html_nsconvergence_gelman.write(
            '<a href="./convergence/gelmanrubin/' + p +
            '.png" target="_blank"><img width="35%" src="./convergence/gelmanrubin/'
            + p + '.png"/></a>')

    #Write convergence page
    nsconvergencepage = open(os.path.join(outdir, 'convergence.html'), 'w')
    nsconvergencepage.write(str(html_nsconvergence))
    nsconvergencepage.close()
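
# A minimal, hypothetical invocation of cbcBayesConvergence(); the output
# directory, file names and live-point count below are placeholders, not
# taken from the original script:
#
#   cbcBayesConvergence('convergence_out',
#                       ['run1_nest.dat', 'run2_nest.dat'],
#                       ns_Nlive=1000,
#                       gelmanthresh=1.01)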
Example #4
def makeOutputPage(objs, params, outdir, confidencelevels):
    """
    Make a summary page with a table of results and plots for each parameter in params.
    """
    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    #Bin size/resolution for binning. Need to match (converted) column names.
    GreedyRes={'mc':0.025,'m1':0.1,'m2':0.1,'mass1':0.1,'mass2':0.1,'mtotal':0.1,'eta':0.001,'iota':0.01,'time':1e-3,'distance':1.0,'dist':1.0,'mchirp':0.025,'a1':0.02,'a2':0.02,'phi1':0.05,'phi2':0.05,'theta1':0.05,'theta2':0.05,'ra':0.05,'dec':0.05}


    html=bppu.htmlPage('Injection Summary',css=bppu.__default_css_string)
    html_meta=html.add_section('Summary')
    html_meta.p('Analysed %i injections.'%(len(objs)))
    
    # Make a directory for stdacc and errorbar plots
    accdir=os.path.join(outdir,'stdacc')
    if not os.path.isdir(accdir):
        os.makedirs(accdir)
    errdir=os.path.join(outdir,'errbar')
    if not os.path.isdir(errdir):
        os.makedirs(errdir)
    boxdir=os.path.join(outdir,'boxplot')
    if not os.path.isdir(boxdir):
        os.makedirs(boxdir)
    # Calculate confidence intervals for each injection and parameter
    
    #for par in params:
    #    par=par.lower()
    #    print 'Binning %s to determine confidence intervals'%(par)
    #    
    #    for obj in objs:
    #        try:
    #            obj[par]
    #        except KeyError:
    #            print 'No input chain for %s, skipping'%(par)
    #            continue
    #        try:
    #            GreedyRes[par]
    #        except KeyError:
    #            print 'No bin size for %s, skipping'%(par)
    #            continue
    #        binParams={par: GreedyRes[par]}

     #       toppoints,injectionconfidence,reses,injection_area=bppu.greedy_bin_one_param(obj,binParams, confidencelevels)
     #       oneDContCL, oneDContInj = bppu.contigious_interval_one_param(obj, binParams, confidencelevels)
    
    print 'Calculating std accuracies'
    # Calculate Std Accuracies
    stdacc={}
    mean={}
    std={}
    for par in params:
        if not reduce( lambda a,b: a and b, map(lambda z: par in z.names, objs)):
            print '%s not found in all objects, skipping'%(par)
            continue
        stdacc[par]=[]
        mean[par]=[]
        std[par]=[]
        for obj in objs:
            oneDpos=obj[par]
            stdacc[par].append(oneDpos.stacc)
            mean[par].append(oneDpos.mean)
            std[par].append(oneDpos.stdev)
    
    html_scatter=html.add_section('Standard Accuracy')
    html_scatter_content='<table>'
    print 'Making std accuracy plots'
    # Make a scatter plot for std accuracy against each injection parameter
    for p1 in params: # X axis, injection values
        try:
            injval=map(lambda o: o._getinjpar(p1), objs)
        except KeyError:
            print 'Error! Unable to find parameter %s in injection!'%(p1)
            continue
        html_scatter_content+='<tr><td>%s</td>'%(p1)
        for p2 in params: # Y axis
            try:
                stdacc[p2]
            except KeyError:
                print 'No stdacc data for %s, skipping'%(p2)
                html_scatter_content+='<td>%s not found</td>'%(p2)
                continue
            fig = scatterAcc(injval,stdacc[p2],p1,p2)
            figname=p1+'-'+p2+'.png'
            figpath=os.path.join(accdir,figname)
            fig.savefig(figpath)
            html_scatter_content+='<td><img src="%s" alt="%s-%s" /></td>'%(figpath,p1,p2)
        html_scatter_content+='</tr>'
    html_scatter_content+='</table>'
    html_scatter.write(html_scatter_content)
    print 'Making errorbar plots'
    # Make an errorbar plot for each parameter against each injection parameter
    html_err=html.add_section('Parameter Estimates')
    html_err_content='<table>'
    for p1 in params: # X axis, injection values
        try:
            injval=map(lambda o: o._getinjpar(p1),objs)
        except KeyError:
            print 'Error! Unable to find parameter %s in injection!'%(p1)
            continue
        html_err_content+='<tr><td>%s</td>'%(p1)
        for p2 in params:
            try:
                mean[p2]
            except KeyError:
                print 'No mean for %s, skipping'%(p2)
                html_err_content+='<td>%s not found</td>'%(p2)
                continue
            yinjval=map(lambda o: o._getinjpar(p2), objs)
            fig = plt.figure()
            plt.errorbar(injval,mean[p2],std[p2],linestyle='None')
            plt.plot(injval,yinjval,'gx')
            plt.xlabel(p1)
            plt.ylabel(p2)
            figname=p1+'-'+p2+'.png'
            figpath=os.path.join(errdir,figname)
            fig.savefig(figpath)
            html_err_content+='<td><img src="%s" alt="%s-%s" /></td>'%(figpath,p1,p2)
        html_err_content+='</tr>'
    html_err_content+='</table>'
    html_err.write(html_err_content)

    # Box and whiskers plot for each parameter pair
    html_box=html.add_section('Box plots')
    html_box_content='<table>'
    print 'Making box plots'
    for p1 in params:
        # Error checking to be put here
        injval=map(lambda o: o._getinjpar(p1),objs)
        html_box_content+='<tr><td>%s</td>'%(p1)
        for p2 in params:
            try:
                mean[p2]
            except KeyError:
                print 'No mean for %s, skipping'%(p2)
                html_box_content+='<td>%s not found</td>'%(p2)
                continue
            posteriors=map(lambda o: o[p2].samples, objs)
            yinjval=map(lambda o: o._getinjpar(p2),objs)
            fig=plt.figure()
            upper=max(injval)
            lower=min(injval)
            boxwidth=0.75*(upper-lower)/len(injval)
            plt.boxplot(posteriors,positions=injval,widths=boxwidth )
            plt.plot(injval,yinjval,'gx')
            plt.xlabel(p1)
            plt.ylabel(p2)
            plt.xlim(lower-0.5*boxwidth,upper+0.5*boxwidth)
            figname=p1+'-'+p2+'.png'
            figpath=os.path.join(boxdir,figname)
            fig.savefig(figpath)
            html_box_content+='<td><img src="%s" alt="%s-%s" /></td>'%(figpath,p1,p2)
        html_box_content+='</tr>'
    html_box_content+='</table>'
    html_box.write(html_box_content)

    html_footer=html.add_section('')
    html_footer.p('Produced using cbcBayesInjProc.py at '+strftime("%Y-%m-%d %H:%M:%S")+' .')
    html_footer.p(git_version.verbose_msg)

    # Save page
    resultspage=open(os.path.join(outdir,'injectionsummary.html'),'w')
    resultspage.write(str(html))
    resultspage.close()
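
# A minimal, hypothetical invocation of makeOutputPage(); 'posteriors' stands
# for a list of bppu.Posterior objects built with injection information (so
# that _getinjpar() and the stacc property are available), and the parameter
# names and confidence levels are placeholders:
#
#   makeOutputPage(posteriors, ['mchirp', 'eta', 'time'],
#                  'injection_summary', [0.68, 0.9, 0.95])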
Example #5
    if len(opts.pos_list)!=len(names):
        print "Either add names for all posteriors or don't add any at all!"

    greedy2savepaths,oned_data,confidence_uncertainty,confidence_levels,max_logls=compare_bayes(outpath,zip(names,opts.pos_list),opts.inj,opts.eventnum,opts.username,opts.password,opts.reload_flag,opts.clf,opts.ldg_flag,contour_figsize=(float(opts.cw),float(opts.ch)),contour_dpi=int(opts.cdpi),contour_figposition=[0.15,0.15,float(opts.cpw),float(opts.cph)],fail_on_file_err=not opts.readFileErr,covarianceMatrices=opts.covarianceMatrices,meanVectors=opts.meanVectors,Npixels2D=int(opts.npixels_2d))

    ####Print Confidence Levels######
    output_confidence_levels_tex(confidence_levels,outpath)    
    output_confidence_levels_dat(confidence_levels,outpath)
    
    ####Save confidence uncertainty#####
    output_confidence_uncertainty(confidence_uncertainty,outpath)

    ####Print HTML!#######

    compare_page=bppu.htmlPage('Compare PDFs (single event)',css=bppu.__default_css_string)

    param_section=compare_page.add_section('Meta')

    param_section_write='<div><p>This comparison was created from the following analyses</p>'
    param_section_write+='<table border="1">'
    param_section_write+='<tr><th>Analysis</th> <th> max(log(L)) </th></tr>'
    for name,logl_max in max_logls:
        param_section_write+='<tr><td><a href="%s">%s</a></td> <td>%g</td></tr>'%(dict(zip(names,opts.pos_list))[name],name,logl_max)
    param_section_write+='</table></div>'

    param_section.write(param_section_write)
    param_section.write('<div><p><a href="confidence_table.tex">LaTeX table</a> of medians and confidence levels.</p></div>')
    if oned_data:

        param_section=compare_page.add_section('1D marginal posteriors')
def cbcBayesConvergence(
                        outdir,data,
                        #Number of live points used in data
                        ns_Nlive,
                        #Threshold for failure for gelman-rubin test
                        gelmanthresh=1.01
                        #
                    ):
    """
    Calculate convergence statistics for a set of nested sampling runs.
    """

    if data is None:
        raise RuntimeError('You must specify an input data file')
    #
    if outdir is None:
        raise RuntimeError("You must specify an output directory.")

    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    #

    import string
    from numpy import loadtxt
    pos_samples = []
    new_data = []
    for d in reversed(data):
        temp = [d]
        new_data.append(temp)

    peparser=bppu.PEOutputParser('ns')
    posfilename=os.path.join(outdir,'posterior_samples.dat')
    
    for i in range(len(data)):
        # Create a posterior object (Npost=None avoids repeated samples which ruin the KS test)
        commonResultsObj=peparser.parse(new_data[i],Nlive=ns_Nlive,Npost=None)
        pos = bppu.Posterior(commonResultsObj)
        pos.write_to_file(posfilename)
    
        with open(posfilename) as f:
            param_arr = string.split(f.readline())
            loadfile = loadtxt(f)
            pos_samples.append(loadfile)
   
    #==================================================================#
    #Create webpage with nested sampling convergence information
    #==================================================================#
    
    import pylal.nsconvergence as nsc
    runs = len(pos_samples)
    html_nsconvergence=bppu.htmlPage('Convergence Information', css=bppu.__default_css_string)

    convergencedir = os.path.join(outdir, 'convergence')
    if not os.path.isdir(convergencedir):
        os.makedirs(convergencedir)    	

    #Summary Section
    html_nsconvergence_stats=html_nsconvergence.add_section('Summary')
    max_l, l_diff = nsc.compare_maxl(pos_samples, param_arr)
    html_nsconvergence_stats.p('Max difference in loglikelihood: %f'%l_diff)
    summary_table_string = ''
    summary_table_header = '<table border="1"><tr><th>Run</th><th>maxloglikelihood</th>'
    #maxposterior column
    if param_arr.count('prior') > 0:
        max_pos, pos_diff = nsc.compare_maxposterior(pos_samples, param_arr)
        html_nsconvergence_stats.p('Max difference in posterior: %f'%pos_diff)
        summary_table_header += '<th>maxposterior</th>'	

    summary_table_header += '</tr>'
    summary_table_string += summary_table_header
    for i in range(runs):
        max_l_val = max_l[i]
        summary_table_string += '<tr><td>%i</td><td>%f</td>'%(i,max_l_val)
        if param_arr.count('prior') > 0:
            max_pos_val = max_pos[i]
            summary_table_string += '<td>%f</td>'%max_pos_val
        summary_table_string += '</tr>'
        
    summary_table_string += '</table>'
    html_nsconvergence_stats.write(summary_table_string)
    
    #KS Test Section
    html_nsconvergence_ks=html_nsconvergence.add_section('KS Test')
    ks_arr = nsc.kstest(pos_samples, param_arr, convergencedir)
    for index, p in enumerate(param_arr):	
        ks_table_string = '<table><caption>%s</caption><tr><th></th>'%p
        for i in range(runs):
            ks_table_string += '<th>Run%i</th>'%i 
        ks_table_string+='</tr>'
        for i in range(runs):
            ks_table_string += '<tr><th>Run%i</th>'%i
            for j in range(i*runs,(i*runs)+runs):
                pval = ks_arr[index][j]	
                ks_table_string += '<td>%f</td>'%pval
            ks_table_string += '</tr>'	
        ks_table_string += '</table>'
        html_nsconvergence_ks.write(ks_table_string)
    for p in param_arr:
        html_nsconvergence_ks.write('<a href="./convergence/ks/'+p+'_ks.png" target="_blank"><img width="35%" src="./convergence/ks/'+p+'_ks.png"/></a>')

    #Gelman Rubin Section
    html_nsconvergence_gelman=html_nsconvergence.add_section('Gelman Rubin')
    
    gelmandir = os.path.join(convergencedir,'gelmanrubin')
    if not os.path.isdir(gelmandir):
        os.makedirs(gelmandir)
    
    gelmanrubin = nsc.gelman_rubin(pos_samples, param_arr, gelmandir)
    warn = False
    warnparams = []
    for index,g in enumerate(gelmanrubin):
        if g > gelmanthresh:
            warn = True
            warnparams.append(index)
    if warn:
        with open(outdir+'/warning.txt', 'w') as warnfile:
            warnfile.write('Gelman-Rubin threshold set to %f\n'%gelmanthresh)
            for i in warnparams:
                warnfile.write('%s has an R-value of %f\n'%(param_arr[i], gelmanrubin[i]))
                    
    colors = ['b', 'r', 'g', 'c', 'k', 'm', 'y', '0.25', '0.5', '0.75']  # grayscale levels as strings, which matplotlib accepts as colors
    for param_index, param in enumerate(param_arr):
        for i in range(runs):
            data_range = []
            for j in range(len(pos_samples[i])):
                data_range.append(j) 
            col = nsc.get_data_col(pos_samples[i], param_arr, param)
            plt.figure(param_index)
            plt.scatter(data_range, col, c = colors[i], s = 5, edgecolors = 'none')
            plt.title('R = ' + str(gelmanrubin[param_index]))
            plt.xlabel('Sample')
            plt.ylabel(param)	
            plt.xlim(0,len(pos_samples[i]))	
            plt.ylim(min(col),max(col))
            plt.savefig(gelmandir+'/'+param)
        
    for p in param_arr:
        html_nsconvergence_gelman.write('<a href="./convergence/gelmanrubin/'+p+'.png" target="_blank"><img width="35%" src="./convergence/gelmanrubin/'+p+'.png"/></a>')


    #Write convergence page
    nsconvergencepage=open(os.path.join(outdir, 'convergence.html'), 'w')
    nsconvergencepage.write(str(html_nsconvergence))
    nsconvergencepage.close()