Example #1
def check_finesse(folder, recover=False):
    data = h5_2_dict(join(folder, 'input_finesse.h5'))
    ix = data['fit_ix']
    Lpost, dpost = read_Ld_results(data['Ld_folder'])

    i = np.random.choice(len(Lpost))
    L = Lpost[i]
    d = dpost[i]
    r = data['r']
    sig = data['sig']

    # 4 fit parameters: F, A, Arel, Ti (the offset term is commented out below)
    analyzer = pymultinest.Analyzer(4,
                                    outputfiles_basename=join(
                                        folder, "finesse_"))
    modes = analyzer.get_mode_stats()
    #F, A, Arel, Ti, offset = modes['modes'][0]['mean']
    F, A, Arel, Ti = modes['modes'][0]['mean']
    post = analyzer.get_equal_weighted_posterior()
    w0 = [487.873302, 487.98634]
    mu = [232.03806, 39.948]

    Lpost = Lpost[::30]
    dpost = dpost[::30]

    Fpost = post[::30, 0]
    Apost = post[::30, 1]
    Arelpost = post[::30, 2]
    Tipost = post[::30, 3]
    #offsetpost = post[::30, 4]

    #offset_mean = np.mean(post[:, 4])
    #offset_sd = np.std(post[:, 4])
    print('F: {0:f} +/- {1:f}'.format(np.mean(post[:, 0]), np.std(post[:, 0])))
    print('A: {0:e} +/- {1:e}'.format(np.mean(post[:, 1]), np.std(post[:, 1])))
    print('Arel: {0:f} +/- {1:f}'.format(np.mean(post[:, 2]),
                                         np.std(post[:, 2])))
    print('Ti Ar: {0:f} +/- {1:f}'.format(np.mean(post[:, 3]),
                                          np.std(post[:, 3])))
    #print('offset: {0:e} +/- {1:e}'.format(offset_mean, offset_sd))

    if recover:
        try:
            post_dict = h5_2_dict(join(folder, "finesse_solver_model_post.h5"))
            sig_post = post_dict["signal post"]
            new_r = post_dict['new_r']
        except IOError:
            print(
                "Can't recover finesse solver q posterior. Calculating from scratch."
            )
            #sig_post = calculate_signal_post(r[ix], Lpost, dpost, Fpost, Apost, Arelpost, Tipost, offsetpost, w0, mu)
            new_r = np.linspace(0, 900, 1000)
            # sig_post = calculate_signal_post(r[ix], Lpost, dpost, Fpost, Apost, Arelpost, Tipost, w0, mu)
            sig_post = calculate_signal_post(new_r, Lpost, dpost, Fpost, Apost,
                                             Arelpost, Tipost, w0, mu)
            dict_2_h5(join(folder, "finesse_solver_model_post.h5"), {
                'signal post': sig_post,
                'new_r': new_r
            })
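
The cached posterior model written above can be reloaded and compared to the data in the same way Example #2 does it. The sketch below is an illustration only: the file name and keys come from the snippet above, h5_2_dict is passed in because it is a project helper not defined here, and the (n_points, n_samples) shape of 'signal post' is an assumption.

import numpy as np
import matplotlib.pyplot as plt
from os.path import join


def plot_cached_finesse_band(folder, r, ix, sig, h5_2_dict):
    # Illustration: reload the cached posterior model and overplot its 95%
    # credible band on the data. 'signal post' is assumed to be shaped
    # (n_points, n_samples).
    post_dict = h5_2_dict(join(folder, "finesse_solver_model_post.h5"))
    sig_post = post_dict['signal post']
    new_r = post_dict['new_r']
    lower, upper = np.percentile(sig_post, [2.5, 97.5], axis=1)

    fig, ax = plt.subplots()
    ax.plot(r[ix], sig[ix], 'C1', alpha=0.5, label='Data')
    ax.fill_between(new_r, lower, upper, color='C3', alpha=0.5, label='Fit')
    ax.legend(frameon=False)
    plt.show()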
Example #2
def check_solution(folder,
                   Ld_dir,
                   finesse_dir,
                   recover=False,
                   w0=487.98634,
                   mu=39.948):
    print("I'm here!")
    data = h5_2_dict(join(folder, 'input_plasma_data.h5'))

    ix = data['fit_ix']
    r = data['r']
    sig = data['sig']

    Lpost, dpost = read_Ld_results(Ld_dir)
    Fpost, _, _, _ = read_finesse_results(finesse_dir)
    nL = len(Lpost)
    nF = len(Fpost)
    i = np.random.choice(nL)
    j = np.random.choice(nF)

    L = Lpost[i]
    d = dpost[i]
    F = Fpost[j]

    Lstep = 100
    Fstep = 100
    Tistep = 100
    analyzer = pymultinest.Analyzer(3,
                                    outputfiles_basename=join(folder, "Ti_"))
    modes = analyzer.get_mode_stats()
    Ti, V, A = modes['modes'][0]['mean']
    print(modes['modes'][0]['sigma'])
    print(Ti, V, A)
    post = analyzer.get_equal_weighted_posterior()

    Tipost = post[:, 0]
    Vpost = post[:, 1]
    Apost = post[:, 2]

    if recover:
        try:
            post_dict = h5_2_dict(join(folder, "Ti_solver_model_post.h5"))
            sig_post = post_dict["signal post"]
        except IOError:
            print(
                "Can't recover Ti solver q posterior.  Calculating from scratch."
            )
            sig_post = calculate_signal_post(r[ix],
                                             Lpost[::Lstep],
                                             dpost[::Lstep],
                                             Fpost[::Fstep],
                                             Tipost[::Tistep],
                                             Vpost[::Tistep],
                                             Apost[::Tistep],
                                             w0,
                                             mu,
                                             nprocs=32)
            dict_2_h5(join(folder, "Ti_solver_model_post.h5"),
                      {"signal post": sig_post})
    else:
        sig_post = calculate_signal_post(r[ix],
                                         Lpost[::Lstep],
                                         dpost[::Lstep],
                                         Fpost[::Fstep],
                                         Tipost[::Tistep],
                                         Vpost[::Tistep],
                                         Apost[::Tistep],
                                         w0,
                                         mu,
                                         nprocs=32)
        dict_2_h5(join(folder, "Ti_solver_model_post.h5"),
                  {"signal post": sig_post})

    sig_mean = np.mean(sig_post, axis=1)
    percentiles = calculate_percentile_ranges(sig_post)
    #vals = forward_model(r[ix], L, d, F, w0, mu, A, Ti, V, sm_ang=False, nlambda=1024)

    fig, ax = plt.subplots(figsize=(3.5, 3.5 / 1.61))
    # ax.plot(r[ix], sig[ix], 'C1', alpha=0.5, label='Data')
    ax.plot(r[ix], (sig[ix] - 3000.0) / 100.0, 'C1', alpha=0.5, label='Data')
    #ax.plot(r[ix], vals, 'r')
    alphas = [0.8, 0.5, 0.2]
    keys = [68, 95, 99]
    for alpha, per in zip(alphas, keys):
        if per == 99:
            # ax.fill_between(r[ix], percentiles[per][0], percentiles[per][1], color='C3', alpha=alpha, label='Fit')
            ax.fill_between(r[ix],
                            percentiles[per][0] / 100.0,
                            percentiles[per][1] / 100.0,
                            color='C3',
                            alpha=alpha,
                            label='Fit')
        else:
            # ax.fill_between(r[ix], percentiles[per][0], percentiles[per][1], color='C3', alpha=alpha)
            ax.fill_between(r[ix],
                            percentiles[per][0] / 100.0,
                            percentiles[per][1] / 100.0,
                            color='C3',
                            alpha=alpha)
    fig.legend(frameon=False,
               fontsize=8,
               loc='upper right',
               bbox_to_anchor=(0.5, 0.5))
    ax.set_xlabel("R (px)", fontsize=8, labelpad=-1)
    ax.set_ylabel("Counts (Hundreds)", fontsize=8, labelpad=-1)
    ax.tick_params(labelsize=8)
    #fig.tight_layout()
    fig.savefig(join(folder, "Ti_Ar_fit.png"), dpi=400)
    plt.show(block=False)

    axis_labels = ["Ti (eV)", "V (m/s)", "A (Counts)"]
    ylabels = ["P(Ti)", "P(V)", "P(A)"]
    # fig, ax = plt.subplots(3, figsize=(6, 15))
    fig, ax = plt.subplots(2, figsize=(3.5, 2 * 3.5 / 1.61))
    # for n in range(3):
    for n in range(2):
        my_hist(ax[n], post[:, n])
        ax[n].set_xlabel(axis_labels[n], fontsize=8, labelpad=-1)
        ax[n].set_ylabel(ylabels[n], fontsize=8, labelpad=-1)
        ax[n].tick_params(labelsize=8)
    #fig.tight_layout()
    fig.savefig(join(folder, "Ti_solver_histograms.png"), dpi=400)
    plt.show()
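
calculate_percentile_ranges is a project helper that is not shown in these snippets. From its use above (keyed by 68/95/99, indexed by [0]/[1] for the lower/upper band, with posterior samples along axis 1 as in np.mean(sig_post, axis=1)), a minimal stand-in could look like the following; it is an assumption, not the project's implementation.

import numpy as np


def calculate_percentile_ranges_sketch(sig_post, levels=(68, 95, 99)):
    # Stand-in (assumed behavior): for each credible level, return the
    # (lower, upper) band over the posterior samples along axis 1.
    bands = {}
    for level in levels:
        lower = np.percentile(sig_post, 50.0 - level / 2.0, axis=1)
        upper = np.percentile(sig_post, 50.0 + level / 2.0, axis=1)
        bands[level] = (lower, upper)
    return bands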
Example #3
        parser.add_argument(
            '--recover',
            action='store_true',
            help=
            ("Recover finesse solver q posterior written to an h5 file because "
             "calculation takes a long time"))
        args = parser.parse_args()

        fname = abspath(join(args.folder, "input_plasma_data.h5"))
        basename = abspath(join(args.folder, 'Ti_'))
        org_fname = abspath(join(args.folder, 'ringsum.h5'))

        if not isfile(fname) or args.overwrite:
            if isfile(org_fname):
                data = h5_2_dict(org_fname)
                r = data['r']
                sig = data['sig']
                error = data['sig_sd']
                fit_ix, _ = get_fitting_region(r, sig, error)
                maxval = np.max(sig[fit_ix])
                amp_lim = [0.5 * maxval, 5.0 * maxval]
                dict_2_h5(fname, {
                    'r': r,
                    'sig': sig,
                    'fit_ix': fit_ix,
                    'error': error
                })
            else:
                raise ValueError('{0} does not exist!'.format(org_fname))
        else:
Example #4
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        'Determine finesse fit region and write to hdf5 file')
    parser.add_argument(
        "folder",
        type=str,
        help='Folder containing output (ringsum.h5) of process_image.py')
    parser.add_argument("filename",
                        type=str,
                        help="Filename to be written in <folder>/")
    args = parser.parse_args()

    folder = abspath(args.folder)
    fname = join(folder, 'ringsum.h5')

    data = h5_2_dict(fname)
    print(data.keys())
    r = data['r']
    sig = data['sig']
    #data['sig_sd'] = np.sqrt(data['sig_sd']**2 + (0.02*sig)**2) # this is the error contribution from the center error
    # error contribution from the center error
    data['sig_sd'] = np.sqrt(data['sig_sd']**2 + (0.005 * sig)**2)

    #ix = get_fitting_region(r, sig, data['sig_sd'], plot_fit_region=True)['0']
    #print(ix)
    #remove_voigt(r[ix], sig[ix])

    #print("***********************************************")
    #print("Adding error to the region between ThI and ArII")
    #print("***********************************************")
Example #5
def ld_check(folder, bins=None, saveit=True):
    '''
    Plots results from an L-d calibration.

    Args:
        folder (str): folder that contains the MultiNest output
        bins (int, optional): number of bins to use in the histogram plots
        saveit (bool, default=True): if True, save plots to the
            'Ld_solver_plots' subfolder
    '''
    data = h5_2_dict(join(folder, 'input_Ld_solver.h5'))
    Lpost, dpost = read_Ld_results(folder)
    if saveit:
        fig_folder = join(folder, 'Ld_solver_plots')
        prep_folder(fig_folder)

    hists = {}
    for w, pk in data['peaks'].items():
        h_list = []
        for i, n in enumerate(data['orders'][w]):
            h_list.append(peak_calculator(Lpost, dpost, float(w), n))
        hists[w] = h_list

    means = {}
    stds = {}
    for w, pk in data['peaks'].items():
        means_list = []
        stds_list = []
        for h in hists[w]:
            means_list.append(np.mean(h))
            stds_list.append(np.std(h))
        means[w] = means_list
        stds[w] = stds_list

    norder = max([len(x) for x in data['orders'].values()])
    nwaves = len(data['peaks'].keys())

    fig0, ax0 = plt.subplots(figsize=(10, 6))
    peak_plot(data['r'],
              data['sig'],
              data['peaks'],
              data['peaks_sd'],
              data['orders'],
              fax=(fig0, ax0),
              anspks=means,
              anspks_sd=stds)

    plt.show(block=False)

    fig_, ax_ = plt.subplots()
    for i, w in enumerate(hists.keys()):
        for j, hist in enumerate(hists[w]):
            ax_.axvline(data['peaks'][w][j], zorder=15)
    ax_.plot(data['r'], data['sig'])
    plt.show(block=False)
    wtest = 468.335172
    # print wtest
    # print peak_calculator(0.382288362412689094E+05, 0.883875718242851827E+00, wtest, 0)
    # print peak_calculator(0.382288362412689094E+05, 0.883875718242851827E+00, wtest, 1)

    fig1, ax1 = plt.subplots(2, 1, figsize=(6, 8))
    my_hist(ax1[0], Lpost*px_size, bins=bins)
    ax1[0].set_xlabel('L (mm)', fontsize=18)
    ax1[0].set_ylabel('P (L)', fontsize=18)
    ax1[0].tick_params(labelsize=16)
    ax1[0].get_xaxis().get_major_formatter().set_useOffset(False)
    ax1[0].get_xaxis().get_major_formatter().set_scientific(False)
    my_hist(ax1[1], dpost, bins=bins)
    ax1[1].set_xlabel('d (mm)', fontsize=18)
    ax1[1].set_ylabel('P (d)', fontsize=18)
    ax1[1].tick_params(labelsize=16)
    ax1[1].get_xaxis().get_major_formatter().set_useOffset(False)
    ax1[1].get_xaxis().get_major_formatter().set_scientific(False)
    ax1[1].set_xticks(ax1[1].get_xticks()[::2])
    fig1.tight_layout()
    plt.show(block=False)

    fig2, ax2 = plt.subplots(norder, nwaves, figsize=(12, 10))
    axx = ax2.reshape(norder, nwaves)
    for i, w in enumerate(hists.keys()):
        for j, hist in enumerate(hists[w]):
            my_hist(axx[j, i], hist, bins=bins)
            axx[j, i].axvline(data['peaks'][w][j], color='k', zorder=15)
            axx[j, i].axvspan(
                data['peaks'][w][j] - data['peaks_sd'][w][j] / 2.0,
                data['peaks'][w][j] + data['peaks_sd'][w][j] / 2.0,
                color='gray',
                alpha=0.4,
                zorder=15)
            axx[j, i].set_ylabel('Order {0:d}'.format(data['orders'][w][j]))
            axx[j, i].get_xaxis().get_major_formatter().set_useOffset(False)
            axx[j, i].get_xaxis().get_major_formatter().set_scientific(False)
            axx[j, i].tick_params(labelsize=16)
        axx[0, i].set_title('{0} nm'.format(w), fontsize=18)
        axx[-1, i].set_xlabel('R (px)', fontsize=18)
    fig2.tight_layout()

    #if saveit:
    #    fig0.savefig(join(fig_folder,'peaks.png'), dpi=400)
    #    fig1.savefig(join(fig_folder,'Ld_marginals.png'), dpi=400)
    #    fig2.savefig(join(fig_folder,'peak_histograms.png'), dpi=400)

    plt.show()
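
my_hist is another project helper used here and in Example #2 but not defined in these snippets. A minimal stand-in, assuming it only draws a normalized marginal histogram on the supplied axes, might be:

import numpy as np


def my_hist_sketch(ax, data, bins=None):
    # Stand-in (assumption, not the project's my_hist): a normalized
    # histogram of the posterior samples on the given axes.
    ax.hist(np.asarray(data),
            bins=bins if bins is not None else 'auto',
            density=True,
            histtype='stepfilled',
            alpha=0.7,
            color='C0')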
Example #6
    Ti = 0.5
    Vouter = 1000.0
    Router = 35.0
    Rmax = 42.0
    impact_fac = 35.0
    ne = 2e17
    nn = 8e17
    # mom_dif_length = 20.0
    mom_dif_length = 100.0 * plasma.Lnu(ne * nn, Ti, mu=40.0)
    fname = 'pcx_vel_profile_{0:2.0f}.h5'.format(impact_fac)
    print(fname)

    main(w0,
         mu,
         Ti,
         Vouter,
         Router,
         impact_fac,
         mom_dif_length,
         fname,
         rmax=Rmax)
    data = file_io.h5_2_dict(fname)
    data['image'] = add_noise(data['image'])

    file_io.dict_2_h5(fname, data)

    # fig, ax = plt.subplots()
    # im = ax.imshow(data['image'])
    # plt.colorbar(im)
    # plt.show()
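
add_noise is not defined in this snippet either. A minimal stand-in, assuming it applies Poisson shot noise to the synthetic image (an assumption about the project's implementation), could be:

import numpy as np


def add_noise_sketch(image):
    # Stand-in (assumption): treat pixel values as expected photon counts
    # and draw Poisson shot noise around them.
    counts = np.clip(np.asarray(image, dtype=float), 0.0, None)
    return np.random.poisson(counts).astype(float)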