Example #1
def vectorized_MC_integration(tot_tests, a_nu, batch_size=100000, rhs=1., variability=1./6., mean_field=5., regular_saves=True, dump_fname=None):
    # This can be adapted to make it better for later tests; at the moment it
    # is implemented quickly for our particular setup (regular_saves is
    # currently unused).

    final_avg = 0 # running sum of f(z)*T_nu(z); dividing by nb_tests gives the MC average
    nb_tests = 0

    # Lists and nd_arrays are not hashable, so we build a unique string key for each nu
    hashable_nu = '{}'.format(a_nu)

    ## This definitely needs to be done better!
    if dump_fname is None:
        dump_fname = 'MCEstimations_PWLD_6dim_small_var.npy'
    if not os.path.isfile(dump_fname):
        dict_results = {hashable_nu: {}}
        batches = range(batch_size, tot_tests + 1, batch_size)
    else:
        # The file already exists: load it and resume from previous results
        dict_results = np.load(dump_fname, allow_pickle=True).item()
        if hashable_nu not in dict_results:
            dict_results[hashable_nu] = {}
            batches = range(batch_size, tot_tests + 1, batch_size)
        else:
            # Resume from the largest already-computed sample count that does
            # not exceed tot_tests
            computed_tests = sorted(dict_results[hashable_nu].keys())
            smaller = [c for c in computed_tests if c <= tot_tests]
            if smaller:
                nb_tests = smaller[-1]
                final_avg = dict_results[hashable_nu][nb_tests]*nb_tests
            batches = range(nb_tests + batch_size, tot_tests + 1, batch_size)


    for a_batch in batches:
        print '\tCurrent number of test samples: {}'.format(a_batch)
        if a_batch in dict_results[hashable_nu]:
            # This batch was already computed: reload it instead of redoing the work
            nb_tests = a_batch
            final_avg = dict_results[hashable_nu][nb_tests]*nb_tests
        else:
            nb_tests = a_batch
            # z = cos(U(0, pi)) draws points with the Chebyshev distribution on [-1, 1]
            zs = np.cos(np.random.uniform(0, np.pi, [batch_size, 6]))
            ys = ctaa(zs, rhs, variability, mean_field)
            cheb_vals = vmvc(zs, a_nu)
            final_avg = final_avg + np.sum(ys*cheb_vals)
            dict_results[hashable_nu][nb_tests] = final_avg/nb_tests
            np.save(dump_fname, dict_results)

    # One last (smaller) batch to reach exactly the required number of samples
    remaining_tests = tot_tests - nb_tests
    if remaining_tests > 0:
        nb_tests = nb_tests + remaining_tests
        zs = np.cos(np.random.uniform(0, np.pi, [remaining_tests, 6]))
        ys = ctaa(zs, rhs, variability, mean_field)
        cheb_vals = vmvc(zs, a_nu)
        final_avg = final_avg + np.sum(ys*cheb_vals)
        dict_results[hashable_nu][nb_tests] = final_avg/nb_tests
        np.save(dump_fname, dict_results)

    return final_avg/nb_tests
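
A minimal usage sketch, assuming the solution and Chebyshev routines (ctaa, vmvc) are in scope; the multi-index and the dump file name below are hypothetical:

a_nu = np.array([1, 0, 0, 2, 0, 0]) # hypothetical multi-index
# Resumes from any earlier run stored in the dump file, then tops up to 10^6 samples
estimate = vectorized_MC_integration(10**6, a_nu, batch_size=100000, dump_fname='MC_dump_example.npy')
print 'Estimated coefficient for nu={}: {}'.format(a_nu, estimate)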
Example #2
def coefs_QMCSobol_wrapper(nb_samples, a_nu):
    # Note that a lot of calculations are repeated by doing this; we need to
    # be smarter (see faster_QMC_computations below)!
    int_val = 0
    seed = 0
    dim = 6
    for one_sample in xrange(0, nb_samples):
        [sample, new_seed] = i4_sobol(dim, seed)
        sample = sample*2 - 1 # map the Sobol point from [0, 1]^6 to [-1, 1]^6, where the Chebyshev polynomials live
        int_val = int_val + ctaa(sample, 1.0, 1.0/6, 5.)*mvc(sample, a_nu)
        seed = new_seed
    return int_val/nb_samples
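
For reference, a small sketch of the i4_sobol calling convention used above: each call returns the next point of the sequence in [0, 1]^dim together with the seed to pass to the following call.

seed = 0
for _ in xrange(4):
    point, seed = i4_sobol(6, seed)
    print 2*point - 1 # mapped from [0, 1]^6 to [-1, 1]^6, as in the wrappers above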
Example #3
def faster_QMC_computations(nb_samples, nus):
    # First find the highest degree appearing in the list of nu's; then go
    # through the samples one at a time and, every time a new coordinate value
    # shows up, cache its Chebyshev evaluations so they are computed only once.
    max_degree = np.max(nus)
    cheb_evals = {}   # value -> [T_0(value), ..., T_max_degree(value)]
    weights_eval = {} # value -> Chebyshev weight 1/sqrt(1 - value^2)
    int_vals = np.zeros(len(nus)) # one integral value per nu
    seed = 0 # required for the Sobol index
    dim = 6 # hard-coded dimension; that hurts!
    for one_sample in xrange(0, nb_samples):
        [sample, new_seed] = i4_sobol(dim, seed)
        sample = sample*2 - 1 # set the anchor at (-1, -1, ...) instead of the usual (0, 0, ...) for QMC methods
        # Coordinate values that we have not precomputed before
        not_computed = [a_param for a_param in sample if a_param not in cheb_evals]
        for to_compute in not_computed:
            # Add these guys to the dictionaries
            to_add_to_dict = [1]
            for one_deg in xrange(1, max_degree + 1):
                to_add_to_dict.append(np.polynomial.chebyshev.chebval(to_compute, np.hstack((np.zeros(one_deg), 1))))
            cheb_evals[to_compute] = to_add_to_dict
            weights_eval[to_compute] = np.polynomial.chebyshev.chebweight(to_compute)
        sol = ctaa(sample, 1.0, 1.0/6, 5.)
        for (i_nu, a_nu) in enumerate(nus):
            # Tensorized (weighted) Chebyshev evaluation assembled from the caches;
            # intended to mirror mvc(sample, a_nu) with its default weights
            cheb_prod = np.prod([cheb_evals[sample[j]][a_nu[j]]*weights_eval[sample[j]] for j in xrange(dim)])
            int_vals[i_nu] += sol*cheb_prod
        seed = new_seed
    return int_vals/nb_samples
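
A quick sanity check of the caching idea above: chebval evaluated with the n-th unit coefficient vector returns the single Chebyshev polynomial T_n(x) = cos(n arccos x), which is exactly what each cache entry stores.

x = 0.3
for n in xrange(4):
    unit_coeffs = np.hstack((np.zeros(n), 1)) # selects T_n
    assert np.isclose(np.polynomial.chebyshev.chebval(x, unit_coeffs), np.cos(n*np.arccos(x)))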
Example #4
def plot_PWLD(infile, outfile, L_to_plot, v_to_plot, k_to_plot, which_to_plot = None):
    ### Load
    print("Loading {0} ...".format(infile))
    results = sorted(shelve.open(infile).values(), key=lambda r: r.L)

    ### Plot
    first_result = results[-1] # the finest result; at this point, a multi-level result
    d            = first_result.cspde_result[0].d # number of parameters
    spde_model   = first_result.spde_model
    epsilon      = first_result.epsilon


    d_plot       = len(k_to_plot) # k_to_plot lists the coordinates along which we analyze pointwise convergence
    results_plot = [r for r in results if r.wr_model.weights[0] in v_to_plot and r.L in L_to_plot]
    # Note: the number of grid points in the original grid can be recovered from
    # spde_model.mesh_size - every time we add a level, every component is multiplied by two
    print(len(results_plot))
    fig = plt.figure()
    # plt.suptitle("{0}: s={1}, epsilon={2}, {3} weights".format("Diffusion Model", s, epsilon, wr_model.weights.name))

    outer_grid = gridspec.GridSpec(int(np.ceil(d_plot/2.)), 2)

    l = []
    # First refine the mesh to a (much) finer level 
    # spde_model.refine_mesh(ratio=2**(np.max(L_to_plot)+2)) # Refine to 2 levels larger than the largest level we consider. 
    # This is not required since we have the exact formula for this case

    for (index, k) in enumerate(k_to_plot): 
        print("Plotting k={0} ...".format(k+1))

        # Reconstruct
        step = 0.02
        T = np.arange(-1+step, 1, step)
        # Vary coordinate k over T, keeping the other d-1 coordinates at 0
        Z = np.array([np.hstack((np.zeros(k), t, np.zeros(d-k-1))) for t in T])

        # Compute the TRUE solution on that new mesh; the exact formula (ctaa)
        # is available for this case, so no call to spde_model.samples(Z) is needed
        rhs         = spde_model.f.c
        variability = spde_model.var
        abar        = spde_model.abar
        Y = [np.array([float(ctaa([np.array(z)], rhs, variability, abar)) for z in Z])]
        for result in results_plot:
            # Evaluation of the estimation / approximation in the ML case
            # (check that we actually have the level we are testing):
            Y.append(result.wr_model.estimate_ML_samples(result.cspde_result, Z))
            # Evaluation in the SL case would instead read:
            # x       = result.cspde_result.result.x
            # F_recon = result.wr_model.operator.create(np.array(result.cspde_result.J_s)[x != 0], Z, normalization=np.sqrt(result.cspde_result.m))
            # Y.append(F_recon.apply(x[x != 0]))

        # Plot
        if which_to_plot is None:
            inner_grid = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer_grid[index])

            ax = plt.Subplot(fig, inner_grid[0])
            ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
            for y in Y:
                l.append(ax.plot(T, y)[0])
            fig.add_subplot(ax)
            plt.title("$y_{0}$".format(k+1))

            ax = plt.Subplot(fig, inner_grid[1])
            ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
            for y in Y:
                ax.plot(T, Y[0]-y)
            fig.add_subplot(ax)
        else:
            inner_grid = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=outer_grid[index])
            ax = plt.Subplot(fig, inner_grid[0])
            ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))

            if which_to_plot[index] == 'function':
                for y in Y:
                    l.append(ax.plot(T, y)[0])
                fig.add_subplot(ax)
                # plt.title("$z_{0}$".format(k+1))
            else:
                for y in Y:
                    ax.plot(T, Y[0]-y)
                fig.add_subplot(ax)
                # plt.title("$z_{0}$ difference".format(k+1))

    fig.legend(l, ["Truth"] + ["L={0}".format(int(round(result.L))) for result in results_plot],
               loc='lower center', bbox_to_anchor=(0.5, -0.05), bbox_transform=plt.gcf().transFigure,
               ncol=len(results_plot)+1, fancybox=True, shadow=True)


    all_axes = fig.get_axes()

    # Show x tick labels only on the bottom row of subplots
    for ax in all_axes:
        if ax.is_last_row():
            plt.setp(ax.get_xticklabels(), visible=True)
        else:
            plt.setp(ax.get_xticklabels(), visible=False)

    if which_to_plot is None:
        # Use float division so np.ceil actually rounds up for odd d_plot
        plt.gcf().set_size_inches(17, int(4 * np.ceil(d_plot/2.)))
    else:
        plt.gcf().set_size_inches(17, int(8 * np.ceil(d_plot/2.)))

    # plt.tight_layout()
    plt.savefig(outfile, bbox_inches="tight")
Example #5
def integrand(x0, x1, x2, x3, x4, x5, nu0, nu1, nu2, nu3, nu4, nu5, with_weights=True, rhs=1., variability=1.0/6., abar=5.):
    # Pointwise integrand: solution value times the (possibly weighted) multivariate Chebyshev polynomial
    return ctaa([np.array([x0,x1,x2,x3,x4,x5])], rhs, variability, abar)*mvc([x0,x1,x2,x3,x4,x5], np.array([nu0,nu1,nu2,nu3,nu4,nu5]), with_weights)
Example #6
def coefs_mpmath_wrapper(a_nu):
    # It seems there is no way to use mpmath's quad for more than 3 dimensions :(
    mp.dps = 10 # 10 digits of accuracy should be more than enough
    # tanh-sinh tends to be more accurate when dealing with (endpoint) singularities
    return quad(lambda x0,x1,x2,x3,x4,x5: ctaa([np.array([x0,x1,x2,x3,x4,x5])], 1.0, 1.0/6.0, 5.0)*mvc([x0,x1,x2,x3,x4,x5],a_nu),
                [-1,1],[-1,1],[-1,1],[-1,1],[-1,1],[-1,1], method='tanh-sinh')
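
A one-dimensional sanity check of the tanh-sinh claim: the Chebyshev weight 1/sqrt(1 - x^2) is singular at both endpoints of [-1, 1], yet its integral comes out correctly as pi.

mp.dps = 10
print quad(lambda x: 1/mp.sqrt(1 - x**2), [-1, 1], method='tanh-sinh') # ~3.141592654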
Example #7
def coefs_nquad_wrapper(a_nu):
    # Full 6-dimensional adaptive quadrature over [-1, 1]^6
    return spi.nquad(lambda x0,x1,x2,x3,x4,x5: ctaa([np.array([x0,x1,x2,x3,x4,x5])], 1.0, 1.0/6.0, 5.0)*mvc([x0,x1,x2,x3,x4,x5],a_nu),
                     [[-1,1]]*6)
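
Note that spi.nquad returns a (value, estimated error) pair rather than a bare float, so a (hypothetical) call is unpacked as:

value, abserr = coefs_nquad_wrapper(np.array([0, 1, 0, 0, 0, 0]))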