def plotWassersteinConvergenceReferenceSolution(name, basename, resolutions,
                                                number_of_integration_points):
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    wasserstein2pterrors = []

    reference_resolution = resolutions[-1]
    for r in resolutions[:-1]:
        if rank == 0:
            print(r)
        filename = basename.format(resolution=reference_resolution)
        filename_coarse = basename.format(resolution=r)

        wasserstein2pterrors.append(
            wasserstein2pt_fast(filename, filename_coarse,
                                reference_resolution,
                                number_of_integration_points))
        if rank == 0:
            print("wasserstein2pterrors={}".format(wasserstein2pterrors))

    # Only plot from rank 0
    if rank == 0:
        plt.loglog(resolutions[:-1],
                   wasserstein2pterrors,
                   '-o',
                   basex=2,
                   basey=2)
        plt.xlabel("Resolution")
        min_value_log = np.floor(np.log2(np.min(wasserstein2pterrors)))
        max_value_log = np.ceil(np.log2(np.max(wasserstein2pterrors)))

        plt.ylim([2**min_value_log, 2**max_value_log])
        plt.xticks(resolutions[:-1],
                   ['${r}^3$'.format(r=r) for r in resolutions[:-1]])
        plt.ylabel(
            f'$||W_1(\\nu^{{2, N}}, \\nu^{{2,{reference_resolution}}})||_{{L^1(D\\times D)}}$'
        )
        plt.title("""Wasserstein convergence for {title}
for second correlation marginal (against reference solution)
Using ${number_of_integration_points}^6={total_integration_points}$ equidistant integration points
        """.format(title=name,
                   number_of_integration_points=number_of_integration_points,
                   total_integration_points=number_of_integration_points**6))
        showAndSave('%s_wasserstein_convergence_reference_2pt' % name)

        saveData('%s_wasserstein_convergence_reference_2pt_resolutions' % name,
                 resolutions)
        saveData('%s_wasserstein_convergence_reference_2pt_wasserstein' % name,
                 wasserstein2pterrors)
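
# Illustrative usage sketch (added for clarity, not part of the original
# source). The basename, resolutions and file layout below are hypothetical;
# the function expects files readable by wasserstein2pt_fast for every
# resolution, with the last (finest) resolution acting as the reference.
#
#     plotWassersteinConvergenceReferenceSolution(
#         name='kelvin_helmholtz',                  # hypothetical plot name
#         basename='kh_{resolution}/samples.h5',    # hypothetical file layout
#         resolutions=[64, 128, 256, 512, 1024],    # last entry = reference
#         number_of_integration_points=4)           # 4**6 integration points

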
def plot_convergence_single_sample(basename,
                                   title,
                                   variable,
                                   starting_resolution,
                                   zoom=True,
                                   compute_rate=False):
    resolution = starting_resolution

    resolutions = []
    errors = []

    while resolution_exists(basename, resolution) and resolution_exists(
            basename, 2 * resolution):
        print(resolution)
        error = 0.0
        for plane in range(2 * resolution):
            data_fine = load_plane(basename.format(resolution=2 * resolution),
                                   plane, variable)
            data_coarse = np.repeat(
                np.repeat(
                    load_plane(basename.format(resolution=resolution),
                               plane // 2, variable), 2, 0), 2, 1)

            error += np.sum(abs(data_coarse - data_fine))
        error /= (2 * resolution)**3

        errors.append(error)
        resolutions.append(resolution)

        resolution *= 2

    resolutions = 2 * np.array(resolutions)

    min_error = np.min(errors)
    max_error = np.max(errors)
    if zoom:
        plt.ylim([
            2**np.floor(np.log2(min_error) - 1),
            2**np.ceil(np.log2(max_error) + 1)
        ])
    p = plt.loglog(resolutions, errors, '-o', basex=2, basey=2)
    if compute_rate:
        poly = np.polyfit(np.log(resolutions), np.log(errors), 1)
        plt.loglog(resolutions,
                   np.exp(poly[1]) * resolutions**poly[0],
                   '--',
                   color=p[0].get_color(),
                   label=f'$\\mathcal{{O}}(N^{{{poly[0]:.2f}}})$',
                   basex=2,
                   basey=2)
        plt.legend()
    plt.xlabel('Resolution ($N^3$)')
    plt.ylabel(
        f'Error ($\\lVert {latex_variables[variable]}^{{N}}-{latex_variables[variable]}^{{N/2}}\\rVert_{{L^1(D)}}$)'
    )
    plt.xticks(resolutions, [f"${r}^3$" for r in resolutions])
    plt.title(f"Convergence of single sample,\n"
              f"{title}\n"
              f"Variable: ${latex_variables[variable]}$")

    plot_info.saveData(f'single_sample_convergence_{title}_{variable}_errors',
                       errors)
    plot_info.saveData(
        f'single_sample_convergence_{title}_{variable}_resolutions',
        resolutions)

    plot_info.savePlot(f'single_sample_convergence_{title}_{variable}')
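
# A minimal, self-contained sketch (added for illustration, not part of the
# original module) of the comparison used above: a coarse plane is upsampled
# by repeating each cell 2x2 (nearest neighbour) and the discrete L^1
# difference against the fine plane is normalised by the number of fine cells.
def _demo_plane_l1_difference():
    import numpy as np

    fine = np.random.rand(8, 8)    # hypothetical fine-resolution plane
    coarse = fine[::2, ::2]        # hypothetical coarse-resolution plane

    # Nearest-neighbour upsampling, as in plot_convergence_single_sample.
    coarse_upsampled = np.repeat(np.repeat(coarse, 2, 0), 2, 1)

    # Discrete L^1 difference over the fine grid.
    return np.sum(abs(coarse_upsampled - fine)) / fine.size
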
                    index_min_value = np.unravel_index(
                        value_per_iteration[:end_index, :].argmin(),
                        value_per_iteration.shape)

                    output_folder_min_value = os.path.join(
                        get_configuration_name(configuration['basename'],
                                               index_min_value[1],
                                               starting_size,
                                               batch_size_factor**(-1)),
                        'airfoil_chain')

                    output_parameters_filename_min_value = os.path.join(
                        output_folder_min_value, 'parameters.txt')
                    output_parameters_min_value = np.loadtxt(
                        output_parameters_filename_min_value)

                    min_shapes_per_iteration.append(output_parameters_min_value[index_min_value[0]])



                plot_info.saveData(f'min_shapes_per_iteration_{python_script}_{generator}_{iterations[1]}_{starting_size}',
                                   min_shapes_per_iteration)
                plot_info.saveData(
                    f'closest_to_mean_shapes_per_iteration_{python_script}_{generator}_{iterations[1]}_{starting_size}',
                    closest_to_mean_shapes_per_iteration)
                
                min_value_per_iteration_competitor = np.zeros((len(iterations), number_of_reruns))
                aux_min_values_competitor = collections.defaultdict(lambda: np.zeros((len(iterations), number_of_reruns)))

                for rerun in range(number_of_reruns):

                    for iteration in range(len(iterations)):
                        try:
                            all_values = []
                            
                            number_of_samples = sum(iterations[:iteration+1])
                            
Example #4
def plot_convergence(basename, statistic_name, title,
                     conserved_variables=conserved_variables_default,
                     resolutions=[64, 128, 256, 512],
                     reference=True):
    
    all_data = load_all_data(basename, resolutions, conserved_variables,
                             statistic_name, max(resolutions))
    
    min_values, max_values = get_min_max_values(all_data)
    
    if reference:
        # We do reference convergence
        reference_resolution = max(resolutions)
        reference_solution = all_data[reference_resolution]
    
    errors = []
    
    for resolution in resolutions[:-1]:
        timepoint = get_time(basename.format(resolution=resolution))
        
        
        data = all_data[resolution]
        
        if not reference:
            reference_resolution = resolution * 2
            reference_solution = all_data[reference_resolution]
        
        # Upscale
        while data.shape[0] < reference_resolution:
            data = np.repeat(np.repeat(np.repeat(data, 2, 0), 2, 1), 2, 2)
        
        # compute error in L^1
        error = np.sum(abs(data - reference_solution))/reference_resolution**3
        
        errors.append(error)
        
    if reference:
        convergence_type = 'reference'
    else:
        convergence_type = 'cauchy'
        
    plot_info.saveData(f'convergence_{convergence_type}_{statistic_name}_{title}_{timepoint}_errors', errors)
    plot_info.saveData(f'convergence_{convergence_type}_{statistic_name}_{title}_{timepoint}_resolutions', resolutions)
    
    plt.loglog(resolutions[:-1], errors, '-o')
    poly = np.polyfit(np.log(resolutions[:-1]), np.log(errors), 1)
    
    plt.loglog(resolutions[:-1],
               np.exp(poly[1]) * np.array(resolutions[:-1])**poly[0],
               '--', label=f'$\\mathcal{{O}}(N^{{{poly[0]:.1f}}})$', basex=2,
               basey=2)
    
    plt.xlabel("Resolution ($N\\times N$)")
    if reference:
        plt.ylabel(f"Error ($\\|{stats_latex(statistic_name, 'N')}-{stats_latex(statistic_name, str(reference_resolution))}\\|_{{L^1(D)}}$)")
    else:
        plt.ylabel(f"Error ($\\|{stats_latex(statistic_name, 'N')}-{stats_latex(statistic_name, '2N')}\\|_{{L^1(D)}}$)")
    
    plt.xticks(resolutions[:-1], [f'${N}^3$' for N in resolutions[:-1]])
    plt.title(f'Convergence of {statistic_name.replace("_", " ")}\n{title.replace("_"," ")}\n$T={timepoint}$ {convergence_type} convergence')
    
    
    # Scale to nearest power of two to make the y axis not zoom in too much
    min_error = np.min(errors)
    max_error = np.max(errors)
    
    min_power_of_two = 2**(np.floor(np.log2(min_error)))
    max_power_of_two = 2**(np.ceil(np.log2(max_error)))
    
    if min_power_of_two == max_power_of_two:
        max_power_of_two *= 2
    
    plt.ylim([min_power_of_two, max_power_of_two])
    
    plt.legend()
    plot_info.savePlot(f'convergence_{convergence_type}_{statistic_name}_{title}_{timepoint}')
    plt.close('all')
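
# A small self-contained sketch (illustration only) of the rate estimate used
# above: fitting a line to log(error) versus log(resolution) gives the
# observed convergence order as the slope of the fit.
def _demo_rate_fit():
    import numpy as np

    resolutions = np.array([64, 128, 256, 512])
    errors = 3.0 * resolutions**-2.0    # synthetic second-order errors

    slope, intercept = np.polyfit(np.log(resolutions), np.log(errors), 1)
    fitted = np.exp(intercept) * resolutions**slope

    return slope, fitted    # slope is approximately -2
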
Example #5
def plot_variance_decay(title, resolutions, sample_computer, norm_ord, name):
    variances, variances_details = compute_variance_decay(resolutions,
                                                          sample_computer,
                                                          norm_ord)
    
    
    speedups = [1]
    
    for n in range(1, len(resolutions)):
        local_resolutions = resolutions[:n+1]
        
        # Note the max: plain Monte Carlo is a special case of MLMC, so the
        # speedup is at least one.
        speedup = max(1, compute_speedup(local_resolutions,
                                         variances[:n+1],
                                         variances_details[:n]))
        
        speedups.append(speedup)
        
        
    fig, ax1 = plt.subplots()
    ax1.loglog(resolutions, variances, '-o', 
               label=f'$\\|\\mathrm{{Var}}({name.format(N="N")})\\|_{{L^{{{norm_ord}}}}}$')
    
    
    ax1.loglog(resolutions[1:], variances_details, '-*', 
               label=f'$\\|\\mathrm{{Var}}({name.format(N="N")}-{name.format(N="N/2")})\\|_{{L^{{{norm_ord}}}}}$',
               basex=2, basey=2)
    
    ax1.legend()
    
    ax1.set_xlabel("Resolution ($N$)")
    
    ax1.set_ylabel("Variance")
    
    plt.xticks(resolutions, [f'${r}$' for r in resolutions])
    
    #plt.title(f'Structure function variance decay\n{title}\nVariable: {variable}')
    
    plot_info.savePlot(f'variance_decay_{name}_{norm_ord}_{title}')
    
    plot_info.saveData(f'variance_details_{name}_{norm_ord}_{title}', variances_details)

    plot_info.saveData(f'variance_{name}_{norm_ord}_{title}', variances)
    
    plot_info.saveData(f'variance_decay_resolutions_{name}_{norm_ord}_{title}', resolutions)
    
    ax2 = ax1.twinx()

    
    
    ax2.loglog(resolutions, speedups, '--x', label='MLMC Speedup')

    
    ax2.legend(loc=1)

    ax2.set_xscale("log", basex=2)
    ax2.set_yscale("log", basex=2)
    ax2.set_xticks(resolutions)
    ax2.set_xticklabels([f'${r}$' for r in resolutions])
    ax2.set_ylabel("Potential MLMC speedup")
            
    ylims = ax2.get_ylim()
    
    ax2.set_ylim([min(ylims[0], 0.5), max(ylims[1], 4.4)])
    plt.xticks(resolutions, [f'${r}$' for r in resolutions])
    
    plot_info.savePlot(f'variance_decay_with_speedup_{name}_{norm_ord}_{title}')
    
    plot_info.saveData(f'variance_decay_speedups_{name}_{norm_ord}_{title}', speedups)
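
# A self-contained sketch (illustration only) of the quantity plotted above:
# the variance of the level differences, Var(u^N - u^{N/2}), estimated from
# synthetic samples. In MLMC it is the decay of this quantity, relative to
# Var(u^N), that determines the potential speedup.
def _demo_level_difference_variance(number_of_samples=1000):
    import numpy as np

    rng = np.random.default_rng(0)
    omega = rng.random(number_of_samples)    # synthetic random input

    u_coarse = omega + 0.10 * rng.standard_normal(number_of_samples)  # u^{N/2}
    u_fine = omega + 0.05 * rng.standard_normal(number_of_samples)    # u^N

    # Var(u^N) stays O(1) while Var(u^N - u^{N/2}) is much smaller.
    return np.var(u_fine), np.var(u_fine - u_coarse)
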
     
def plot_convergence(basename, title, variable, starting_resolution, stat, zoom, compute_rate, reference_solution=False):

    if variable != 'all':
        variables = [variable]
    else:
        variables = ['rho', 'mx', 'my', 'mz', 'E']

    resolution = starting_resolution

    resolutions = []
    errors = []
    if reference_solution:
        max_resolution = starting_resolution
        while resolution_exists(basename, max_resolution, stat):
            resolutions.append(max_resolution)
            max_resolution = 2*max_resolution
        print(f'max_resolution = {resolutions[-1]}')
        for resolution in resolutions[:-1]:
            print(resolution)
            error = 0.0
            for plane in range(resolutions[-1]):
                for variable_local in variables:
                    data_fine = load_plane(basename.format(resolution=resolutions[-1], stat=stat), plane, variable_local)
                
                    factor_plane = resolutions[-1]//resolution
                    data_coarse = load_plane(basename.format(resolution=resolution, stat=stat), plane//factor_plane, variable_local)
                
                    while data_coarse.shape[0] < data_fine.shape[0]:
                        data_coarse = np.repeat(np.repeat(data_coarse, 2, 0), 2, 1)

                    error += np.sum(abs(data_coarse-data_fine))
            error /= (resolutions[-1])**3

            errors.append(error)
        resolutions = np.array(resolutions)[:-1]
    else:
        while resolution_exists(basename, resolution, stat) and resolution_exists(basename, 2*resolution, stat):
            print(resolution)
            error = 0.0
            for plane in range(2*resolution):
                for variable_local in variables:
                    data_fine = load_plane(basename.format(resolution=2*resolution, stat=stat), plane, variable_local)
                    data_coarse = np.repeat(np.repeat(load_plane(basename.format(resolution=resolution, stat=stat), plane//2, variable_local), 2, 0), 2, 1)

                    error += np.sum(abs(data_coarse-data_fine))
            error /= (2*resolution)**3

            errors.append(error)
            resolutions.append(resolution)

            resolution *= 2

    
        resolutions = 2*np.array(resolutions)

    min_error = np.min(errors)
    max_error = np.max(errors)
    if zoom:
        plt.ylim([2**np.floor(np.log2(min_error)-1), 2**np.ceil(np.log2(max_error)+1)])
    p = plt.loglog(resolutions, errors, '-o', basex=2, basey=2)
    if compute_rate:
        poly = np.polyfit(np.log(resolutions), np.log(errors), 1)
        plt.loglog(resolutions, np.exp(poly[1])*resolutions**poly[0], '--',
                   color=p[0].get_color(),
                   label=f'$\\mathcal{{O}}(N^{{{poly[0]:.2f}}})$',
                   basex=2,
                   basey=2)
        plt.legend()

    plt.xlabel('Resolution ($N^3$)')
    if reference_solution:
        plt.ylabel(f'Error ($\\lVert {latex_stat[stat]}({latex_variables[variable]}^{{N}})-{latex_stat[stat]}({latex_variables[variable]}^{{{2*resolutions[-1]}}})\\rVert_{{L^1(D)}}$)')
    else:
        plt.ylabel(f'Error ($\\lVert {latex_stat[stat]}({latex_variables[variable]}^{{N}})-{latex_stat[stat]}({latex_variables[variable]}^{{N/2}})\\rVert_{{L^1(D)}}$)')
    plt.xticks(resolutions, [f"${r}^3$" for r in resolutions])
    plt.title(f"Convergence of {stat},\n"
              f"{title}\n"
              f"Variable: ${latex_variables[variable]}$")
    if reference_solution:
        plot_info.saveData(f'{stat}_convergence_reference_{title}_{variable}_errors', errors)
        plot_info.saveData(f'{stat}_convergence_reference_{title}_{variable}_resolutions', resolutions)

        plot_info.savePlot(f'{stat}_convergence_reference_{title}_{variable}')
    else:
        plot_info.saveData(f'{stat}_convergence_{title}_{variable}_errors', errors)
        plot_info.saveData(f'{stat}_convergence_{title}_{variable}_resolutions', resolutions)

        plot_info.savePlot(f'{stat}_convergence_{title}_{variable}')
Example #7
def plot_convergence(basename_a, name_a, basename_b, name_b, title, variable,
                     starting_resolution, stat, zoom, compute_rate):

    resolution = starting_resolution

    resolutions = []
    errors = []

    while resolution_exists(basename_a, resolution,
                            stat) and resolution_exists(
                                basename_b, resolution, stat):
        print(resolution)
        error = 0.0
        for plane in range(resolution):
            data_a = load_plane(
                basename_a.format(resolution=resolution, stat=stat), plane,
                variable)
            data_b = load_plane(
                basename_b.format(resolution=resolution, stat=stat), plane,
                variable)

            data_a_relative = np.max(abs(data_a))
            error += np.sum(abs(data_a - data_b) / abs(data_a_relative))
        error /= resolution**3

        errors.append(error)
        resolutions.append(resolution)

        resolution *= 2

    resolutions = np.array(resolutions)

    min_error = np.min(errors)
    max_error = np.max(errors)
    if zoom:
        plt.ylim([
            2**np.floor(np.log2(min_error) - 1),
            2**np.ceil(np.log2(max_error) + 1)
        ])
    p = plt.loglog(resolutions, errors, '-o', basex=2, basey=2)
    if compute_rate:
        poly = np.polyfit(np.log(resolutions), np.log(errors), 1)
        plt.loglog(resolutions,
                   np.exp(poly[1]) * resolutions**poly[0],
                   '--',
                   color=p[0].get_color(),
                   label=f'$\\mathcal{{O}}(N^{{{poly[0]:.2f}}})$',
                   basex=2,
                   basey=2)
        plt.legend()

    plt.xlabel('Resolution ($N^3$)')
    if "_" not in latex_variables[variable]:
        plt.ylabel(
            f'Error ($\\lVert \\left({latex_stat[stat]}({latex_variables[variable]}^{{N}}_{{\\mathrm{{{name_a}}}}})-{latex_stat[stat]}({latex_variables[variable]}^{{N}}_{{\\mathrm{{{name_b}}}}})\\right)/\\max({latex_stat[stat]}({latex_variables[variable]}^{{N}}_{{\\mathrm{{{name_a}}}}}))\\rVert_{{L^1(D)}}$)'
        )
    else:
        latex_variable_with_subscript_a = f"{latex_variables[variable][:-2]}_{{{latex_variables[variable][-1]}, \\mathrm{{{name_a}}}}}"
        latex_variable_with_subscript_b = f"{latex_variables[variable][:-2]}_{{{latex_variables[variable][-1]}, \\mathrm{{{name_b}}}}}"
        plt.ylabel(
            f'Error ($\\lVert \\left({latex_stat[stat]}({latex_variable_with_subscript_a}^{{N}})-{latex_stat[stat]}({latex_variable_with_subscript_b}^{{N}})\\right)/\\max({latex_stat[stat]}({latex_variable_with_subscript_a}^{{N}}))\\rVert_{{L^1(D)}}$)'
        )
    plt.xticks(resolutions, [f"${r}^3$" for r in resolutions])
    plt.title(f"Convergence of {stat},\n"
              f"Comparing {name_a} and {name_b}\n"
              f"{title}\n"
              f"Variable: ${latex_variables[variable]}$")

    plot_info.saveData(
        f'{stat}_convergence_comparison_{name_a}_{name_b}_{title}_{variable}_errors',
        errors)
    plot_info.saveData(
        f'{stat}_convergence_comparison_{name_a}_{name_b}_{title}_{variable}_resolutions',
        resolutions)

    plot_info.savePlot(
        f'{stat}_convergence_comparison_{name_a}_{name_b}_{title}_{variable}')
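
# A self-contained sketch (illustration only) of the relative error used
# above: the pointwise difference between two data sets is normalised by the
# maximum absolute value of the first before taking the discrete L^1 sum.
def _demo_relative_l1_difference():
    import numpy as np

    data_a = np.random.rand(16, 16)                  # hypothetical solver A plane
    data_b = data_a + 1e-3 * np.random.rand(16, 16)  # hypothetical solver B plane

    scale = np.max(abs(data_a))
    return np.sum(abs(data_a - data_b) / scale) / data_a.size
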
    parser.add_argument('--multi_z', type=int, required=True,
                        help='Number of processes in z direction')
    
    parser.add_argument('--outfile', type=str, required=True,
                        help='Name of output file')
    
    

    args = parser.parse_args()
    
    mpi_size = get_global_size()
    
    if args.multi_z * args.multi_y != mpi_size:
        raise Exception(f'Wrong number of mpi processes given.\n\tGiven: {mpi_size}\n\tExpected: multi_z*multi_y = {args.multi_z} * {args.multi_y} = {args.multi_z * args.multi_y}\n')
    
    
    # maybe these two next checks are too strict, but wanted to be on the safe side
    if not is_power_of_two(args.multi_y):
        raise Exception(f'multi_y must be power of 2, given {args.multi_y}')

    if not is_power_of_two(args.multi_z):
        raise Exception(f'multi_z must be power of 2, given {args.multi_z}')
        
    wasserstein = compute_wasserstein_one_point(args.file_a, args.file_b, args.multi_y, args.multi_z)
    comm = MPI.COMM_WORLD
    sum_distance = comm.reduce(wasserstein, op=MPI.SUM)
    
    if get_rank_global() == 0:
        plot_info.saveData(args.outfile, np.array(sum_distance).reshape(1))
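
# A minimal sketch (illustration only, assuming mpi4py is available as in the
# script above) of the reduce-then-save pattern used here: every rank
# contributes a partial value, comm.reduce sums them onto rank 0 (other ranks
# receive None), and only rank 0 keeps the result for saving.
def _demo_mpi_sum(partial_value=1.0):
    from mpi4py import MPI
    import numpy as np

    comm = MPI.COMM_WORLD
    total = comm.reduce(partial_value, op=MPI.SUM, root=0)

    if comm.Get_rank() == 0:
        return np.array(total).reshape(1)    # same shape as saved above
    return None
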
    
Example #9
                        get_configuration_name(configuration['basename'],
                                               index_min_value[1],
                                               starting_size,
                                               batch_size_factor**(-1)),
                        source_folder)

                    output_parameters_filename_min_value = os.path.join(
                        output_folder_min_value, 'parameters.txt')
                    output_parameters_min_value = np.loadtxt(
                        output_parameters_filename_min_value)

                    min_shapes_per_iteration.append(
                        output_parameters_min_value[index_min_value[0]])

                plot_info.saveData(
                    f'min_shapes_per_iteration_{python_script}_{generator}_{iterations[1]}_{starting_size}',
                    min_shapes_per_iteration)
                plot_info.saveData(
                    f'closest_to_mean_shapes_per_iteration_{python_script}_{generator}_{iterations[1]}_{starting_size}',
                    closest_to_mean_shapes_per_iteration)

                min_value_per_iteration_competitor = np.zeros(
                    (len(iterations), number_of_reruns))
                aux_min_values_competitor = collections.defaultdict(
                    lambda: np.zeros((len(iterations), number_of_reruns)))

                for rerun in range(number_of_reruns):

                    for iteration in range(len(iterations)):
                        try:
                            all_values = []