Example 1
def bit_for_bit(model_path, bench_path, config, plot=True):
    """
    Checks whether the given files have bit for bit solution matches
    on the given variable list.

    Args:
        model_path: absolute path to the model dataset
        bench_path: absolute path to the benchmark dataset
        config: the configuration of the set of analyses
        plot: a boolean of whether or not to generate plots (unused in
            this version of the function)

    Returns:
        A dictionary created by the elements object corresponding to
        the results of the bit for bit testing
    """
    fname = os.path.basename(model_path)
    # Error handling
    if not (os.path.isfile(bench_path) and os.path.isfile(model_path)):
        return elements.error(
            "Bit for Bit", "File named " + fname + " has no suitable match!")
    try:
        model_data = Dataset(model_path, 'r')
        bench_data = Dataset(bench_path, 'r')
    except (IOError, RuntimeError):  # raised by netCDF4 for unreadable files
        return elements.error("Bit for Bit",
                              "File named " + fname + " could not be read!")
    if not (netcdf.has_time(model_data) and netcdf.has_time(bench_data)):
        return elements.error("Bit for Bit",
                              "File named " + fname + " has no time dimension!")

    # Begin bit for bit analysis
    headers = ["Max Error", "Index of Max Error", "RMS Error", "Plot"]
    stats = LIVVDict()
    for var in config["bit_for_bit_vars"]:
        if (var in model_data.variables and var in bench_data.variables):
            m_vardata = model_data.variables[var][:]
            b_vardata = bench_data.variables[var][:]
            diff_data = m_vardata - b_vardata
            if diff_data.any():
                stats[var]["Max Error"] = np.amax(np.absolute(diff_data))
                stats[var]["Index of Max Error"] = str(
                    np.unravel_index(
                        np.absolute(diff_data).argmax(), diff_data.shape))
                stats[var]["RMS Error"] = np.sqrt(
                    np.sum(np.square(diff_data).flatten()) / diff_data.size)
                pf = plot_bit_for_bit(fname, var, m_vardata, b_vardata,
                                      diff_data)
            else:
                stats[var]["Max Error"] = stats[var]["RMS Error"] = 0
                pf = stats[var]["Index of Max Error"] = "N/A"
            stats[var]["Plot"] = pf
        else:
            stats[var] = {
                "Max Error": "No Match",
                "RMS Error": "N/A",
                "Plot": "N/A"
            }
    model_data.close()
    bench_data.close()
    return elements.bit_for_bit("Bit for Bit", headers, stats)
Example 2
def bit_for_bit(model_path, bench_path, config):
    """
    Checks whether the given files have bit for bit solution matches
    on the given variable list.

    Args:
        model_path: absolute path to the model dataset
        bench_path: absolute path to the benchmark dataset
        config: the configuration of the set of analyses

    Returns:
        A dictionary created by the elements object corresponding to
        the results of the bit for bit testing
    """
    fname = os.path.basename(model_path)
    # Error handling
    if not (os.path.isfile(bench_path) and os.path.isfile(model_path)):
        return elements.error("Bit for Bit",
                              "File named " + fname + " has no suitable match!")
    try:
        model_data = Dataset(model_path)
        bench_data = Dataset(bench_path)
    except (FileNotFoundError, PermissionError):
        return elements.error("Bit for Bit",
                              "File named " + fname + " could not be read!")
    if not (netcdf.has_time(model_data) and netcdf.has_time(bench_data)):
        return elements.error("Bit for Bit",
                              "File named " + fname + " has no time dimension!")

    # Begin bit for bit analysis
    headers = ["Max Error", "Index of Max Error", "RMS Error", "Plot"]
    stats = LIVVDict()
    for var in config["bit_for_bit_vars"]:
        if var in model_data.variables and var in bench_data.variables:
            m_vardata = model_data.variables[var][:]
            b_vardata = bench_data.variables[var][:]
            diff_data = m_vardata - b_vardata
            if diff_data.any():
                stats[var]["Max Error"] = np.amax(np.absolute(diff_data))
                stats[var]["Index of Max Error"] = str(
                        np.unravel_index(np.absolute(diff_data).argmax(), diff_data.shape))
                stats[var]["RMS Error"] = np.sqrt(np.sum(np.square(diff_data).flatten()) /
                                                  diff_data.size)
                pf = plot_bit_for_bit(fname, var, m_vardata, b_vardata, diff_data)
            else:
                stats[var]["Max Error"] = stats[var]["RMS Error"] = 0
                pf = stats[var]["Index of Max Error"] = "N/A"
            stats[var]["Plot"] = pf
        else:
            stats[var] = {"Max Error": "No Match", "RMS Error": "N/A", "Plot": "N/A"}
    model_data.close()
    bench_data.close()
    return elements.bit_for_bit("Bit for Bit", headers, stats)
Example 3
def diff_configurations(model_config, bench_config, model_bundle, bench_bundle):
    """
    Compare the model and benchmark configuration files and report, per
    section and option, whether the values match.

    Args:
        model_config: path to the model configuration file
        bench_config: path to the benchmark configuration file
        model_bundle: the LIVVkit bundle used to parse the model configuration
        bench_bundle: the LIVVkit bundle used to parse the benchmark configuration

    Returns:
        A dictionary created by the elements object corresponding to
        the results of the configuration comparison
    """
    diff_dict = LIVVDict()
    model_data = model_bundle.parse_config(model_config)
    bench_data = bench_bundle.parse_config(bench_config)
    if model_data == {} and bench_data == {}:
        return elements.error("Configuration Comparison",
                              "Could not open file: " + model_config.split(os.sep)[-1])

    model_sections = set(six.iterkeys(model_data))
    bench_sections = set(six.iterkeys(bench_data))
    all_sections = model_sections.union(bench_sections)

    for s in all_sections:
        model_vars = set(six.iterkeys(model_data[s])) if s in model_sections else set()
        bench_vars = set(six.iterkeys(bench_data[s])) if s in bench_sections else set()
        all_vars = model_vars.union(bench_vars)
        for v in all_vars:
            model_val = model_data[s][v] if s in model_sections and v in model_vars else 'NA'
            bench_val = bench_data[s][v] if s in bench_sections and v in bench_vars else 'NA'
            same = model_val == bench_val and model_val != 'NA'
            diff_dict[s][v] = (same, model_val, bench_val)
    return elements.file_diff("Configuration Comparison", diff_dict)
Example 4
def diff_configurations(model_config, bench_config, model_bundle, bench_bundle):
    """
    Compare the model and benchmark configuration files and report, per
    section and option, whether the values match.

    Args:
        model_config: path to the model configuration file
        bench_config: path to the benchmark configuration file
        model_bundle: a LIVVkit model bundle object
        bench_bundle: a LIVVkit model bundle object

    Returns:
        A dictionary created by the elements object corresponding to
        the results of the configuration comparison
    """
    diff_dict = LIVVDict()
    model_data = model_bundle.parse_config(model_config)
    bench_data = bench_bundle.parse_config(bench_config)
    if model_data == {} and bench_data == {}:
        return elements.error("Configuration Comparison",
                              "Could not open file: " + model_config.split(os.path.sep)[-1])

    model_sections = set(six.iterkeys(model_data))
    bench_sections = set(six.iterkeys(bench_data))
    all_sections = model_sections.union(bench_sections)

    for s in all_sections:
        model_vars = set(six.iterkeys(model_data[s])) if s in model_sections else set()
        bench_vars = set(six.iterkeys(bench_data[s])) if s in bench_sections else set()
        all_vars = model_vars.union(bench_vars)
        for v in all_vars:
            model_val = model_data[s][v] if s in model_sections and v in model_vars else 'NA'
            bench_val = bench_data[s][v] if s in bench_sections and v in bench_vars else 'NA'
            same = model_val == bench_val and model_val != 'NA'
            diff_dict[s][v] = (same, model_val, bench_val)
    return elements.file_diff("Configuration Comparison", diff_dict)
Example 5
def run(name, config):
    """
    Runs the lvargo13 validation analysis. Drives an NCL script to produce
    a set of comparison plots from the Greenland and velocity datasets.

    Args:
        name: The name of the test
        config: A dictionary representation of the configuration file
    Returns:
        An elements.page with the list of elements to display
    """

    greenland_data = os.path.join(livvkit.__path__[0], config['data_dir'],
                                  config['gl_data'])
    velocity_data = os.path.join(livvkit.__path__[0], config['data_dir'],
                                 config['vel_data'])

    if not (os.path.exists(greenland_data) and os.path.exists(velocity_data)):
        # Add more handling here -- what do we want to return for failed tests
        return elements.error("lvargo13",
                              "Could not find necessary data for validation!")

    # Generate the script
    output_dir = os.path.join(livvkit.index_dir, 'validation', 'imgs')
    output_file_base = os.path.join(output_dir, 'lvargo13')
    functions.mkdir_p(output_dir)

    ncl_command = 'ncl \'gl_data = addfile("' + greenland_data + '", "r")\' '  \
                  + '\'vel_data = addfile("' + velocity_data + '", "r")\' '  \
                  + '\'model_prefix = "' \
                  + os.path.join(livvkit.__path__[0], config['data_dir'], config['model_prefix']) \
                  + '"\' '  \
                  + '\'model_suffix = "' + config['model_suffix'] + '"\' '  \
                  + '\'model_start = ' + config['model_start'] + '\' '  \
                  + '\'model_end = ' + config['model_end'] + '\' '  \
                  + '\'plot_file_base = "' + output_file_base + '"\' ' \
                  + os.path.join(livvkit.__path__[0], config['plot_script'])

    # shell=True hands this whole string to the shell, so quoting in the paths matters
    p = subprocess.Popen(ncl_command,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    ncl_out, ncl_err = p.communicate()

    # TODO: Put some error checking here

    output_plots = [
        os.path.basename(path) for path in glob.glob(output_file_base + "*.png")
    ]
    plot_list = []
    for plot in output_plots:
        plot_list.append(elements.image(plot, "", plot))

    the_page = elements.page("lvargo13", config['description'],
                             elements.gallery("Plots", plot_list))

    return the_page
Example 6
def run(name, config):
    """
    Runs the analysis.

    Args:
        name: The name of the test
        config: A dictionary representation of the configuration file

    Returns:
       The result of elements.page with the list of elements to display
    """
    # TODO: Put your analysis here
    element_list = elements.error("Unimplemented test",
                                  "This test contains no analysis code!")
    return elements.page(name, config['description'], element_list)
Example 7
def run(name, config):
    """
    Runs the analysis of the coverage of the ice sheet over the land mass.
    Produces a coverage plot comparing the model and benchmark datasets.

    Args:
        name: The name of the test
        config: A dictionary representation of the configuration file
    Returns:
        An elements.page with the list of elements to display
    """
    bench_data = os.path.join(livvkit.__path__[0], config['data_dir'],
                              config['bench_data'])
    model_data = os.path.join(livvkit.__path__[0], config['data_dir'],
                              config['model_data'])

    if not (os.path.exists(model_data) and os.path.exists(bench_data)):
        # Add more handling here -- what do we want to return for failed tests
        print(
            "ERROR: Could not find necessary data to run the coverage validation!"
        )
        print(model_data)
        print(bench_data)
        print("")
        return elements.error(
            "coverage",
            "Could not find necessary data to run the coverage validation!")

    # Generate the script
    plot_name = "coverage.png"
    output_dir = os.path.join(livvkit.index_dir, 'validation', 'imgs')
    output_path = os.path.join(output_dir, plot_name)
    functions.mkdir_p(output_dir)

    plot_coverage(config['plot_script'], model_data, bench_data, output_path)

    plot_list = [elements.image(plot_name, " ", plot_name)]
    the_page = elements.page('coverage', config['description'],
                             elements.gallery("Plots", plot_list))

    return the_page
Example 8
def run_suite(case, config, summary):
    """ Run the full suite of numerics tests """
    m = importlib.import_module(config['module'])
    m.set_up()
    config["name"] = case
    analysis_data = {}
    bundle = livvkit.numerics_model_module
    model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case)
    bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case)
    plot_dir = os.path.join(livvkit.output_dir, "numerics", "imgs")
    config["plot_dir"] = plot_dir
    functions.mkdir_p(plot_dir)
    model_cases = functions.collect_cases(model_dir)
    bench_cases = functions.collect_cases(bench_dir)

    for mscale in sorted(model_cases):
        bscale = bench_cases.get(mscale, [])
        for mproc in model_cases[mscale]:
            full_name = '-'.join([mscale, mproc])
            bpath = (os.path.join(bench_dir, mscale, mproc.replace("-", os.path.sep))
                     if mproc in bscale else "")
            mpath = os.path.join(model_dir, mscale, mproc.replace("-", os.path.sep))
            model_data = functions.find_file(mpath, "*" + config["output_ext"])
            bench_data = functions.find_file(bpath, "*" + config["output_ext"])
            analysis_data[full_name] = bundle.get_plot_data(model_data,
                                                            bench_data,
                                                            m.setup[case],
                                                            config)
    try:
        el = m.run(config, analysis_data)
    except KeyError:
        el = elements.error("Numerics Plots", "Missing data")
    result = elements.page(case, config['description'], element_list=el)
    summary[case] = _summarize_result(m, analysis_data, config)
    _print_summary(m, case, summary[case])
    functions.create_page_from_template("numerics.html",
                                        os.path.join(livvkit.index_dir, "numerics", case + ".html"))
    functions.write_json(result, os.path.join(livvkit.output_dir, "numerics"), case + ".json")
Example 9
def parse_log(file_path):
    """
    Parse a CISM output log and extract some information.

    Args:
        file_path: absolute path to the log file

    Returns:
        A dictionary created by the elements object summarizing the
        parsed log (dycore type, processor count, convergence statistics)
    """
    if not os.path.isfile(file_path):
        return elements.error(
            "Output Log",
            "Could not open file: " + file_path.split(os.sep)[-1])

    headers = [
        "Converged Iterations", "Avg. Iterations to Converge",
        "Processor Count", "Dycore Type"
    ]

    with open(file_path, 'r') as f:
        dycore_types = {
            "0": "Glide",
            "1": "Glam",
            "2": "Glissade",
            "3": "Albany_felix",
            "4": "BISICLES"
        }
        dycore_type = "Unknown"  # fallback if the log never reports a dycore
        curr_step = 0
        proc_count = 0
        iter_number = 0
        converged_iters = []
        iters_to_converge = []
        for line in f:
            split = line.split()
            if 'CISM dycore type' in line:
                if split[-1] == '=':
                    dycore_type = dycore_types[next(f).strip()]
                else:
                    dycore_type = dycore_types[split[-1]]
            elif 'total procs' in line:
                proc_count += int(split[-1])
            elif 'Nonlinear Solver Step' in line:
                curr_step = int(split[4])
            elif 'Compute ice velocities, time = ' in line:
                converged_iters.append(curr_step)
                curr_step = float(split[-1])
            elif '"SOLVE_STATUS_CONVERGED"' in line:
                iters_to_converge.append(
                    int(split[split.index('"SOLVE_STATUS_CONVERGED"') + 2]))
            elif "Compute dH/dt" in line:
                iters_to_converge.append(int(iter_number))
            elif split and split[0].isdigit():
                iter_number = split[0]
        if not iters_to_converge:
            iters_to_converge.append(int(iter_number))
    data = {
        "Dycore Type": dycore_type,
        "Processor Count": proc_count,
        "Converged Iterations": len(converged_iters),
        "Avg. Iterations to Converge": np.mean(iters_to_converge)
    }
    return elements.table("Output Log", headers, data)
Example 10

def _print_summary(module, case, summary):
    try:
        module.print_summary(case, summary)
    except (NotImplementedError, AttributeError):
        print("    Ran " + case + "!")