Ejemplo n.º 1
0
def run(name, config):
    """
    Runs the analysis of the coverage of the ice sheet over the land mass.
    Produces both an overall coverage percentage metric and a coverage plot.

    Args:
        name: The name of the test
        config: A dictionary representation of the configuration file
    Returns:
        An elements.page with the list of elements to display, or an
        elements.error if the required datasets could not be found
    """
    greenland_data = os.path.join(livvkit.__path__[0], config['data_dir'],
                                  config['gl_data'])
    velocity_data = os.path.join(livvkit.__path__[0], config['data_dir'],
                                 config['vel_data'])

    if not (os.path.exists(greenland_data) and os.path.exists(velocity_data)):
        # Add more handling here -- what do we want to return for failed tests
        return elements.error("lvargo13",
                              "Could not find necessary data for validation!")

    # Directory that will hold the generated plot images
    output_dir = os.path.join(livvkit.index_dir, 'validation', 'imgs')
    output_file_base = os.path.join(output_dir, 'lvargo13')
    functions.mkdir_p(output_dir)

    model_prefix = os.path.join(livvkit.__path__[0], config['data_dir'],
                                config['model_prefix'])
    plot_script = os.path.join(livvkit.__path__[0], config['plot_script'])

    # Build the NCL invocation as an argument list and run it WITHOUT a
    # shell: each 'name = value' assignment becomes one argv entry, which
    # avoids the quoting/injection hazards of concatenating a single
    # command string for shell=True.
    ncl_command = [
        'ncl',
        'gl_data = addfile("{}", "r")'.format(greenland_data),
        'vel_data = addfile("{}", "r")'.format(velocity_data),
        'model_prefix = "{}"'.format(model_prefix),
        'model_suffix = "{}"'.format(config['model_suffix']),
        'model_start = ' + config['model_start'],
        'model_end = ' + config['model_end'],
        'plot_file_base = "{}"'.format(output_file_base),
        plot_script,
    ]

    p = subprocess.Popen(ncl_command,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    ncl_out, ncl_err = p.communicate()
    if p.returncode != 0:
        # Surface NCL failures instead of silently producing an empty
        # gallery (resolves the "Put some error checking here" TODO)
        print("ERROR: NCL exited with status {}:".format(p.returncode))
        print(ncl_err)

    # Collect whatever plots NCL produced and wrap them for display
    output_plots = [
        os.path.basename(path)
        for path in glob.glob(output_file_base + "*.png")
    ]
    plot_list = [elements.image(plot, "", plot) for plot in output_plots]

    return elements.page("lvargo13", config['description'],
                         elements.gallery("Plots", plot_list))
Ejemplo n.º 2
0
def run(name, config, print_details=False):
    """
    Runs the extension.

    Args:
        name: The name of the extension
        config: The test's config dictionary
        print_details: Whether to print the analysis details to stdout
                       (default: False)

    Returns:
       A LIVVkit page element containing the LIVVkit elements to display on a webpage
    """
    # FIXME: move into a config to NameSpace function
    test_args = argparse.Namespace(
        **OrderedDict((key.replace('-', '_'), value)
                      for key, value in config.items()))

    test_args.img_dir = os.path.join(livvkit.output_dir, 'validation',
                                     'imgs', name)
    fn.mkdir_p(test_args.img_dir)

    details, img_gal = main(test_args)

    # Summarize the hypothesis-test outcome in a single results table
    accepted = details['h0'] == 'accept'
    res_table = el.Table(
        title="Results",
        data=OrderedDict({
            'Null hypothesis': [details['h0']],
            'T test (t, p)': [details['T test (t, p)']],
            'Test status': ['pass' if accepted else 'fail'],
            'Ensembles': ['statistically identical' if accepted
                          else 'statistically different'],
        }))

    if print_details:
        _print_details(details)

    bib_html = bib2html(os.path.join(os.path.dirname(__file__), 'pg.bib'))
    tabs = el.Tabs({"Figures": [img_gal],
                    "References": [el.RawHTML(bib_html)]})

    return el.Page(name,
                   __doc__.replace('\n\n', '<br/><br/>'),
                   elements=[res_table, tabs])
Ejemplo n.º 3
0
def main():
    """Translate selected LIVVkit JSON result files into LaTeX documents.

    Reads the data directory from ``sys.argv[1]`` and the output directory
    from ``sys.argv[2]``, then writes a ``.tex`` file per input JSON file.
    """
    datadir = sys.argv[1]
    outdir = sys.argv[2]
    functions.mkdir_p(outdir)

    # NOTE(review): the original globbed every JSON file under `datadir`
    # and then immediately overwrote the result with the hard-coded list
    # below, so the glob was dead code and has been removed.  Restore
    # `glob.glob(datadir + "/**/*.json", recursive=True)` to translate
    # everything.
    data_files = [datadir + '/verification/dome.json']
    # data_files = [datadir + '/index.json']

    for each in data_files:
        data = functions.read_json(each)
        tex = th.translate_page(data)
        outfile = os.path.join(outdir,
                               os.path.basename(each).replace('json', 'tex'))
        with open(outfile, 'w') as f:
            f.write(tex)
Ejemplo n.º 4
0
def main():
    """Translate selected LIVVkit JSON result files into LaTeX documents.

    Reads the data directory from ``sys.argv[1]`` and the output directory
    from ``sys.argv[2]``, then writes a ``.tex`` file per input JSON file.
    """
    datadir = sys.argv[1]
    outdir = sys.argv[2]
    functions.mkdir_p(outdir)

    # NOTE(review): the recursive glob result was immediately discarded in
    # favor of the hard-coded list below (debugging leftover); the dead
    # glob has been removed.  Restore
    # `glob.glob(datadir + "/**/*.json", recursive=True)` to process all
    # result files.
    data_files = [datadir + '/verification/dome.json']
    # data_files = [datadir + '/index.json']

    for each in data_files:
        data = functions.read_json(each)
        tex = th.translate_page(data)
        outfile = os.path.join(outdir,
                               os.path.basename(each).replace('json', 'tex'))
        with open(outfile, 'w') as f:
            f.write(tex)
Ejemplo n.º 5
0
def run(name, config):
    """
    Runs the analysis.

    Args:
        name: The name of the test
        config: A dictionary representation of the configuration file

    Returns:
       The result of elements.page with the list of elements to display
    """
    # Flatten the config dict into `--key value` CLI-style arguments.
    # (The original used a throwaway list comprehension purely for its
    # side effects; a plain loop states the intent.)
    config_arg_list = []
    for key, val in config.items():
        config_arg_list.extend(['--' + key, str(val)])

    args = parse_args(config_arg_list)

    args.img_dir = os.path.join(livvkit.output_dir, 'validation', 'imgs', name)
    fn.mkdir_p(args.img_dir)

    details, img_gal = main(args)

    # Sort per-variable details so table rows come out in a stable order
    tbl_data = OrderedDict(sorted(details.items()))

    tbl_el = {
        'Type': 'V-H Table',
        'Title': 'Validation',
        'TableTitle': 'Analyzed variables',
        'Headers': ['h0', 'K-S test (D, p)', 'T test (t, p)'],
        'Data': {
            '': tbl_data
        }
    }

    tl = [
        el.tab('Table', element_list=[tbl_el]),
        el.tab('Gallery', element_list=[img_gal])
    ]

    page = el.page(name, __doc__, tab_list=tl)
    # Propagate the failure threshold so callers can judge the result
    page['critical'] = args.critical

    return page
Ejemplo n.º 6
0
def run(name, config):
    """
    Runs the analysis of the coverage of the ice sheet over the land mass.
    Produces both an overall coverage percentage metric and a coverage plot.

    Args:
        name: The name of the test
        config: A dictionary representation of the configuration file
    Returns:
        An elements.page with the list of elements to display
    """
    data_root = os.path.join(livvkit.__path__[0], config['data_dir'])
    bench_data = os.path.join(data_root, config['bench_data'])
    model_data = os.path.join(data_root, config['model_data'])

    have_data = os.path.exists(model_data) and os.path.exists(bench_data)
    if not have_data:
        # Add more handling here -- what do we want to return for failed tests
        print(
            "ERROR: Could not find necessary data to run the coverage validation!"
        )
        print(model_data)
        print(bench_data)
        print("")
        return elements.error(
            "coverage",
            "Could not find necessary data to run the coverage validation!")

    # Generate the script
    plot_name = "coverage.png"
    output_dir = os.path.join(livvkit.index_dir, 'validation', 'imgs')
    output_path = os.path.join(output_dir, plot_name)
    functions.mkdir_p(output_dir)

    plot_coverage(config['plot_script'], model_data, bench_data, output_path)

    gallery = elements.gallery("Plots",
                               [elements.image(plot_name, " ", plot_name)])
    return elements.page('coverage', config['description'], gallery)
Ejemplo n.º 7
0
def run_suite(case, config, summary):
    """ Run the full suite of numerics tests

    Args:
        case: Name of the test case to run
        config: The case's config dictionary (mutated: ``name`` and
            ``plot_dir`` keys are set as side effects)
        summary: Dict that receives this case's summarized result
    """
    m = importlib.import_module(config['module'])
    m.set_up()
    config["name"] = case
    analysis_data = {}
    bundle = livvkit.numerics_model_module
    model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case)
    bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case)
    plot_dir = os.path.join(livvkit.output_dir, "numerics", "imgs")
    config["plot_dir"] = plot_dir
    functions.mkdir_p(plot_dir)
    model_cases = functions.collect_cases(model_dir)
    bench_cases = functions.collect_cases(bench_dir)

    for mscale in sorted(model_cases):
        # dict.get avoids the double lookup of `x[k] if k in x else []`
        bscale = bench_cases.get(mscale, [])
        for mproc in model_cases[mscale]:
            full_name = '-'.join([mscale, mproc])
            # Empty benchmark path when this processor count has no
            # matching benchmark run
            bpath = (os.path.join(bench_dir, mscale, mproc.replace("-", os.path.sep))
                     if mproc in bscale else "")
            mpath = os.path.join(model_dir, mscale, mproc.replace("-", os.path.sep))
            model_data = functions.find_file(mpath, "*" + config["output_ext"])
            bench_data = functions.find_file(bpath, "*" + config["output_ext"])
            analysis_data[full_name] = bundle.get_plot_data(model_data,
                                                            bench_data,
                                                            m.setup[case],
                                                            config)
    try:
        el = m.run(config, analysis_data)
    except KeyError:
        # Missing analysis data is surfaced as an error element on the page
        el = elements.error("Numerics Plots", "Missing data")
    result = elements.page(case, config['description'], element_list=el)
    summary[case] = _summarize_result(m, analysis_data, config)
    _print_summary(m, case, summary[case])
    functions.create_page_from_template("numerics.html",
                                        os.path.join(livvkit.index_dir, "numerics", case + ".html"))
    functions.write_json(result, os.path.join(livvkit.output_dir, "numerics"), case + ".json")
Ejemplo n.º 8
0
        # NOTE(review): this span is the interior of an enclosing function
        # whose `def` line and outer loop over `s` are outside this view,
        # so it is documented in place rather than rewritten.
        # Collect the variable names present in section `s` on the model
        # and benchmark sides; a missing section contributes an empty set.
        model_vars = set(six.iterkeys(model_data[s])) if s in model_sections else set()
        bench_vars = set(six.iterkeys(bench_data[s])) if s in bench_sections else set()
        all_vars = set(model_vars.union(bench_vars))
        for v in all_vars:
            # 'NA' marks a variable absent from one side of the comparison
            model_val = model_data[s][v] if s in model_sections and v in model_vars else 'NA'
            bench_val = bench_data[s][v] if s in bench_sections and v in bench_vars else 'NA'
            # Values only count as "same" when both sides actually have one
            same = True if model_val == bench_val and model_val != 'NA' else False
            diff_dict[s][v] = (same, model_val, bench_val)
    return elements.file_diff("Configuration Comparison", diff_dict)


def plot_bit_for_bit(case, var_name, model_data, bench_data, diff_data):
    """ Create a bit for bit plot

    NOTE(review): this copy of the function is truncated in this view (it
    ends at the plt.figure call); a complete version of the same function
    appears elsewhere in this file.
    """
    plot_name = case + "_" + var_name + ".png"
    # NOTE(review): the nested os.path.join is redundant (single argument)
    plot_path = os.path.join(os.path.join(livvkit.output_dir, "verification", "imgs"))
    functions.mkdir_p(plot_path)
    # Model and benchmark arrays must have the same rank to be comparable
    m_ndim = np.ndim(model_data)
    b_ndim = np.ndim(bench_data)
    if m_ndim != b_ndim:
        return "Dataset dimensions didn't match!"
    # Reduce higher-rank arrays to a 2-D slice: the last index along the
    # leading axis (presumably time -- TODO confirm), plus the first index
    # of the second axis for 4-D data
    if m_ndim == 3:
        model_data = model_data[-1]
        bench_data = bench_data[-1]
        diff_data = diff_data[-1]
        plot_title = "Showing "+var_name+"[-1,:,:]"
    elif m_ndim == 4:
        model_data = model_data[-1][0]
        bench_data = bench_data[-1][0]
        diff_data = diff_data[-1][0]
        plot_title = "Showing "+var_name+"[-1,0,:,:]"
    plt.figure(figsize=(12, 3), dpi=80)
Ejemplo n.º 9
0
def run(name, config):
    """
    Runs the analysis.

    Args:
        name: The name of the test
        config: A dictionary representation of the configuration file

    Returns:
       The result of elements.page with the list of elements to display
    """
    # Flatten the config into `--key value` CLI-style arguments.  A plain
    # loop replaces the original side-effect-only list comprehension.
    config_arg_list = []
    for key, val in config.items():
        config_arg_list.extend(['--' + key, str(val)])

    args = parse_args(config_arg_list)

    args.img_dir = os.path.join(livvkit.output_dir, 'validation', 'imgs', name)
    fn.mkdir_p(args.img_dir)

    details, img_gal = main(args)

    table_data = pd.DataFrame(details).T
    _hdrs = [
        "h0",
        "K-S test (D, p)",
        "T test (t, p)",
        "mean (test case, ref. case)",
        "std (test case, ref. case)",
    ]
    table_data = table_data[_hdrs]
    # Format every statistics column (all but "h0") for display
    for _hdr in _hdrs[1:]:
        table_data[_hdr] = table_data[_hdr].apply(col_fmt)

    tables = [
        el.Table("Rejected", data=table_data[table_data["h0"] == "reject"]),
        el.Table("Accepted", data=table_data[table_data["h0"] == "accept"]),
        el.Table("Null", data=table_data[~table_data["h0"].isin(["accept", "reject"])])
    ]

    bib_html = bib2html(os.path.join(os.path.dirname(__file__), 'ks.bib'))

    tabs = el.Tabs(
        {
            "Figures": img_gal,
            "Details": tables,
            "References": [el.RawHTML(bib_html)]
        }
    )
    rejects = [var for var, dat in details.items() if dat["h0"] == "reject"]

    # The ensemble passes when fewer variables reject the null hypothesis
    # than the configured critical threshold
    results = el.Table(
        title="Results",
        data=OrderedDict(
            {
                'Test status': ['pass' if len(rejects) < args.critical else 'fail'],
                'Variables analyzed': [len(details)],
                'Rejecting': [len(rejects)],
                'Critical value': [int(args.critical)],
                'Ensembles': [
                    'statistically identical' if len(rejects) < args.critical else 'statistically different'
                ]
            }
        )
    )

    # FIXME: Put into a ___ function
    page = el.Page(name, __doc__.replace('\n\n', '<br><br>'), elements=[results, tabs])
    return page
Ejemplo n.º 10
0
def test_fn_mkdir_p_silent_existing(tmpdir):
    """mkdir_p on a directory that already exists must not raise."""
    existing = str(tmpdir.mkdir('mkdir_p'))
    functions.mkdir_p(existing)
Ejemplo n.º 11
0
def run(name, config, print_details=False):
    """
    Runs the extension.

    Args:
        name: The name of the extension
        config: The test's config dictionary
        print_details: Whether to print the analysis details to stdout
                       (default: False)

    Returns:
       A LIVVkit page element containing the LIVVkit elements to display on a webpage
    """
    # FIXME: move into a config to NameSpace function
    test_args = OrderedDict([(k.replace('-', '_'), v)
                             for k, v in config.items()])
    test_args = argparse.Namespace(**test_args)

    test_args.img_dir = os.path.join(livvkit.output_dir, 'validation', 'imgs',
                                     name)
    fn.mkdir_p(test_args.img_dir)
    details, img_gal = main(test_args)

    if print_details:
        _print_details(details)

    # NOTE(review): the original also built three 'V-H Table' dicts here
    # (global/land/ocean) that were never referenced afterwards; that dead
    # code has been removed.
    domain_headers = ['Null hypothesis', 'T test (t, P)']
    bib_html = bib2html(os.path.join(os.path.dirname(__file__), 'tsc.bib'))

    # One list of detail tables per domain, one table per analyzed time
    detail_tables = {"global": [], "land": [], "ocean": []}
    table_headers = ["Variable", *domain_headers]
    for level in detail_tables:
        for _time in details[level]:
            tbl_data = {_hdr: [] for _hdr in table_headers}
            for _var in details[level][_time]:
                tbl_data["Variable"].append(_var)
                for _hdr, val in details[level][_time][_var].items():
                    tbl_data[_hdr].append(val)

            detail_tables[level].append(
                el.Table(title=f"{level.capitalize()}, {_time}",
                         data=tbl_data))

    tabs = el.Tabs({
        'Figures': [img_gal],
        'Global_details': detail_tables["global"],
        'Land_details': detail_tables["land"],
        'Ocean_details': detail_tables["ocean"],
        'References': [el.RawHTML(bib_html)]
    })

    results = el.Table(title="Results",
                       data=OrderedDict({
                           'Test status': [details['overall']],
                           'Global': [details['domains']['delta_l2_global']],
                           'Land': [details['domains']['delta_l2_land']],
                           'Ocean': [details['domains']['delta_l2_ocean']],
                           'Ensembles': [
                               'statistically identical' if details['overall']
                               == 'Pass' else 'statistically different'
                           ],
                       }))

    # FIXME: Put into a ___ function
    doc_text = __doc__.format(
        (1 - test_args.p_threshold) * 100).replace('\n\n', '<br><br>')
    page = el.Page(name, doc_text, elements=[results, tabs])
    return page
Ejemplo n.º 12
0
def plot_bit_for_bit(case, var_name, model_data, bench_data, diff_data):
    """ Create a bit for bit plot

    Plots the model output, benchmark output, and their difference side by
    side; model and benchmark panels share one color scale.

    Args:
        case: Name of the test case (used in the output file name)
        var_name: Name of the variable being plotted
        model_data: Array of model output
        bench_data: Array of benchmark output (same rank as model_data)
        diff_data: Array of model/benchmark differences

    Returns:
        Path of the saved plot relative to the verification output
        directory, or an error string when the dataset ranks don't match
    """
    plot_title = ""
    plot_name = case + "_" + var_name + ".png"
    # (The original wrapped this in a redundant nested os.path.join)
    plot_path = os.path.join(livvkit.output_dir, "verification", "imgs")
    functions.mkdir_p(plot_path)
    m_ndim = np.ndim(model_data)
    b_ndim = np.ndim(bench_data)
    if m_ndim != b_ndim:
        return "Dataset dimensions didn't match!"
    # Reduce higher-rank arrays to a 2-D slice: the last index along the
    # leading axis, plus the first index of the second axis for 4-D data
    if m_ndim == 3:
        model_data = model_data[-1]
        bench_data = bench_data[-1]
        diff_data = diff_data[-1]
        plot_title = "Showing "+var_name+"[-1,:,:]"
    elif m_ndim == 4:
        model_data = model_data[-1][0]
        bench_data = bench_data[-1][0]
        diff_data = diff_data[-1][0]
        plot_title = "Showing "+var_name+"[-1,0,:,:]"
    plt.figure(figsize=(12, 3), dpi=80)
    plt.clf()

    # Calculate min and max to share one colorbar scale between the model
    # and benchmark panels
    _max = np.amax([np.amax(model_data), np.amax(bench_data)])
    _min = np.amin([np.amin(model_data), np.amin(bench_data)])

    # Plot the model output
    plt.subplot(1, 3, 1)
    plt.xlabel("Model Data")
    plt.ylabel(var_name)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(model_data, vmin=_min, vmax=_max, interpolation='nearest', cmap=colormaps.viridis)
    plt.colorbar()

    # Plot the benchmark data
    plt.subplot(1, 3, 2)
    plt.xlabel("Benchmark Data")
    plt.xticks([])
    plt.yticks([])
    plt.imshow(bench_data, vmin=_min, vmax=_max, interpolation='nearest', cmap=colormaps.viridis)
    plt.colorbar()

    # Plot the difference (auto-scaled, independent colorbar)
    plt.subplot(1, 3, 3)
    plt.xlabel("Difference")
    plt.xticks([])
    plt.yticks([])
    plt.imshow(diff_data, interpolation='nearest', cmap=colormaps.viridis)
    plt.colorbar()

    plt.tight_layout(rect=(0, 0, 0.95, 0.9))
    plt.suptitle(plot_title)

    plot_file = os.path.join(plot_path, plot_name)
    if livvkit.publish:
        # Publishing also emits a high-resolution vector (.eps) version
        plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600)
    plt.savefig(plot_file)
    plt.close()
    return os.path.join(os.path.relpath(plot_path,
                                        os.path.join(livvkit.output_dir, "verification")),
                        plot_name)
Ejemplo n.º 13
0
def test_fn_mkdir_p_silent_existing(tmpdir):
    """Calling mkdir_p on an already-existing directory should be a no-op."""
    pre_made = tmpdir.mkdir('mkdir_p')
    # Must not raise even though the target already exists
    functions.mkdir_p(str(pre_made))
Ejemplo n.º 14
0
def test_fn_mkdir_p_new_dir_and_parent(tmpdir):
    """mkdir_p must create the leaf directory and any missing parents."""
    testdir = tmpdir.join('mkdir_p', 'test_depth')
    functions.mkdir_p(str(testdir))

    # Assert truthiness directly; `assert x is True` is fragile identity
    # testing and fails spuriously on any truthy non-bool return
    assert testdir.check(dir=True)
Ejemplo n.º 15
0
def run_suite(case, config, summary):
    """ Run the full suite of performance tests

    Gathers timing data for every model sub-case, builds the scaling and
    timing-breakdown plots, and writes the resulting page to HTML/JSON.

    Args:
        case: Name of the test case to run
        config: The case's config dictionary (mutated: ``name`` and
            ``case`` keys are set as side effects)
        summary: Dict that receives this case's summarized result
    """
    config["name"] = case
    timing_data = dict()
    model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case)
    bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case)
    plot_dir = os.path.join(livvkit.output_dir, "performance", "imgs")
    model_cases = functions.collect_cases(model_dir)
    bench_cases = functions.collect_cases(bench_dir)
    functions.mkdir_p(plot_dir)

    # Generate all of the timing data
    for subcase in sorted(model_cases):
        # dict.get avoids the double lookup of `x[k] if k in x else []`
        bench_subcases = bench_cases.get(subcase, [])
        timing_data[subcase] = dict()
        for mcase in model_cases[subcase]:
            config["case"] = "-".join([subcase, mcase])
            # No benchmark path when the benchmark run lacks this sub-case
            bpath = (os.path.join(bench_dir, subcase, mcase.replace("-", os.path.sep))
                     if mcase in bench_subcases else None)
            mpath = os.path.join(model_dir, subcase, mcase.replace("-", os.path.sep))
            timing_data[subcase][mcase] = _analyze_case(mpath, bpath, config)

    # Create scaling and timing breakdown plots
    weak_data = weak_scaling(timing_data, config['scaling_var'],
                             config['weak_scaling_points'])
    strong_data = strong_scaling(timing_data, config['scaling_var'],
                                 config['strong_scaling_points'])

    timing_plots = [
        generate_scaling_plot(weak_data,
                              "Weak scaling for " + case.capitalize(),
                              "runtime (s)", "",
                              os.path.join(plot_dir, case + "_weak_scaling.png")
                              ),
        weak_scaling_efficiency_plot(weak_data,
                                     "Weak scaling efficiency for " + case.capitalize(),
                                     "Parallel efficiency (% of linear)", "",
                                     os.path.join(plot_dir, case + "_weak_scaling_efficiency.png")
                                     ),
        generate_scaling_plot(strong_data,
                              "Strong scaling for " + case.capitalize(),
                              "Runtime (s)", "",
                              os.path.join(plot_dir, case + "_strong_scaling.png")
                              ),
        strong_scaling_efficiency_plot(strong_data,
                                       "Strong scaling efficiency for " + case.capitalize(),
                                       "Parallel efficiency (% of linear)", "",
                                       os.path.join(plot_dir,
                                                    case + "_strong_scaling_efficiency.png")
                                       ),
        ]

    # One timing-breakdown plot per scale, in scale order
    timing_plots.extend(
        generate_timing_breakdown_plot(timing_data[s],
                                       config['scaling_var'],
                                       "Timing breakdown for " + case.capitalize()+" "+s,
                                       "",
                                       os.path.join(plot_dir, case+"_"+s+"_timing_breakdown.png")
                                       )
        for s in sorted(six.iterkeys(timing_data), key=functions.sort_scale))

    # Build an image gallery and write the results
    el = [
            elements.gallery("Performance Plots", timing_plots)
         ]
    result = elements.page(case, config["description"], element_list=el)
    summary[case] = _summarize_result(timing_data, config)
    _print_result(case, summary)
    functions.create_page_from_template("performance.html",
                                        os.path.join(livvkit.index_dir, "performance",
                                                     case + ".html"))
    functions.write_json(result, os.path.join(livvkit.output_dir, "performance"),
                         case + ".json")
Ejemplo n.º 16
0
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from __future__ import absolute_import, division, print_function, unicode_literals

import os
import sys
import glob

from livvkit.util import TexHelper as th
from livvkit.util import functions

# Translate selected LIVVkit JSON results into LaTeX files.
datadir = sys.argv[1]
outdir = sys.argv[2]
functions.mkdir_p(outdir)

# NOTE(review): the original globbed every JSON file under `datadir` and
# then immediately overwrote the result with the hard-coded list below,
# so the glob was dead code and has been removed.  Restore
# `glob.glob(datadir + "/**/*.json", recursive=True)` to process all files.
data_files = [datadir + '/verification/dome.json']
# data_files = [datadir + '/index.json']

for each in data_files:
    data = functions.read_json(each)
    tex = th.translate_page(data)
    outfile = os.path.join(outdir,
                           os.path.basename(each).replace('json', 'tex'))
    with open(outfile, 'w') as f:
        f.write(tex)
Ejemplo n.º 17
0
from livvkit.util.LIVVDict import LIVVDict
from livvkit.util import elements

# Number of seconds in one day; presumably a conversion factor for timing
# data -- its use is not visible in this chunk, confirm against callers
SEC_PER_DAY = 86400.0


def run_suite(case, config, summary):
    """ Run the full suite of performance tests

    NOTE(review): this copy of the function is truncated in this view --
    it ends just after the timing data is gathered.

    Args:
        case: Name of the test case to run
        config: The case's config dictionary (mutated: ``name`` and
            ``case`` keys are set as side effects)
        summary: Dict collecting per-case results (not touched in the
            visible portion of this function)
    """
    config["name"] = case
    timing_data = dict()
    model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case)
    bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case)
    plot_dir = os.path.join(livvkit.output_dir, "performance", "imgs")
    model_cases = functions.collect_cases(model_dir)
    bench_cases = functions.collect_cases(bench_dir)
    functions.mkdir_p(plot_dir)

    # Generate all of the timing data
    for subcase in sorted(model_cases):
        bench_subcases = bench_cases[subcase] if subcase in bench_cases else []
        timing_data[subcase] = dict()
        for mcase in model_cases[subcase]:
            config["case"] = "-".join([subcase, mcase])
            # No benchmark path when the benchmark run lacks this sub-case
            bpath = (os.path.join(bench_dir, subcase,
                                  mcase.replace("-", os.path.sep))
                     if mcase in bench_subcases else None)
            mpath = os.path.join(model_dir, subcase,
                                 mcase.replace("-", os.path.sep))
            timing_data[subcase][mcase] = _analyze_case(mpath, bpath, config)

    # Create scaling and timing breakdown plots
Ejemplo n.º 18
0
def test_fn_mkdir_p_new_dir_and_parent(tmpdir):
    """mkdir_p must create the leaf directory and any missing parents."""
    testdir = tmpdir.join('mkdir_p', 'test_depth')
    functions.mkdir_p(str(testdir))

    # Assert truthiness directly; `assert x is True` is fragile identity
    # testing and fails spuriously on any truthy non-bool return
    assert testdir.check(dir=True)