Example #1
def run_mf2005(namefile, regression=True):
    """
    Run the simulation.

    """

    # Set root as the directory name where namefile is located
    testname = pymake.get_sim_name(namefile, rootpth=config.testpaths[0])[0]

    # Set nam as namefile name without path
    nam = os.path.basename(namefile)

    # Setup
    testpth = os.path.join(config.testdir, testname)
    pymake.setup(namefile, testpth)

    # run test models
    print('running model...{}'.format(testname))
    exe_name = os.path.abspath(config.target)
    success, buff = flopy.run_model(exe_name,
                                    nam,
                                    model_ws=testpth,
                                    silent=True)

    # If it is a regression run, then setup and run the model with the
    # release target and the reference target
    success_reg = True
    if regression:
        testname_reg = os.path.basename(config.target_release)
        testpth_reg = os.path.join(testpth, testname_reg)
        pymake.setup(namefile, testpth_reg)
        print('running regression model...{}'.format(testname_reg))
        exe_name = os.path.abspath(config.target_release)
        success_reg, buff = flopy.run_model(exe_name,
                                            nam,
                                            model_ws=testpth_reg,
                                            silent=True)

        if success_reg:
            outfile1 = os.path.join(
                os.path.split(os.path.join(testpth, nam))[0], 'bud.cmp')
            outfile2 = os.path.join(
                os.path.split(os.path.join(testpth, nam))[0], 'hds.cmp')
            success_reg = pymake.compare(os.path.join(testpth, nam),
                                         os.path.join(testpth_reg, nam),
                                         precision='single',
                                         max_cumpd=0.01,
                                         max_incpd=0.01,
                                         htol=0.001,
                                         outfile1=outfile1,
                                         outfile2=outfile2)

    # Clean things up
    if success and success_reg and not config.retain:
        pymake.teardown(testpth)
    assert success, 'model did not run'
    assert success_reg, 'regression model did not meet comparison criteria'

    return
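A minimal sketch of how a test script might drive run_mf2005 over every name file in an example directory. example_dir is a placeholder path, and pymake.get_namefiles is assumed to be available in the same pymake version that provides setup/compare/teardown.

# Hypothetical driver loop (sketch): collect MODFLOW name files and run each
# one through run_mf2005. example_dir is a placeholder, and
# pymake.get_namefiles is assumed to exist alongside pymake.setup/compare.
import os
import pymake

example_dir = os.path.join('..', 'examples')
namefiles = pymake.get_namefiles(example_dir)

for namefile in namefiles:
    # set regression=False to skip the release-target comparison run
    run_mf2005(namefile, regression=True)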
Example #2
def run_mf2005(namefile, regression=True):
    """
    Run the simulation.

    """

    # Set root as the directory name where namefile is located
    testname = pymake.get_sim_name(namefile, rootpth=testpaths[0])[0]

    # Set nam as namefile name without path
    nam = os.path.basename(namefile)

    # Setup
    testpth = os.path.join(testdir, testname)
    pymake.setup(namefile, testpth)

    # run test models
    print('running model...{}'.format(testname))
    exe_name = os.path.abspath(target_release)
    success, buff = flopy.run_model(exe_name, nam, model_ws=testpth,
                                    silent=True)

    assert success, 'base model {} did not run.'.format(nam)

    # If it is a regression run, then setup and run the model with the
    # release target and the reference target
    success_reg = True
    if regression:
        testname_reg = os.path.basename(target_previous)
        testpth_reg = os.path.join(testpth, testname_reg)
        pymake.setup(namefile, testpth_reg)
        print('running regression model...{}'.format(testname_reg))
        exe_name = os.path.abspath(target_previous)
        success_reg, buff = flopy.run_model(exe_name, nam,
                                            model_ws=testpth_reg,
                                            silent=False)

        assert success_reg, 'regression model {} did not run.'.format(nam)

        # compare results
        outfile1 = os.path.join(os.path.split(os.path.join(testpth, nam))[0],
                                'bud.cmp')
        outfile2 = os.path.join(os.path.split(os.path.join(testpth, nam))[0],
                                'hds.cmp')
        success_reg = pymake.compare(os.path.join(testpth, nam),
                                     os.path.join(testpth_reg, nam),
                                     precision='single',
                                     max_cumpd=0.01, max_incpd=0.01,
                                     htol=0.001,
                                     outfile1=outfile1, outfile2=outfile2)

        assert success_reg, \
            'regression model {} did not meet comparison criteria.'.format(nam)

    # Clean things up
    if not retain:
        pymake.teardown(testpth)

    return
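For context, Example #2 references module-level names (testpaths, testdir, target_release, target_previous, retain) instead of a config module. A minimal sketch of placeholder definitions this example assumes; all paths below are hypothetical, not taken from the source.

# Hypothetical module-level settings assumed by Example #2 (placeholder paths).
import os

testpaths = [os.path.join('..', 'examples')]       # where the name files live
testdir = os.path.join('.', 'temp')                # scratch area for test runs
target_release = os.path.join('bin', 'mf2005')     # executable under test
target_previous = os.path.join('bin', 'mf2005.r')  # reference executable
retain = False                                     # keep test output after a pass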
Example #3
def run_mf2005(namefile, regression=True):
    """
    Run the simulation.

    """
    if namefile is not None:
        # Set root as the directory name where namefile is located
        testname = pymake.get_sim_name(namefile, rootpth=expth)[0]

        # Set nam as namefile name without path
        nam = os.path.basename(namefile)

        # Setup
        testpth = os.path.join(dstpth, testname)
        pymake.setup(namefile, testpth)

        # run test models
        exe_name = os.path.abspath(epth)
        msg = "running model...{}".format(testname) + " using {}".format(
            exe_name)
        print(msg)
        if os.path.exists(exe_name):
            success, buff = flopy.run_model(exe_name,
                                            nam,
                                            model_ws=testpth,
                                            silent=True)
        else:
            success = False

        assert success, "base model {} ".format(nam) + "did not run."

        # If it is a regression run, then setup and run the model with the
        # release target and the reference target
        success_reg = True
        if regression:
            testname_reg = os.path.basename(mfpth)
            testpth_reg = os.path.join(testpth, testname_reg)
            pymake.setup(namefile, testpth_reg)
            exe_name = os.path.abspath(mfpth)
            msg = "running regression model...{} using {}".format(
                testname_reg, exe_name)
            print(msg)

            if os.path.exists(exe_name):
                success_reg, buff = flopy.run_model(exe_name,
                                                    nam,
                                                    model_ws=testpth_reg,
                                                    silent=False)
            else:
                success_reg = False

            assert success_reg, ("regression model {} ".format(nam) +
                                 "did not run.")

        # compare results (only meaningful when a regression run was made)
        if regression and success and success_reg:
            fpth = os.path.split(os.path.join(testpth, nam))[0]
            outfile1 = os.path.join(fpth, "bud.cmp")
            fpth = os.path.split(os.path.join(testpth, nam))[0]
            outfile2 = os.path.join(fpth, "hds.cmp")
            success_reg = pymake.compare(
                os.path.join(testpth, nam),
                os.path.join(testpth_reg, nam),
                precision="single",
                max_cumpd=0.01,
                max_incpd=0.01,
                htol=0.001,
                outfile1=outfile1,
                outfile2=outfile2,
            )
        # Clean things up
        if success_reg:
            pymake.teardown(testpth)
        else:
            success = False
            errmsg = "comparison failed...{}".format(os.path.basename(nam))
    else:
        success = False
        errmsg = "{} does not exist".format(target)

    assert success, errmsg

    return
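When a comparison fails, the bud.cmp and hds.cmp summaries written by pymake.compare (see Examples #1 through #3) can be echoed to the test log. A minimal sketch, assuming the file names used above; the helper below is hypothetical.

# Hypothetical helper (sketch): print the budget and head comparison summaries
# that pymake.compare wrote into the test workspace, using the bud.cmp and
# hds.cmp names from the examples above.
import os

def dump_comparison_files(testpth):
    for fname in ('bud.cmp', 'hds.cmp'):
        fpth = os.path.join(testpth, fname)
        if os.path.exists(fpth):
            print('--- {} ---'.format(fpth))
            with open(fpth) as f:
                print(f.read())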
Example #4
def run_mfusg(namefile, comparison=True):
    """
    Run the simulation.

    """

    # Set root as the directory name where namefile is located
    testname = pymake.get_sim_name(namefile, rootpth=config.testpaths[2])[0]

    # Set nam as namefile name without path
    nam = os.path.basename(namefile)

    # Setup
    testpth = os.path.join(config.testdir, testname)
    pymake.setup(namefile, testpth)

    # run test models
    print('running model...{}'.format(testname))
    exe_name = os.path.abspath(config.target)
    success, buff = flopy.run_model(exe_name,
                                    nam,
                                    model_ws=testpth,
                                    silent=True)
    success_cmp = True
    if comparison:
        action = pymake.setup_comparison(namefile, testpth)
        if action is not None:
            # only build the comparison workspace path when a comparison exists
            testpth_cmp = os.path.join(testpth, action)
            files_cmp = None
            if action.lower() == '.cmp':
                files_cmp = []
                files = os.listdir(testpth_cmp)
                for file in files:
                    files_cmp.append(
                        os.path.abspath(os.path.join(testpth_cmp, file)))
                success_cmp = True
                print(files_cmp)
            else:
                print('running comparison model...{}'.format(testpth_cmp))
                key = action.lower().replace('.cmp', '')
                exe_name = config.target_dict[key]
                success_cmp, buff = flopy.run_model(exe_name,
                                                    nam,
                                                    model_ws=testpth_cmp,
                                                    silent=True)
            if success_cmp:
                outfile1 = os.path.join(
                    os.path.split(os.path.join(testpth, nam))[0], 'bud.cmp')
                outfile2 = os.path.join(
                    os.path.split(os.path.join(testpth, nam))[0], 'hds.cmp')
                success_cmp = pymake.compare(os.path.join(testpth, nam),
                                             os.path.join(testpth_cmp, nam),
                                             precision='single',
                                             max_cumpd=0.01,
                                             max_incpd=0.01,
                                             htol=0.001,
                                             outfile1=outfile1,
                                             outfile2=outfile2,
                                             files2=files_cmp)

    # Clean things up
    if success and success_cmp and not config.retain:
        pymake.teardown(testpth)
    assert success, 'model did not run'
    assert success_cmp, 'comparison model did not meet comparison criteria'

    return
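Example #4 looks up the comparison executable in config.target_dict, keyed by the setup_comparison action with the '.cmp' suffix stripped. A minimal sketch of what such a mapping might look like; the keys and paths below are assumptions for illustration, not taken from the source.

# Hypothetical target_dict (sketch): maps a comparison key such as 'mf2005'
# (derived from an action like 'mf2005.cmp') to an executable path.
# All paths are placeholders.
import os

target_dict = {
    'mf2005': os.path.abspath(os.path.join('bin', 'mf2005')),
    'mfnwt': os.path.abspath(os.path.join('bin', 'mfnwt')),
    'mfusg': os.path.abspath(os.path.join('bin', 'mfusg')),
}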