Code example #1
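These scripts are shown without their import headers. A plausible shared preamble, reconstructed from the names used throughout (the import paths for the project-local helper modules mm and tests are hypothetical, not the repository's actual layout):

import argparse
import ast
import itertools as it
import multiprocessing as mp
import os
import shutil
import subprocess as subp
import sys
from os.path import join as pjoin
from pprint import pprint

import scipy as sp  # sp.loadtxt: older scipy re-exported numpy's loadtxt

# Project-local helpers (module names below are hypothetical): mm runs
# parameter sweeps of the driver, tests checks their output.
import micromag_model as mm
import micromag_tests as tests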
def main():
    """Check that all the preconditioners run without crashing (doesn't check that they work robustly because they don't yet!)
    """

    # Look for parallel in args
    parser = argparse.ArgumentParser()
    parser.add_argument("--parallel", action="store_true")
    args = parser.parse_args()

    llg_precs = ["exact", "block"]
    llg_sub_precs = ["exact", "block"]

    # With magnetostatics
    argdicts_ms = {
        "-driver": "llg",
        "-dt": 0.1,
        "-solver": "gmres",
        "-matrix-type": "som",
        "-prec": "som-main-block",
        "-llg-prec": llg_precs,
        "-llg-sub-prec": llg_sub_precs,
    }

    # Without magnetostatics
    argdicts = {
        "-driver": "llg",
        "-dt": 0.1,
        "-ms-method": "disabled",
        "-solver": "gmres",
        "-prec": "dummy",
        "-llg-prec": llg_precs,
        "-llg-sub-prec": llg_sub_precs,
    }

    # Slightly redundant to run with multiple llg-sub-prec args even when
    # using exact llg-prec (llg-sub-prec is ignored in this case). But it's
    # so fast that it really doesn't matter.

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # Run
    err_codes_ms, _ = mm.run_sweep(argdicts_ms, base_outdir,
                                   serial_mode=not args.parallel)
    err_codes, _ = mm.run_sweep(argdicts, base_outdir,
                                serial_mode=not args.parallel)

    # Check things ran ok
    ran = all((e == 0 for e in it.chain(err_codes_ms, err_codes)))

    # No analysis of output: not checking that here

    if ran:
        return 0
    else:
        return 1
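Every script here hands an argdict to mm.run_sweep. Judging from the docstring in code example #11 ('-ref' : [1, 2, 3] yields one run per value), list-valued flags are expanded into a Cartesian product of runs. A minimal sketch of that expansion, assuming run_sweep iterates over something like the following (the real function must also create output directories and launch the driver processes):

import itertools as it

def expand_argdict(argdict):
    """Yield one flat dict per combination of list-valued arguments (a
    sketch of the expansion mm.run_sweep presumably performs)."""
    # Wrap scalar values in single-element lists so everything is a list.
    normalised = {k: v if isinstance(v, list) else [v]
                  for k, v in argdict.items()}
    keys = list(normalised)
    for combo in it.product(*(normalised[k] for k in keys)):
        yield dict(zip(keys, combo))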
Code example #2
def main():

    # Look for parallel in args
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    args = parser.parse_args()


    # What to run
    argdicts = {
        "-driver" : 'llg',
        "-solver" : "gmres",
        "-matrix-type" : "som",
        "-prec" : "som-main-ilu-0",
        "-mesh" : "sq_cubeoid",
        "-max-steps" : 1,
        "-h-app" : "zero",
        "-initial-m" : "xz",
        "-hlib-bem" : 0,
        "-phi1-singularity-method" : "pin_boundary",
        "-dt" : 0.001,
        '-ts' : 'bdf2',
        "-ms-method" : ["implicit", "decoupled"],
        }

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # Run
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir,
                                      serial_mode=not args.parallel)

    # Get data
    datasets = list(map(mm.parse_run, outdirs))

    # Check things
    ran = all((e == 0 for e in err_codes))

    # Load energies from file
    exact_energy_array = sp.loadtxt(
        "../micromag_energy/cubeoid/cubeoid_energies", skiprows=1)
    exact_energies = {
        "exchange_energy" : exact_energy_array[0],
        "zeeman_energy" : exact_energy_array[1],
        "crystalline_anisotropy_energy" : exact_energy_array[2],
        "magnetostatic_energy" : exact_energy_array[3],
        }
    energy_keys = list(exact_energies.keys())

    # Check answers
    t1 = [tests.check_dicts_match(d, exact_energies, energy_keys,
                                  rtol=8, zero=1.5e-4)
          for d in datasets]

    if ran and all(t1):
        return 0
    else:
        return 1
Code example #3
def main():

    # Look for parallel in args
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    args = parser.parse_args()


    # What to run
    argdicts = {
        "-driver" : 'llg',

        '-mesh' : 'ut_sphere',
        '-tmax' : 10,
        '-damping' : 1,

        '-ts' : 'imr',
        '-dt' : 0.1,

        '-ms-method' : 'implicit',
        '-quadrature' : 'lnodal',
        '-newton-tol' : 1e-12,
        "-solver" : "gmres",
        "-matrix-type" : "som",
        "-prec" : "som-main-exact",
        }

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # Run
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir,
                                      serial_mode=not args.parallel)

    # Get data
    datasets = list(map(mm.parse_run, outdirs))

    # Check things
    ran = all((e == 0 for e in err_codes))
    t1 = all([tests.check_m_length(d, 1e-7) for d in datasets])
    t2 = all([tests.check_error_norm(d, 1e-2) for d in datasets])

    # ??ds not convinced I should let the tols be this loose

    if ran and t1 and t2:
        return 0
    else:
        return 1
Code example #4
def main():

    # Parse arguments
    # ============================================================

    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action='store_true')
    args = parser.parse_args()


    # What to run
    argdicts = {
        "-driver" : 'llg',
        "-solver" : "gmres",
        "-matrix-type" : "som",
        "-prec" : "som-main-exact",
        '-tmax' : 15,
        '-ref' : 1,
        '-dt' : [0.05],
        # '-newton-tol' : [1e-8, 1.1e-8],
        '-ts' : ['bdf2', 'midpoint-bdf'],
        '-ms-method' : ['implicit',
                        'decoupled',
                        # 'decoupled-no-extrapolation'
                        ],
        }

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # Run
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir,
                                      serial_mode=not args.parallel)

    # Get data
    datasets = list(map(mm.parse_run, outdirs))

    # Check things
    ran = all((e == 0 for e in err_codes))
    t1 = tests.check_solns_match_key(datasets, '-ms-method', tol=0.05)

    if ran and t1:
        return 0
    else:
        return 1
Code example #5
def main():

    # Look for parallel in args
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    args = parser.parse_args()

    # What to run
    argdicts = {
        "-driver" : ["llg"],
        "-ts" : ["midpoint-bdf", "bdf2"],
        "-ms-method" : ["implicit", "decoupled", "disabled"],
        "-solver" : "gmres",
        "-matrix-type" : "som",
        "-prec" : "som-main-exact",
        "-tmax" : 100,
        "-tol" : 0.01,
        "-mp-pred" : "ebdf3",
        # "-initial-m" : "exactly_z",
        }

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # Run
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir,
                                      serial_mode=not args.parallel)

    # Get data
    datasets = list(map(mm.parse_run, outdirs))

    # Check things
    ran = all((e == 0 for e in err_codes))
    t1 = all([tests.check_ndt_less_than(d, 80) for d in datasets
              if d['-ms-method'] != "decoupled"])
    t2 = all([tests.check_ndt_less_than(d, 115) for d in datasets
              if d['-ms-method'] == "decoupled"])

    # Decoupled ms introduces wiggles which put an effective cap on the step size.

    if all([ran, t1, t2]):
        return 0
    else:
        return 1
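tests.check_ndt_less_than is project code; code example #9 below counts steps via len(d['times']), which suggests the helper simply compares the number of accepted adaptive steps against a cap. A hedged sketch, assuming parse_run exposes one time entry per accepted step:

def check_ndt_less_than_sketch(dataset, max_ndt):
    # Pass if the adaptive run reached tmax in fewer than max_ndt
    # accepted steps (the real helper may count from a trace file).
    return len(dataset['times']) < max_ndt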
Code example #6
def main():

    # Look for parallel in args
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    args = parser.parse_args()

    # What to run
    argdicts = {
        "-driver" : 'llgode',
        '-exact' : "ll",
        '-ts' : ["rk2", "midpoint-bdf"],
        '-dt' : 0.01,
        '-damping' : 0.5,
        '-h-app' : 'minus_z',
        # Be very harsh on the Newton conditions to check that the
        # Jacobian is correct.
        '-newton-tol' : 1e-12,
        '-newton-max-iterations' : 4,
        }


    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # Run
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir,
                                      serial_mode=not args.parallel)

    # Get data
    datasets = list(map(mm.parse_run, outdirs))

    # Check all errors are small
    ok = all([tests.check_error_norm(d, 1e-4) for d in datasets])

    ran = all([e == 0 for e in err_codes])

    if ran and ok:
        return 0
    else:
        return 1
Code example #7
def main():

    # Look for parallel in args
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    args = parser.parse_args()


    # What to run
    argdicts = {
        "-driver" : 'llg',
        "-solver" : "gmres",
        "-matrix-type" : "som",
        "-prec" : "som-main-exact",
        '-tmax' : 10,
        '-ts' : 'bdf2',
        }

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # Run
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir,
                                      serial_mode=not args.parallel)

    # Get data
    datasets = list(map(mm.parse_run, outdirs))

    # Check things
    ran = all((e == 0 for e in err_codes))
    # t1 = all([tests.check_error_norm(d, 1e-5) for d in datasets])
    # t2 = all([tests.check_m_length(d, 1e-14) for d in datasets])

    if ran:
        return 0
    else:
        return 1
Code example #8
def main():

    # Look for parallel in args
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    args = parser.parse_args()


    # Test without magnetostatics by comparison with Mallinson solution
    argdicts = {
        "-driver" : ["ll"],
        "-dt": [0.01],
        "-scale": [10],
        "-ms-method" : "disabled",
        "-ts" : ["cay-euler", "cay-rk2"],
        "-tmax" : [3],
        }

    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # Run
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir,
                                      serial_mode=not args.parallel)

    # Get data
    datasets = list(map(mm.parse_run, outdirs))

    # Check things
    ran = all((e == 0 for e in err_codes))
    t1 = all([tests.check_error_norm(d, 1e-5) for d in datasets])
    t2 = all([tests.check_m_length(d, 1e-14) for d in datasets])

    if ran and t1 and t2:
        return 0
    else:
        return 1
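tests.check_m_length is project code, but what it verifies is clear from context: LL/LLG dynamics preserve the pointwise magnetisation length |m| = 1, and the cay-* timesteppers (presumably Cayley-transform based) should conserve it to roundoff, hence the tight 1e-14 tolerance above. A minimal sketch of such a check, assuming (a guess, not the real parse_run layout) that magnetisation comes back as an (n_times, n_nodes, 3) array under d['m']:

import numpy as np

def check_m_length_sketch(dataset, tol):
    # Largest deviation of |m| from 1 over all times and nodes.
    m = np.asarray(dataset['m'])  # assumed shape: (n_times, n_nodes, 3)
    lengths = np.sqrt((m**2).sum(axis=-1))
    return np.max(np.abs(lengths - 1.0)) < tol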
Code example #9
def main():

    # What to run
    argdicts = {
        "-driver" : "ode",
        "-exact" : ["sin", "poly3", "poly2", "stiff_test"],
        "-ts" : ["bdf2", "midpoint-bdf", "tr", "bdf1"],
        "-tmax" : 10,
        "-tol" : 1e-4,
        "-disable-mm-opt" : 1,
        "-always-write-trace" : 1, # Otherwise we get wrong ndts by counting len(dts)
        }

    # ??ds not sure why we need "-disable-mm-opt",
    # but there's something stupid going on with mass matrix storage

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # Run
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir)

    # Get data
    datasets = list(filter(lambda d: d is not None, map(mm.parse_run, outdirs)))


    # Check things ran
    test_results = []
    test_results.append(all([e == 0 for e in err_codes]))


    # Use bdf2's nsteps as a maximum: tr and imr are more accurate than
    # bdf2, so this should hold (unless bdf2's numerical damping has
    # kicked in and caused it to jump to a steady state too soon). We
    # assume that the order of the data is preserved, so that
    # bdf2_data[n] corresponds to the same exact solution as the nth
    # dataset for each other timestepper.
    bdf2_data = [d for d in datasets if d['-ts'] == 'bdf2']



    for ts in argdicts['-ts']:

        # bdf1 sucks (first order) so it needs far more steps; set its
        # limits manually.
        if ts == "bdf1":
            max_err = 0.4
            max_steps = [550, 3800, 1050, 70]
        else:
            max_err = 0.07
            max_steps = [1.3*len(d['times']) for d in bdf2_data]

        ts_data = [d for d in datasets if d['-ts'] == ts]

        # Check errors are small
        test_results.append(all([tests.check_error_norm(d, max_err) for d in ts_data]))

        # Check all the runs:
        nsteps_ok = [tests.check_ndt_less_than(d, m)
                     for d, m in zip(ts_data, max_steps)]
        test_results.append(all(nsteps_ok))


    if all(test_results):
        return 0
    else:
        return 1
Code example #10
def main():

    # Look for parallel in args
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    args = parser.parse_args()

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # constant dt
    # ============================================================

    argdicts_const_dt = {
        '-driver': "ode",
        "-disable-mm-opt" : 1,
        "-dt": "0.05",
        "-tmax": "4",
        "-ts": ["rk2", "rk4", "midpoint-bdf", "bdf2", "tr"],
        "-exact": ["sin", "cos", "poly3", "poly2"],
        }

    # Run const
    err_codes_const_dt, outdirs_const_dt = \
      mm.run_sweep(argdicts_const_dt, pjoin(base_outdir, "const_dt"),
                   serial_mode=not args.parallel)

    # Check they all ran without crashing
    const_ran = all(e == 0 for e in err_codes_const_dt)

    # Parse output and check error norms
    const_tests = [tests.check_error_norm(data, tol=0.1)
                   for data in map(mm.parse_run, outdirs_const_dt)]


    # varying dt
    # ============================================================

    argdicts_var_dt = {
        '-driver': "ode",
        "-disable-mm-opt" : 1,
        "-dt-initial": 1e-6,
        "-tol": 1e-3,
        "-tmax": "4",
        "-ts": ["midpoint-bdf", "bdf2-pred"],
        "-exact": ["sin", "poly3", "poly2"], # "cos" has slightly higher error
                                             # for bdf2 w/ rk4, not enough to
                                             # worry about I think but also I
                                             # don't want to raise the tol. So
                                             # I'm leaving it out.
        "-mp-pred" : ["ebdf3", "rk4"],
        }

    # Run var
    err_codes_var_dt, outdirs_var_dt = \
      mm.run_sweep(argdicts_var_dt, pjoin(base_outdir, "var_dt"),
                   serial_mode=not args.parallel)

    # Check they all ran without crashing
    var_ran = all(e == 0 for e in err_codes_var_dt)

    # Parse output
    datasets = list(map(mm.parse_run, outdirs_var_dt))

    # check error norms
    var_tests = [tests.check_error_norm_relative(data, tol=1e-2)
                 for data in datasets]

    # For a second order polynomial the local truncation error of these
    # second order schemes is exactly zero (it depends on the third time
    # derivative), so check that those cases are correct to near roundoff.
    poly2_data = [d for d in datasets if d['-exact'] == 'poly2']
    lte_test = [tests.check_error_norm(d, tol=1e-7) for d in poly2_data]


    # return
    # ============================================================

    if const_ran and all(const_tests) \
      and var_ran and all(var_tests) and all(lte_test):
        return 0
    else:
        return 1
Code example #11
def main():
    """Run driver multiple times in parallel with arguments specified by a file
    in etc/parameter_sets.

    Parameter file format is either:

    1) a dict of cli args and lists of their values, e.g.:
    {
    '-dt' : 0.1,
    '-ref' : [1, 2, 3],
    }
    will run with `-dt 0.1 -ref 1`, `-dt 0.1 -ref 2` and `-dt 0.1 -ref 3`.


    2) A tuple consisting of a dict as above and (any number of) lists
    of dicts, e.g.:

    ({...}, [{'-a' : 1, '-b' : 2}, {'-a' : 4, '-b' : [4, 5, 6]}], ... )

    Each list of dicts contains arguments which only make sense when
    used together. Each of these dicts should be in the same format as
    the main dict. Each one is merged into the main dict (using .update)
    and then used in the same way as the main dict in 1).

    Note that all of the additional dicts must contain the same args.
    """

    # Parse inputs
    # ============================================================

    # Use RawDescriptionHelpFormatter so argparse doesn't mangle the
    # formatting of the docstring in the help message.
    parser = argparse.ArgumentParser(
        description=main.__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument('--debug-mode', action='store_true',
                        help='Enable debugging mode (run in serial).')

    parser.add_argument('--parameters', '-p', action='append',
                        help='Do a standard parameter sweep with the specified parameter set.')

    parser.add_argument('--clean', action='store_true',
                        help='clean up old results from the target folder')

    parser.add_argument('--ncores', '-j', '-n', default=mp.cpu_count(),
                        type=int, help='Number of processes to run at once.')

    parser.add_argument('--no-build', action='store_true',
                        help="Don't rebuild anything")

    parser.add_argument('--dry-run', action='store_true',
                        help="Don't actually do anything, just list the parameters to run.")

    args = parser.parse_args()


    for parameter_set in args.parameters:

        # Find the parameter set
        # ============================================================

        search_root = pjoin(mm.rootdir(), "etc", "parameter_sets")

        # Recursively find files named parameter_set in search_root
        parameter_files = []
        for root, dirs, files in os.walk(search_root, followlinks=True):
            for f in files:
                if f == parameter_set:
                    parameter_files.append(pjoin(root, f))

        # Error check number of files
        if len(parameter_files) > 1:
            sys.stderr.write("Found multiple files named " + parameter_set
                             + ": " + " ".join(parameter_files) + "\n")
            return 5
        elif len(parameter_files) == 0:
            sys.stderr.write("Couldn't find a file named " + parameter_set
                             + " in " + search_root + "\n")
            return 6
        else:
            parameter_file = parameter_files[0]



        # Parse parameters file
        # ============================================================

        output_root = pjoin(mm.rootdir(), "experiments", "parameter_sweeps",
                            '_'.join(parameter_set.split()))

        with open(parameter_file, 'r') as pfile:
            args_file = ast.literal_eval(pfile.read())

        print(args_file)

        # Distinguish the two formats: indexing a plain dict with 0
        # raises KeyError, while indexing a tuple returns the main dict.
        try:
            args_dict = args_file[0]
            extra = list(args_file[1:])
        except KeyError:
            args_dict = args_file
            extra = None


        if args.dry_run:
            if extra is not None:
                parameter_dicts = sum([mm.generate_argdicts(args_dict, a) for a in extra], [])
            else:
                parameter_dicts = mm.generate_argdicts(args_dict)

            pprint(parameter_dicts)

            return 0


        # Make sure we're ready to go
        # ============================================================

        # Maybe build things
        if not args.no_build:

            # Make sure micromag library is up to date
            driver_folder = os.path.dirname(mm.driver_path())
            library_folder = pjoin(driver_folder, "../../")

            print("Building and installing libraries from", library_folder)
            subp.check_call(['make', '--silent', '--keep-going',
                             'LIBTOOLFLAGS=--silent'], cwd=library_folder)
            subp.check_call(['make', 'install', '--silent', '--keep-going',
                             'LIBTOOLFLAGS=--silent'], cwd=library_folder)

            print("Building driver in", driver_folder)
            subp.check_call(['make', '--silent', '--keep-going',
                             'LIBTOOLFLAGS=--silent'], cwd=driver_folder)

            # Make sure the binaries are up to date (if they aren't just the
            # default one).
            binaries = args_dict.get('-binary')
            if binaries is not None:
                driver_folders = [os.path.abspath(os.path.dirname(d)) for d in binaries]
                for f in driver_folders:
                    build_driver(f)


        # Remove old stuff if requested
        if args.clean and os.path.isdir(output_root):
            print("Cleaning out", output_root)
            # recursive_check_filenames_rm_safe(output_root)
            shutil.rmtree(output_root)
            os.mkdir(output_root)

        # Copy parameters file to output dir
        os.makedirs(output_root, exist_ok=True)
        shutil.copyfile(parameter_file, pjoin(output_root, "parameter_file"))



        # Run it
        # ============================================================

        print("Running parameter sweep with parameter set", parameter_set)
        print("Output is going into", output_root)
        mm.run_sweep(args_dict, output_root,
                     extra_argsets=extra,
                     serial_mode=args.debug_mode,
                     processes=args.ncores)

    return 0
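For reference, a parameter file in the second format documented in the docstring might look like the following (the file name and every value are invented for illustration; ast.literal_eval can read it because the whole file is a single Python literal):

# Hypothetical contents of etc/parameter_sets/example_sweep:
(
    # Main dict, applied to every run.
    {
        '-driver' : 'llg',
        '-dt' : 0.1,
        '-ref' : [1, 2, 3],
    },
    # A list of linked argsets; note both dicts contain the same args,
    # as the docstring requires.
    [
        {'-ts' : 'bdf2', '-newton-tol' : [1e-8, 1e-10]},
        {'-ts' : 'imr', '-newton-tol' : [1e-12]},
    ],
)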
Code example #12
def main():

    # Look for parallel in args
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    args = parser.parse_args()


    # What to run
    argdicts = {
        # Problem specification
        '-driver' : 'llg',
        '-ms-method' : 'disabled',
        '-mesh' : 'sq_line_periodic',
        '-initial-m' : 'periodic_exact',
        '-initial-is-exact' : 1,
        '-h-app' : 'zero',
        '-damping' : [0.9, 0.1, 0.01, 0.001, 0],
        '-tmax' : 0.1,
        '-wave-solution-c' : 1/12, # as used by Jeong et al.

        # convergence test: one step and link dt to spatial refinement
        '-max-steps' : 1,
        '-convergence-test' : 1,
        '-doc-interval' : 0,
        '-doc-exact' : 1,


        # Integration/calculation details
        '-ts' : ["imr", "tr", "bdf2"],
        '-ref' : [2, 3, 4, 5, 6, 7, 8],
        '-newton-tol' : 1e-12,
        '-renormalise' : "never",
        '-quadrature' : ['lnodal', 'gauss'],
        }

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # Run
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir,
                                      serial_mode=not args.parallel)

    # Get data
    datasets = list(map(mm.parse_run, outdirs))

    # Check things all ran
    ran = all((e == 0 for e in err_codes))

    convergence_test_datasets = mm.split_to_comparable_groups(datasets, '-ref')

    def rate(datasets):
        """Expected convergence rate for a given timestepper.
        """
        # Check that all have the same ts (they should since this is a
        # convergence test!)
        assert all((d['-ts'] == datasets[0]['-ts'] for d in datasets))

        # bdf2 convergence is pretty bad, choose a lower convergence rate for it
        if datasets[0]['-ts'] == 'bdf2':
            return 1.75
        else:
            return 2

    # Check the convergence rates; they seem to be just over 2, not sure
    # why since it should only be 2. Something to do with using an exact
    # solution?
    t1 = all([tests.check_convergence(d, 2.2, tol=0.2)
              for d in convergence_test_datasets])

    if ran and t1:
        return 0
    else:
        return 1
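tests.check_convergence is project code; the standard way to estimate an observed order from runs on successively refined meshes, which it presumably implements in some form, is the log-ratio of consecutive error norms. A sketch assuming each -ref step halves the element size h:

import math

def estimate_rates(error_norms):
    # rate ~ log2(err_coarse / err_fine) when each refinement halves h;
    # e.g. error norms [4e-2, 1e-2, 2.5e-3] give rates [2.0, 2.0].
    return [math.log2(coarse / fine)
            for coarse, fine in zip(error_norms, error_norms[1:])]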
Code example #13
def main():

    # Look for parallel in args
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    parser.add_argument('--short', action="store_true")
    args = parser.parse_args()


    # What to run
    argdicts_1d = {
        # Problem specification
        '-driver' : 'llg',
        '-ms-method' : 'disabled',
        '-mesh' : 'sq_line_periodic',
        '-initial-m' : 'periodic_exact',
        '-initial-is-exact' : 1,
        '-h-app' : 'zero',
        '-damping' : [0.9, 0.01, 0],
        '-tmax' : 1.0,
        '-wave-solution-c' : 1/12, # Jeong et al.'s value


        # Integration/calculation details
        '-ts' : ["imr"],
        '-ref' : [1, 4],
        '-dt' : [0.1, 0.01, 0.005],
        # ref 1 is way too little and dt 0.1 is way too big, but test
        # conservation properties anyway (should conserve).

        '-newton-tol' : 1e-12,
        '-renormalise' : "never",
        '-quadrature' : ['lnodal'],
        }

    argdicts_2d = {
        # Problem specification
        '-driver' : 'llg',
        '-ms-method' : 'disabled',
        '-mesh' : 'sq_square_periodic',
        '-initial-m' : 'periodic_exact',
        '-initial-is-exact' : 1,
        '-h-app' : 'zero',
        '-damping' : [0.5], # only one case because it's slower
        '-tmax' : 1.0,
        '-wave-solution-c' : 1/12, # Jeong et al.'s value

        # Integration/calculation details
        '-ts' : ["imr"],
        '-ref' : [2, 5], # need to test high refinement to check that
                         # rescaling is working
        '-dt' : [0.1],
        '-newton-tol' : 1e-12,
        '-renormalise' : "never",
        '-quadrature' : ['lnodal'],
        }


    # Seems like the exact solution is weird, so check another case as well
    argdicts_non_exact = {
        # Problem specification
        '-driver' : 'llg',
        '-ms-method' : 'disabled',
        '-mesh' : ['sq_square'], # 'st_square'
        '-initial-m' : 'smoothly_varying_5',
        '-h-app' : 'zero',
        '-damping' : [0.5],
        '-tmax' : 1.0,

        # Integration/calculation details
        '-ts' : ["imr"],
        '-ref' : [2],
        '-dt' : [0.1],
        '-newton-tol' : 1e-12,
        '-renormalise' : "never",
        '-quadrature' : ['lnodal'],
        }


    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # Run
    if not args.short:
        err_codes_1d, outdirs_1d = mm.run_sweep(argdicts_1d, base_outdir,
                                                serial_mode=not args.parallel)
        err_codes_2d, outdirs_2d = mm.run_sweep(argdicts_2d, base_outdir + "_2d",
                                                serial_mode=not args.parallel)
    else:
        err_codes_1d = []
        outdirs_1d = []
        err_codes_2d = []
        outdirs_2d = []

    err_codes_non_exact, outdirs_non_exact = mm.run_sweep(argdicts_non_exact,
                                                          base_outdir+"_non_exact",
                                                          serial_mode=not args.parallel)

    err_codes = err_codes_1d + err_codes_2d + err_codes_non_exact
    outdirs = outdirs_1d + outdirs_2d + outdirs_non_exact


    # Get data
    datasets = list(map(mm.parse_run, outdirs))

    # Check things
    ran = all((e == 0 for e in err_codes))

    t1 = all([tests.check_m_length(d, tol=1e-10) for d in datasets])

    if ran and t1:
        return 0
    else:
        return 1