def main():
    """Run the llg driver validation sweep and check the results.

    Returns 0 if every run exits cleanly and passes its checks, else 1
    (shell exit-code convention).
    """
    # Look for parallel in args
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    args = parser.parse_args()

    # What to run
    argdicts = {
        "-driver": 'llg',
        '-mesh': 'ut_sphere',
        '-tmax': 10,
        '-damping': 1,
        '-ts': 'imr',
        '-dt': 0.1,
        '-ms-method': 'implicit',
        '-quadrature': 'lnodal',
        '-newton-tol': 1e-12,
        "-solver": "gmres",
        "-matrix-type": "som",
        "-prec": "som-main-exact",
    }

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__),
                                        "Validation"))

    # Run
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir,
                                      serial_mode=not args.parallel)

    # Get data
    datasets = list(map(mm.parse_run, outdirs))

    # Check things
    ran = all(e == 0 for e in err_codes)
    t1 = all(tests.check_m_length(d, 1e-7) for d in datasets)
    # ??ds not convinced I should let the tols be this loose
    t2 = all(tests.check_error_norm(d, 1e-2) for d in datasets)

    # FIX: t2 was previously computed but never checked, so the error-norm
    # test could fail without affecting the exit code.  Gate on it too,
    # consistent with the other validation scripts in this file.
    if ran and t1 and t2:
        return 0
    else:
        return 1
def main():
    """Validate the llgode driver against the exact "ll" solution.

    Returns 0 on success, 1 on any failure.
    """
    # Command-line handling: only a --parallel switch is supported.
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    args = parser.parse_args()

    # Parameter sweep to run.  Newton conditions are deliberately very
    # harsh so that an incorrect Jacobian shows up as a convergence
    # failure rather than passing quietly.
    argdicts = {
        "-driver": 'llgode',
        '-exact': "ll",
        '-ts': ["rk2", "midpoint-bdf"],
        '-dt': 0.01,
        '-damping': 0.5,
        '-h-app': 'minus_z',
        '-newton-tol': 1e-12,
        '-newton-max-iterations': 4,
    }

    # Output location for the sweep results
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__),
                                        "Validation"))

    # Run the sweep
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir,
                                      serial_mode=not args.parallel)

    # Parse the results
    datasets = list(map(mm.parse_run, outdirs))

    # Every run must exit cleanly and every error norm must be small.
    ok = all(tests.check_error_norm(d, 1e-4) for d in datasets)
    ran = all(e == 0 for e in err_codes)

    return 0 if ran and ok else 1
def main():
    """Check the ll driver's Cayley integrators against Mallinson.

    Magnetostatics is disabled so the runs can be compared with the
    analytical (Mallinson) solution.  Returns 0 on success, 1 otherwise.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    args = parser.parse_args()

    # Test without magnetostatics by comparison with Mallinson solution
    argdicts = {
        "-driver": ["ll"],
        "-dt": [0.01],
        "-scale": [10],
        "-ms-method": "disabled",
        "-ts": ["cay-euler", "cay-rk2"],
        "-tmax": [3],
    }

    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__),
                                        "Validation"))

    # Run all parameter combinations
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir,
                                      serial_mode=not args.parallel)

    # Parse each output directory
    datasets = [mm.parse_run(outdir) for outdir in outdirs]

    # Success requires: clean exits, small error norms, and |m| conserved
    # to near machine precision.
    ran = all(e == 0 for e in err_codes)
    small_errors = all(tests.check_error_norm(d, 1e-5) for d in datasets)
    unit_length = all(tests.check_m_length(d, 1e-14) for d in datasets)

    if ran and small_errors and unit_length:
        return 0
    else:
        return 1
def main():
    """Adaptive-timestep convergence test for several ODE time steppers.

    Runs each stepper on a set of exact solutions, then checks exit
    codes, error norms, and that the number of accepted steps stays
    within a per-stepper budget.  Returns 0 on success, 1 otherwise.
    """
    # What to run.  "-always-write-trace" is required because step counts
    # are derived from len(dts); without it the ndt counts come out wrong.
    argdicts = {
        "-driver": "ode",
        "-exact": ["sin", "poly3", "poly2", "stiff_test"],
        "-ts": ["bdf2", "midpoint-bdf", "tr", "bdf1"],
        "-tmax": 10,
        "-tol": 1e-4,
        "-disable-mm-opt": 1,
        "-always-write-trace": 1,
    }
    # ??ds not sure why we need "-disable-mm-opt", but there's something
    # stupid going on with mass matrix storage

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__),
                                        "Validation"))

    # Run (no --parallel flag in this script, so default mode)
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir)

    # Parse output, dropping any runs that produced no data
    datasets = [d for d in map(mm.parse_run, outdirs) if d is not None]

    test_results = []

    # Check everything exited cleanly
    test_results.append(all(e == 0 for e in err_codes))

    # Use bdf2's nsteps as a maximum, tr and imr are more accurate than
    # bdf2 so this should be true (unless bdf2's numerical damping has
    # kicked in and caused it to jump to a steady state too soon), (we
    # assume that the order of data is preserved here so that bdf_data[n]
    # is the same exact solution as imr_data[n] etc.).
    bdf2_data = [d for d in datasets if d['-ts'] == 'bdf2']

    for ts in argdicts['-ts']:
        if ts == "bdf1":
            # bdf1 sucks (first order) so it needs far more steps, do it
            # manually.
            max_err = 0.4
            max_steps = [550, 3800, 1050, 70]
        else:
            max_err = 0.07
            max_steps = [1.3 * len(d['times']) for d in bdf2_data]

        ts_data = [d for d in datasets if d['-ts'] == ts]

        # Check errors are small
        test_results.append(all(tests.check_error_norm(d, max_err)
                                for d in ts_data))

        # Check step counts stayed within budget for each run
        nsteps_ok = [tests.check_ndt_less_than(d, m)
                     for d, m in zip(ts_data, max_steps)]
        test_results.append(all(nsteps_ok))

    return 0 if all(test_results) else 1
def main():
    """Test constant- and adaptive-dt timestepping on simple exact ODEs.

    Two sweeps are run: a fixed-dt sweep checked against an absolute
    error tolerance, and an adaptive-dt sweep checked against a relative
    tolerance (plus an exactness check for poly2, whose LTE is zero).
    Returns 0 on success, 1 otherwise.
    """
    # Look for parallel in args
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    args = parser.parse_args()

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__),
                                        "Validation"))

    # constant dt
    # ============================================================
    argdicts_const_dt = {
        '-driver': "ode",
        "-disable-mm-opt": 1,
        "-dt": "0.05",
        "-tmax": "4",
        "-ts": ["rk2", "rk4", "midpoint-bdf", "bdf2", "tr"],
        "-exact": ["sin", "cos", "poly3", "poly2"],
    }

    # Run const
    err_codes_const_dt, outdirs_const_dt = \
        mm.run_sweep(argdicts_const_dt, pjoin(base_outdir, "const_dt"),
                     serial_mode=not args.parallel)

    # Check they all ran without crashing
    const_ran = all(e == 0 for e in err_codes_const_dt)

    # Parse output and check error norms
    const_tests = [tests.check_error_norm(data, tol=0.1)
                   for data in map(mm.parse_run, outdirs_const_dt)]

    # varying dt
    # ============================================================
    argdicts_var_dt = {
        '-driver': "ode",
        "-disable-mm-opt": 1,
        "-dt-initial": 1e-6,
        "-tol": 1e-3,
        "-tmax": "4",
        "-ts": ["midpoint-bdf", "bdf2-pred"],
        "-exact": ["sin", "poly3", "poly2"],
        # "cos" has slightly higher error for bdf2 w/ rk4, not enough to
        # worry about I think but also I don't want to raise the tol. So
        # I'm leaving it out.
        "-mp-pred": ["ebdf3", "rk4"],
    }

    # Run var
    err_codes_var_dt, outdirs_var_dt = \
        mm.run_sweep(argdicts_var_dt, pjoin(base_outdir, "var_dt"),
                     serial_mode=not args.parallel)

    # Check they all ran without crashing
    var_ran = all(e == 0 for e in err_codes_var_dt)

    # Parse output
    datasets = list(map(mm.parse_run, outdirs_var_dt))

    # check error norms
    var_tests = [tests.check_error_norm_relative(data, tol=1e-2)
                 for data in datasets]

    # For second order polynomial we know lte is zero, check we got these
    # cases correct.
    # FIX: parsed datasets are keyed by the full flag name including the
    # leading dash (cf. d['-ts'] usage in the step-count test above), so
    # the old key 'exact' could never select the poly2 runs and this
    # check passed vacuously.
    poly2_data = [d for d in datasets if d['-exact'] == 'poly2']
    lte_test = [tests.check_error_norm(d, tol=1e-7) for d in poly2_data]

    # return
    # ============================================================
    if const_ran and all(const_tests) \
       and var_ran and all(var_tests) and all(lte_test):
        return 0
    else:
        return 1