コード例 #1
0
def check_solns_match_key(datasets, key_to_compare_over, **kwargs):
    """Compare pairs of solutions which differ only by key_to_compare_over.

    The datasets are split into groups whose members differ only in the
    value of key_to_compare_over; each group must contain exactly two
    solutions, which are then compared with check_mean_m_matches.

    kwargs are passed through to the underlying checking function.
    """

    groups = mm.split_to_comparable_groups(datasets, key_to_compare_over)

    # Collect every comparison result first (no short-circuiting) so that
    # all pairs are checked even when an early pair fails to match.
    comparisons = []
    for pair in groups:
        # The grouping must have produced exact pairs
        assert len(pair) == 2
        first, second = pair
        comparisons.append(check_mean_m_matches(first, second, **kwargs))

    return all(comparisons)
コード例 #2
0
def main():
    """Compare mean wall-clock time per step of implicit (midpoint-bdf)
    and explicit (rk2) time stepping over comparable groups of runs, and
    print the implicit/explicit time ratio for decoupled and disabled
    magnetostatics methods.
    """

    # Parse arguments
    # ============================================================

    parser = argparse.ArgumentParser(description=main.__doc__,

    # Don't mess up my formatting in the help message
    formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument('--dir', '-d', action='append',
                        help='Set the directory to look for data in (default "results").')

    parser.add_argument('--split', '-s', action='append',
                        help="Split into different plots for different values of these keys, for keys begining with dash specify them as: `-s='-dt'` to avoid issues with `-` being read as a new argument.")

    parser.add_argument('--skip-failed', action='store_true',
                        help='Skip runs which failed (dir contains file named failed)')


    args = parser.parse_args()

    # Default to ./results when no directories were given on the command line
    if (args.dir is None) or (args.dir == []):
        print("No directories given, so parsing ./results")
        args.dir = ["results"]

    # Default set of keys to split plots over
    if args.split is None:
        args.split = ['mesh', 'h-app', 'initial-m', 'mag-params', 'scale']


    # Parse data
    # ============================================================

    # Get the results that aren't just empty (parse_parameter_sweep returns
    # None for runs without enough finished time steps)
    really_all_results = mm.parse_parameter_sweep(args.dir,
                                                  skip_failed=args.skip_failed)
    all_results = [d for d in really_all_results if d is not None]


    print(len(all_results), "data sets out of", len(really_all_results), "used",
          "(any others didn't have enough time steps finished).")

    # print("Splitting plots based on values of", args.split)


    # split_data = utils.split_up_stuff(all_results, args.split)

    # Get mean step times
    # NOTE(review): sp is presumably scipy (imported elsewhere in this file)
    # — confirm; scipy removed the top-level mean alias in newer versions.
    for data in all_results:
        data['mean_step_time'] = sp.mean(data['total_step_time'])

    # Function specifying which dict keys we care about when splitting into
    # groups for comparison of computation time per step (ignore keys which
    # are allowed to differ between the runs being compared)
    def key_filter(k):
        return all([mm.is_arg_key(k),
                    k != "-outdir",
                    k != "-dt",
                    k != "-driver",
                    k != "-fd-jac",
                    k != "-damping",
                    ])


    # Split data up: each group differs only in the '-ts' (time stepper) key
    grouped = mm.split_to_comparable_groups(all_results, '-ts',
                                            key_filter=key_filter)


    # Print it
    results = []
    for group in grouped:
        # for g in group:
        #     print(g['-ts'],g ['mean_step_time'])
        # print()

        # Mean time per step for the explicit (rk2) and implicit
        # (midpoint-bdf) members of this group
        rk_time = sp.mean([d['mean_step_time'] for d in group if d['-ts'] == 'rk2'])
        imr_time = sp.mean([d['mean_step_time'] for d in group if d['-ts'] == 'midpoint-bdf'])

        # Record the implicit/explicit slowdown ratio alongside the keys
        # identifying this group
        r = {
             '-ms-method' : group[0]['-ms-method'],
             '-ref' : group[0]['-ref'],
             'ratio' : imr_time/rk_time,
             }

        results.append(r)

    # pprint(results)

    # Average the ratio over all groups, separately for each
    # magnetostatics method
    final1 = sp.mean([d['ratio'] for d in results if d['-ms-method'] == 'decoupled'])
    final2 = sp.mean([d['ratio'] for d in results if d['-ms-method'] == 'disabled'])

    # Ignore timing results for 'sphere' because I never got around to
    # writing the code to include the sphere's hms in the Jacobian, so it
    # ends up taking a large number of newton steps to converge.

    # Additional note: some time per step is spent on output. The "full"
    # output (doc_solution()) is only run after certain amounts of
    # simulated time, so the slowdown caused by it is equivalent for
    # implicit/explicit methods. The "partial" output however is called
    # every step, however using valgrind --tool=callgrind we can see that
    # this only accounts for ~5% of execution time, so doesn't really
    # matter for the purposes of our experiment.

    print("decoupled ms time per step ratio:", final1)
    print("disabled ms time per step ratio:", final2)


    return 0
コード例 #3
0
def main():
    """Run a convergence-test parameter sweep for the llg driver.

    Runs a single step per refinement level (with dt linked to spatial
    refinement) for several time steppers, then checks each comparable
    group of runs converges at roughly second order.

    Returns 0 on success, 1 if any run failed or any group did not
    converge at the expected rate.
    """

    # Look for parallel in args
    parser = argparse.ArgumentParser()
    parser.add_argument('--parallel', action="store_true")
    args = parser.parse_args()


    # What to run: list-valued entries are swept over (all combinations
    # are run)
    argdicts = {
        # Problem specification
        '-driver' : 'llg',
        '-ms-method' : 'disabled',
        '-mesh' : 'sq_line_periodic',
        '-initial-m' : 'periodic_exact',
        '-initial-is-exact' : 1,
        '-h-app' : 'zero',
        '-damping' : [0.9, 0.1, 0.01, 0.001, 0],
        '-tmax' : 0.1,
        '-wave-solution-c' : 1/12, # as used by Jeong et. al.

        # convergence test: one step and link dt to spatial refinement
        '-max-steps' : 1,
        '-convergence-test' : 1,
        '-doc-interval' : 0,
        '-doc-exact' : 1,


        # Integration/calculation details
        '-ts' : ["imr", "tr", "bdf2"],
        '-ref' : [2, 3, 4, 5, 6, 7, 8],
        '-newton-tol' : 1e-12,
        '-renormalise' : "never",
        '-quadrature' : ['lnodal', 'gauss'],
        }

    # Where it's going to end up
    base_outdir = os.path.abspath(pjoin(os.path.dirname(__file__), "Validation"))

    # Run
    err_codes, outdirs = mm.run_sweep(argdicts, base_outdir,
                                      serial_mode=not args.parallel)

    # Get data
    datasets = list(map(mm.parse_run, outdirs))

    # Check things all ran (every sweep entry exited with code 0)
    ran = all(e == 0 for e in err_codes)

    # Group runs which differ only in refinement level ('-ref'); each
    # group is one convergence study.
    convergence_test_datasets = mm.split_to_comparable_groups(datasets, '-ref')

    # NOTE(review): a per-timestepper expected-rate helper (2 for most
    # steppers, ~1.75 for bdf2 whose convergence is worse) used to be
    # defined here but was never called; the check below uses a fixed
    # rate of 2.2 for all steppers instead.

    # Check the convergence rates, seem to be just over 2, not sure why
    # since it should only be 2. Something to do with using an exact
    # solution?
    t1 = all([tests.check_convergence(d, 2.2, tol=0.2)
              for d in convergence_test_datasets])

    # Success only if everything ran cleanly and converged
    if ran and t1:
        return 0
    else:
        return 1