Example #1
def get_error(model_files, dicts, source_file, target_file, devices):

    logging.info("Loading model options from {}".format(model_files[0]))
    with open(model_files[0], "r") as f:
        model_options = json.load(f)

    global dictionaries
    logging.info("loading dictionaries from {}, {}".format(*dicts))
    with open(dicts[0], "r") as f1, open(dicts[1], "r") as f2:
        dictionaries = [json.load(f1), json.load(f2)]

    logging.info("loading parameters from {}".format(model_files[1]))
    params = load_params(model_files[1])

    global in_queue
    global out_queue
    in_queue = Queue()
    out_queue = Queue()

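    # one worker per device: each pulls batches from in_queue and pushes the
    # per-batch cost onto out_queue (see error_process)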
    processes = [Process(target=error_process, name="process_{}".format(device),
                         args=(params, device), kwargs=model_options)
                 for device in devices.split(",")]

    for p in processes:
        p.daemon = True
        p.start()

    ti = TextIterator(source_file=source_file, target_file=target_file,
                      source_dict=dictionaries[0], target_dict=dictionaries[1],
                      maxlen=model_options["maxlen"],
                      n_words_source=model_options["n_words_source"],
                      n_words_target=model_options["n_words_target"],
                      raw_characters=model_options["characters"])

    num_batches = 0
    for batch in ti:
        in_queue.put(batch)
        num_batches += 1

    for _ in processes:
        in_queue.put("STOP")

    costs = []
    for num_processed in range(num_batches):
        costs.append(out_queue.get())
        percentage_done = ((num_processed + 1) / num_batches) * 100
        print("{}: {:.2f}% of input processed".format(model_files[1], percentage_done),
              end="\r", flush=True)
    print()

    mean_cost = np.mean(costs)

    print(model_files[1], mean_cost)
    return mean_cost
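
A minimal sketch of the imports Example #1 appears to rely on; Queue and Process are assumed to come from multiprocessing, while load_params, TextIterator, and error_process are project-specific helpers defined elsewhere in the codebase (the module paths below are hypothetical):

import json
import logging

import numpy as np
from multiprocessing import Process, Queue

# assumed project-specific helpers:
# from data_iterator import TextIterator
# from nmt_utils import load_params, error_process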
Example #2
def test_suite(argv):
    """
    the main test suite driver
    """

    # parse the commandline arguments
    args = test_util.get_args(arg_string=argv)

    # read in the test information
    suite, test_list = params.load_params(args)

    active_test_list = [t.name for t in test_list]

    test_list = suite.get_tests_to_run(test_list)

    suite.log.skip()
    suite.log.bold("running tests: ")
    suite.log.indent()
    for obj in test_list:
        suite.log.log(obj.name)
    suite.log.outdent()

    if not args.complete_report_from_crash == "":

        # make sure the web directory from the crash run exists
        suite.full_web_dir = "{}/{}/".format(
            suite.webTopDir, args.complete_report_from_crash)
        if not os.path.isdir(suite.full_web_dir):
            suite.log.fail("Crash directory does not exist")

        suite.test_dir = args.complete_report_from_crash

        # find all the tests that completed in that web directory
        tests = []
        test_file = ""
        was_benchmark_run = 0
        for sfile in os.listdir(suite.full_web_dir):
            if os.path.isfile(sfile) and sfile.endswith(".status"):
                index = sfile.rfind(".status")
                tests.append(sfile[:index])

                with open(suite.full_web_dir + sfile, "r") as f:
                    for line in f:
                        if line.find("benchmarks updated") >= 0:
                            was_benchmark_run = 1

            if os.path.isfile(sfile) and sfile.endswith(".ini"):
                test_file = sfile


        # create the report for this test run
        num_failed = report.report_this_test_run(suite, was_benchmark_run,
                                                 "recreated report after crash of suite",
                                                 "", tests, test_file)

        # create the suite report
        suite.log.bold("creating suite report...")
        report.report_all_runs(suite, active_test_list)
        suite.log.close_log()
        sys.exit("done")


    #--------------------------------------------------------------------------
    # check bench dir and create output directories
    #--------------------------------------------------------------------------
    all_compile = all([t.compileTest == 1 for t in test_list])

    if not all_compile:
        bench_dir = suite.get_bench_dir()

    if not args.copy_benchmarks is None:
        last_run = suite.get_last_run()

    suite.make_test_dirs()

    if suite.slack_post:
        msg = "{} ({}) test suite started, id: {}\n{}".format(
            suite.suiteName, suite.sub_title, suite.test_dir, args.note)
        suite.slack_post_it(msg)

    if not args.copy_benchmarks is None:
        old_full_test_dir = suite.testTopDir + suite.suiteName + "-tests/" + last_run
        copy_benchmarks(old_full_test_dir, suite.full_web_dir,
                        test_list, bench_dir, suite.log)

        # here, args.copy_benchmarks plays the role of make_benchmarks
        num_failed = report.report_this_test_run(suite, args.copy_benchmarks,
                                                 "copy_benchmarks used -- no new tests run",
                                                 "",
                                                 test_list, args.input_file[0])
        report.report_all_runs(suite, active_test_list)

        if suite.slack_post:
            msg = "copied benchmarks\n{}".format(args.copy_benchmarks)
            suite.slack_post_it(msg)

        sys.exit("done")


    #--------------------------------------------------------------------------
    # figure out what needs updating and do the git updates, save the
    # current hash / HEAD, and make a ChangeLog
    # --------------------------------------------------------------------------
    now = time.localtime(time.time())
    update_time = time.strftime("%Y-%m-%d %H:%M:%S %Z", now)

    no_update = args.no_update.lower()
    if not args.copy_benchmarks is None:
        no_update = "all"

    # the default is to update everything, unless we specified a hash
    # when constructing the Repo object
    if no_update == "none":
        pass

    elif no_update == "all":
        for k in suite.repos:
            suite.repos[k].update = False

    else:
        nouplist = [k.strip() for k in no_update.split(",")]

        for repo in suite.repos.keys():
            if repo.lower() in nouplist:
                suite.repos[repo].update = False

    os.chdir(suite.testTopDir)

    for k in suite.repos:
        suite.log.skip()
        suite.log.bold("repo: {}".format(suite.repos[k].name))
        suite.log.indent()

        if suite.repos[k].update or suite.repos[k].hash_wanted:
            suite.repos[k].git_update()

        suite.repos[k].save_head()

        if suite.repos[k].update:
            suite.repos[k].make_changelog()

        suite.log.outdent()


    # keep track of whether we are running on any branch that is not the
    # suite default
    branches = [suite.repos[r].branch_wanted for r in suite.repos]
    if not all(suite.default_branch == b for b in branches):
        suite.log.warn("some git repos are not on the default branch")
        bf = open("{}/branch.status".format(suite.full_web_dir), "w")
        bf.write("branch different than suite default")
        bf.close()

    #--------------------------------------------------------------------------
    # build the tools and do a make clean, only once per build directory
    #--------------------------------------------------------------------------
    suite.build_tools(test_list)

    all_build_dirs = find_build_dirs(test_list)

    suite.log.skip()
    suite.log.bold("make clean in...")

    for d, source_tree in all_build_dirs:

        if not source_tree == "":
            suite.log.log("{} in {}".format(d, source_tree))
            os.chdir(suite.repos[source_tree].dir + d)
            suite.make_realclean(repo=source_tree)
        else:
            suite.log.log("{}".format(d))
            os.chdir(suite.source_dir + d)
            if suite.sourceTree == "BoxLib":
                suite.make_realclean(repo="BoxLib")
            else:
                suite.make_realclean()

    os.chdir(suite.testTopDir)


    #--------------------------------------------------------------------------
    # main loop over tests
    #--------------------------------------------------------------------------
    for test in test_list:

        suite.log.outdent()  # just to make sure we have no indentation
        suite.log.skip()
        suite.log.bold("working on test: {}".format(test.name))
        suite.log.indent()

        if not args.make_benchmarks is None and (test.restartTest or test.compileTest or
                                                 test.selfTest):
            suite.log.warn("benchmarks not needed for test {}".format(test.name))
            continue

        output_dir = suite.full_test_dir + test.name + '/'
        os.mkdir(output_dir)
        test.output_dir = output_dir


        #----------------------------------------------------------------------
        # compile the code
        #----------------------------------------------------------------------
        if not test.extra_build_dir == "":
            bdir = suite.repos[test.extra_build_dir].dir + test.buildDir
        else:
            bdir = suite.source_dir + test.buildDir

        os.chdir(bdir)

        if test.reClean == 1:
            # for one reason or another, multiple tests use different
            # build options, so make clean again to be safe
            suite.log.log("re-making clean...")
            if not test.extra_build_dir == "":
                suite.make_realclean(repo=test.extra_build_dir)
            else:
                suite.make_realclean()

        suite.log.log("building...")

        coutfile = "{}/{}.make.out".format(output_dir, test.name)

        if suite.sourceTree == "C_Src" or test.testSrcTree == "C_Src":

            comp_string, rc = suite.build_c(test=test, outfile=coutfile)
            executable = test_util.get_recent_filename(bdir, "", ".ex")

        elif suite.sourceTree == "F_Src" or test.testSrcTree == "F_Src":

            comp_string, rc = suite.build_f(test=test, outfile=coutfile)
            executable = test_util.get_recent_filename(bdir, "main", ".exe")

        test.comp_string = comp_string

        # make return code is 0 if build was successful
        if rc == 0: test.compile_successful = True

        # copy the make.out into the web directory
        shutil.copy("{}/{}.make.out".format(output_dir, test.name), suite.full_web_dir)

        if not test.compile_successful:
            error_msg = "ERROR: compilation failed"
            report.report_single_test(suite, test, test_list, failure_msg=error_msg)
            continue

        if test.compileTest:
            suite.log.log("creating problem test report ...")
            report.report_single_test(suite, test, test_list)
            continue


        #----------------------------------------------------------------------
        # copy the necessary files over to the run directory
        #----------------------------------------------------------------------
        suite.log.log("copying files to run directory...")

        needed_files = []
        needed_files.append((executable, "move"))

        needed_files.append((test.inputFile, "copy"))
        # strip out any sub-directory from the build dir
        test.inputFile = os.path.basename(test.inputFile)

        if test.probinFile != "":
            needed_files.append((test.probinFile, "copy"))
            # strip out any sub-directory from the build dir
            test.probinFile = os.path.basename(test.probinFile)

        for auxf in test.auxFiles:
            needed_files.append((auxf, "copy"))

        # if any copy/move fail, we move onto the next test
        skip_to_next_test = 0
        for nfile, action in needed_files:
            if action == "copy":
                act = shutil.copy
            elif action == "move":
                act = shutil.move
            else:
                suite.log.fail("invalid action")

            try: act(nfile, output_dir)
            except IOError:
                error_msg = "ERROR: unable to {} file {}".format(action, nfile)
                report.report_single_test(suite, test, test_list, failure_msg=error_msg)
                skip_to_next_test = 1
                break

        if skip_to_next_test: continue

        skip_to_next_test = 0
        for lfile in test.linkFiles:
            if not os.path.exists(lfile):
                error_msg = "ERROR: link file {} does not exist".format(lfile)
                report.report_single_test(suite, test, test_list, failure_msg=error_msg)
                skip_to_next_test = 1
                break

            else:
                link_source = os.path.abspath(lfile)
                link_name = os.path.join(output_dir, os.path.basename(lfile))
                try: os.symlink(link_source, link_name)
                except IOError:
                    error_msg = "ERROR: unable to symlink link file: {}".format(lfile)
                    report.report_single_test(suite, test, test_list, failure_msg=error_msg)
                    skip_to_next_test = 1
                    break

        if skip_to_next_test: continue


        #----------------------------------------------------------------------
        # run the test
        #----------------------------------------------------------------------
        suite.log.log("running the test...")

        os.chdir(output_dir)

        test.wall_time = time.time()

        if suite.sourceTree == "C_Src" or test.testSrcTree == "C_Src":

            base_cmd = "./{} {} amr.plot_file={}_plt amr.check_file={}_chk".format(
                executable, test.inputFile, test.name, test.name)

            # keep around the checkpoint files only for the restart runs
            if test.restartTest:
                base_cmd += " amr.checkpoint_files_output=1 amr.check_int=%d" % \
                                (test.restartFileNum)
            else:
                base_cmd += " amr.checkpoint_files_output=0"

            base_cmd += " {} {}".format(suite.globalAddToExecString, test.runtime_params)

        elif suite.sourceTree == "F_Src" or test.testSrcTree == "F_Src":

            base_cmd = "./{} {} --plot_base_name {}_plt --check_base_name {}_chk ".format(
                executable, test.inputFile, test.name, test.name)

            # keep around the checkpoint files only for the restart runs
            if not test.restartTest: base_cmd += " --chk_int 0 "

            base_cmd += "{} {}".format(suite.globalAddToExecString, test.runtime_params)

        if args.with_valgrind:
            base_cmd = "valgrind " + args.valgrind_options + " " + base_cmd

        suite.run_test(test, base_cmd)


        # if it is a restart test, then rename the final output file and
        # restart the test
        if test.restartTest:
            skip_restart = False

            last_file = test.get_last_plotfile(output_dir=output_dir)

            if last_file == "":
                error_msg = "ERROR: test did not produce output.  Restart test not possible"
                skip_restart = True

            if len(test.find_backtrace()) > 0:
                error_msg = "ERROR: test produced backtraces.  Restart test not possible"
                skip_restart = True

            if skip_restart:
                # copy what we can
                test.wall_time = time.time() - test.wall_time
                shutil.copy("{}.run.out".format(test.name), suite.full_web_dir)
                if os.path.isfile("{}.err.out".format(test.name)):
                    shutil.copy("{}.err.out".format(test.name), suite.full_web_dir)
                    test.has_stderr = True
                suite.copy_backtrace(test)
                report.report_single_test(suite, test, test_list, failure_msg=error_msg)
                continue

            orig_last_file = "orig_{}".format(last_file)
            shutil.move(last_file, orig_last_file)

            if test.diffDir:
                orig_diff_dir = "orig_{}".format(test.diffDir)
                shutil.move(test.diffDir, orig_diff_dir)

            # get the file number to restart from
            restart_file = "%s_chk%5.5d" % (test.name, test.restartFileNum)

            suite.log.log("restarting from {} ... ".format(restart_file))

            if suite.sourceTree == "C_Src" or test.testSrcTree == "C_Src":

                base_cmd = "./{} {} amr.plot_file={}_plt amr.check_file={}_chk amr.checkpoint_files_output=0 amr.restart={}".format(
                    executable, test.inputFile, test.name, test.name, restart_file)

            elif suite.sourceTree == "F_Src" or test.testSrcTree == "F_Src":

                base_cmd = "./{} {} --plot_base_name {}_plt --check_base_name {}_chk --chk_int 0 --restart {} {}".format(
                    executable, test.inputFile, test.name, test.name, test.restartFileNum, suite.globalAddToExecString)

            suite.run_test(test, base_cmd)

        test.wall_time = time.time() - test.wall_time


        #----------------------------------------------------------------------
        # do the comparison
        #----------------------------------------------------------------------
        if not test.selfTest:

            if test.outputFile == "":
                if test.compareFile == "":
                    compare_file = test.get_last_plotfile(output_dir=output_dir)
                else:
                    # we specified the name of the file we want to
                    # compare to -- make sure it exists
                    compare_file = test.compareFile
                    if not os.path.isdir(compare_file):
                        compare_file = ""

                output_file = compare_file
            else:
                output_file = test.outputFile
                compare_file = test.name+'_'+output_file


            # get the number of levels for reporting
            prog = "{} -l {}".format(suite.tools["fboxinfo"], output_file)
            stdout0, stderr0, rc = test_util.run(prog)
            test.nlevels = stdout0.rstrip('\n')
            if not type(params.convert_type(test.nlevels)) is int:
                test.nlevels = ""

            if args.make_benchmarks is None:

                suite.log.log("doing the comparison...")
                suite.log.indent()
                suite.log.log("comparison file: {}".format(output_file))

                test.compare_file_used = output_file

                if not test.restartTest:
                    bench_file = bench_dir + compare_file
                else:
                    bench_file = orig_last_file

                # see if it exists
                # note, with BoxLib, the plotfiles are actually directories

                if not os.path.isdir(bench_file):
                    suite.log.warn("no corresponding benchmark found")
                    bench_file = ""

                    with open("{}.compare.out".format(test.name), 'w') as cf:
                        cf.write("WARNING: no corresponding benchmark found\n")
                        cf.write("         unable to do a comparison\n")

                else:
                    if not compare_file == "":

                        suite.log.log("benchmark file: {}".format(bench_file))

                        command = "{} -n 0 {} {}".format(
                            suite.tools["fcompare"], bench_file, output_file)
                        sout, serr, ierr = test_util.run(command,
                                                         outfile="{}.compare.out".format(test.name), store_command=True)

                        if ierr == 0:
                            test.compare_successful = True

                    else:
                        suite.log.warn("unable to do a comparison")

                        with open("{}.compare.out".format(test.name), 'w') as cf:
                            cf.write("WARNING: run did not produce any output\n")
                            cf.write("         unable to do a comparison\n")

                suite.log.outdent()

                if not test.diffDir == "":
                    if not test.restartTest:
                        diff_dir_bench = bench_dir + '/' + test.name + '_' + test.diffDir
                    else:
                        diff_dir_bench = orig_diff_dir

                    suite.log.log("doing the diff...")
                    suite.log.log("diff dir: {}".format(test.diffDir))

                    command = "diff {} -r {} {}".format(
                        test.diffOpts, diff_dir_bench, test.diffDir)

                    outfile = "{}.compare.out".format(test.name)
                    sout, serr, diff_status = test_util.run(command, outfile=outfile, store_command=True)

                    if diff_status == 0:
                        diff_successful = True
                        with open("{}.compare.out".format(test.name), 'a') as cf:
                            cf.write("\ndiff was SUCCESSFUL\n")
                    else:
                        diff_successful = False

                    test.compare_successful = test.compare_successful and diff_successful

            else:   # make_benchmarks

                suite.log.log("storing output of {} as the new benchmark...".format(test.name))
                suite.log.indent()
                suite.log.warn("new benchmark file: {}".format(compare_file))
                suite.log.outdent()

                if not compare_file == "":
                    if not output_file == compare_file:
                        source_file = output_file
                    else:
                        source_file = compare_file

                    try: shutil.rmtree("{}/{}".format(bench_dir, compare_file))
                    except: pass
                    shutil.copytree(source_file, "{}/{}".format(bench_dir, compare_file))

                    with open("{}.status".format(test.name), 'w') as cf:
                        cf.write("benchmarks updated.  New file:  {}\n".format(compare_file) )

                else:
                    with open("{}.status".format(test.name), 'w') as cf:
                        cf.write("benchmarks failed")

                    # copy what we can
                    shutil.copy("{}.run.out".format(test.name), suite.full_web_dir)
                    if os.path.isfile("{}.err.out".format(test.name)):
                        shutil.copy("{}.err.out".format(test.name), suite.full_web_dir)
                        test.has_stderr = True
                    suite.copy_backtrace(test)
                    error_msg = "ERROR: runtime failure during benchmark creation"
                    report.report_single_test(suite, test, test_list, failure_msg=error_msg)


                if not test.diffDir == "":
                    diff_dir_bench = "{}/{}_{}".format(bench_dir, test.name, test.diffDir)
                    if os.path.isdir(diff_dir_bench):
                        shutil.rmtree(diff_dir_bench)
                        shutil.copytree(test.diffDir, diff_dir_bench)
                    else:
                        shutil.copy(test.diffDir, diff_dir_bench)
                    suite.log.log("new diffDir: {}_{}".format(test.name, test.diffDir))

        else:   # selfTest

            if args.make_benchmarks is None:

                suite.log.log("looking for selfTest success string: {} ...".format(test.stSuccessString))

                try: of = open("{}.run.out".format(test.name), 'r')
                except IOError:
                    suite.log.warn("no output file found")
                    out_lines = ['']
                else:
                    out_lines = of.readlines()

                    # successful comparison is indicated by presence
                    # of success string
                    for line in out_lines:
                        if line.find(test.stSuccessString) >= 0:
                            test.compare_successful = True
                            break

                    of.close()

                with open("{}.compare.out".format(test.name), 'w') as cf:
                    if test.compare_successful:
                        cf.write("SELF TEST SUCCESSFUL\n")
                    else:
                        cf.write("SELF TEST FAILED\n")


        #----------------------------------------------------------------------
        # do any requested visualization (2- and 3-d only) and analysis
        #----------------------------------------------------------------------
        if not test.selfTest:
            if output_file != "":
                if args.make_benchmarks is None:

                    # get any parameters for the summary table
                    job_info_file = "{}/job_info".format(output_file)
                    if os.path.isfile(job_info_file):
                        test.has_jobinfo = 1

                    try: jif = open(job_info_file, "r")
                    except:
                        suite.log.warn("unable to open the job_info file")
                    else:
                        job_file_lines = jif.readlines()

                        if suite.summary_job_info_field1 != "":
                            for l in job_file_lines:
                                if l.find(suite.summary_job_info_field1) >= 0 and l.find(":") >= 0:
                                    _tmp = l.split(":")[1]
                                    idx = _tmp.rfind("/") + 1
                                    test.job_info_field1 = _tmp[idx:]
                                    break

                        if suite.summary_job_info_field2 != "":
                            for l in job_file_lines:
                                if l.find(suite.summary_job_info_field2) >= 0 and l.find(":") >= 0:
                                    _tmp = l.split(":")[1]
                                    idx = _tmp.rfind("/") + 1
                                    test.job_info_field2 = _tmp[idx:]
                                    break

                        if suite.summary_job_info_field3 != "":
                            for l in job_file_lines:
                                if l.find(suite.summary_job_info_field3) >= 0 and l.find(":") >= 0:
                                    _tmp = l.split(":")[1]
                                    idx = _tmp.rfind("/") + 1
                                    test.job_info_field3 = _tmp[idx:]
                                    break

                    # visualization
                    if test.doVis:

                        if test.dim == 1:
                            suite.log.log("Visualization not supported for dim = {}".format(test.dim))
                        else:
                            suite.log.log("doing the visualization...")
                            tool = suite.tools["fsnapshot{}d".format(test.dim)]
                            test_util.run('{} --palette {}/Palette -cname "{}" -p "{}"'.format(
                                tool, suite.compare_tool_dir, test.visVar, output_file))

                            # convert the .ppm files into .png files
                            ppm_file = test_util.get_recent_filename(output_dir, "", ".ppm")
                            if not ppm_file is None:
                                png_file = ppm_file.replace(".ppm", ".png")
                                test_util.run("convert {} {}".format(ppm_file, png_file))
                                test.png_file = png_file

                    # analysis
                    if not test.analysisRoutine == "":

                        suite.log.log("doing the analysis...")
                        if not test.extra_build_dir == "":
                            tool = "{}/{}".format(suite.repos[test.extra_build_dir].dir, test.analysisRoutine)
                        else:
                            tool = "{}/{}".format(suite.source_dir, test.analysisRoutine)

                        shutil.copy(tool, os.getcwd())

                        option = eval("suite.{}".format(test.analysisMainArgs))
                        test_util.run("{} {} {}".format(os.path.basename(test.analysisRoutine),
                                                        option, output_file))

            else:
                if test.doVis or test.analysisRoutine != "":
                    suite.log.warn("no output file.  Skipping visualization")


        #----------------------------------------------------------------------
        # move the output files into the web directory
        #----------------------------------------------------------------------
        if args.make_benchmarks is None:
            shutil.copy("{}.run.out".format(test.name), suite.full_web_dir)
            if os.path.isfile("{}.err.out".format(test.name)):
                shutil.copy("{}.err.out".format(test.name), suite.full_web_dir)
                test.has_stderr = True
            shutil.copy("{}.compare.out".format(test.name), suite.full_web_dir)

            shutil.copy(test.inputFile, "{}/{}.{}".format(
                suite.full_web_dir, test.name, test.inputFile) )

            if test.has_jobinfo:
                shutil.copy(job_info_file, "{}/{}.job_info".format(
                    suite.full_web_dir, test.name))

            if suite.sourceTree == "C_Src" and test.probinFile != "":
                shutil.copy(test.probinFile, "{}/{}.{}".format(
                    suite.full_web_dir, test.name, test.probinFile) )

            for af in test.auxFiles:

                # strip out any sub-directory under build dir for the aux file
                # when copying
                shutil.copy(os.path.basename(af),
                            "{}/{}.{}".format(suite.full_web_dir,
                                              test.name, os.path.basename(af)) )

            if not test.png_file is None:
                try: shutil.copy(test.png_file, suite.full_web_dir)
                except IOError:
                    # visualization was not successful.  Reset image
                    test.png_file = None

            if not test.analysisRoutine == "":
                try: shutil.copy(test.analysisOutputImage, suite.full_web_dir)
                except IOError:
                    # analysis was not successful.  Reset the output image
                    test.analysisOutputImage = ""

            # were any Backtrace files output (indicating a crash)
            suite.copy_backtrace(test)

        else:
            shutil.copy("{}.status".format(test.name), suite.full_web_dir)


        #----------------------------------------------------------------------
        # archive (or delete) the output
        #----------------------------------------------------------------------
        suite.log.log("archiving the output...")
        for pfile in os.listdir(output_dir):
            if (os.path.isdir(pfile) and
                (pfile.startswith("{}_plt".format(test.name)) or
                 pfile.startswith("{}_chk".format(test.name)) ) ):

                if suite.purge_output == 1 and not pfile == output_file:
                    # delete the plt/chk file
                    if os.path.isdir(pfile):
                        try: shutil.rmtree(pfile)
                        except:
                            suite.log.warn("unable to remove {}".format(pfile))

                else:
                    # tar it up
                    try:
                        tar = tarfile.open("{}.tgz".format(pfile), "w:gz")
                        tar.add("{}".format(pfile))
                        tar.close()

                    except:
                        suite.log.warn("unable to tar output file {}".format(pfile))

                    else:
                        try: shutil.rmtree(pfile)
                        except OSError:
                            suite.log.warn("unable to remove {}".format(pfile))


        #----------------------------------------------------------------------
        # write the report for this test
        #----------------------------------------------------------------------
        if args.make_benchmarks is None:
            suite.log.log("creating problem test report ...")
            report.report_single_test(suite, test, test_list)


    #--------------------------------------------------------------------------
    # write the report for this instance of the test suite
    #--------------------------------------------------------------------------
    suite.log.outdent()
    suite.log.skip()
    suite.log.bold("creating new test report...")
    num_failed = report.report_this_test_run(suite, args.make_benchmarks, args.note,
                                             update_time,
                                             test_list, args.input_file[0])


    # make sure that all of the files in the web directory are world readable
    for ofile in os.listdir(suite.full_web_dir):
        current_file = suite.full_web_dir + ofile

        if os.path.isfile(current_file):
            os.chmod(current_file, 0o644)

    # reset the branch to what it was originally
    suite.log.skip()
    suite.log.bold("reverting git branches/hashes")
    suite.log.indent()

    for k in suite.repos:
        if suite.repos[k].update or suite.repos[k].hash_wanted:
            suite.repos[k].git_back()

    suite.log.outdent()

    # For a temporary run, return now without creating the suite report.
    if args.do_temp_run:
        return num_failed


    # store an output file in the web directory that can be parsed easily by
    # an external program
    name = "source"
    if suite.sourceTree == "BoxLib": name = "BoxLib"
    branch = suite.repos[name].branch_wanted.strip("\"")

    with open("{}/suite.{}.status".format(suite.webTopDir, branch), "w") as f:
        f.write("{}; num failed: {}; source hash: {}".format(
            suite.repos[name].name, num_failed, suite.repos[name].hash_current))


    #--------------------------------------------------------------------------
    # generate the master report for all test instances
    #--------------------------------------------------------------------------
    suite.log.skip()
    suite.log.bold("creating suite report...")
    report.report_all_runs(suite, active_test_list)

    def email_developers():
        msg = email.message_from_string(suite.emailBody)
        msg['From'] = suite.emailFrom
        msg['To'] = ",".join(suite.emailTo)
        msg['Subject'] = suite.emailSubject

        server = smtplib.SMTP('localhost')
        server.sendmail(suite.emailFrom, suite.emailTo, msg.as_string())
        server.quit()

    if num_failed > 0 and suite.sendEmailWhenFail and not args.send_no_email:
        suite.log.skip()
        suite.log.bold("sending email...")
        email_developers()


    if suite.slack_post:
        suite.slack_post_it("test complete, num failed = {}\n{}".format(num_failed, suite.emailBody))

    return num_failed
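
A hedged sketch of how this driver might be invoked, assuming the argument list (the .ini test file plus any flags) is passed straight through from the command line and the number of failed tests is used as the exit status:

if __name__ == "__main__":
    import sys

    # pass everything after the program name; test_util.get_args parses it
    failed = test_suite(sys.argv[1:])
    sys.exit(failed)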
Example #3
def reg_test_gc(argv):
    usage = """
    ./reg_test_gc [--before|-b 2000-00-00]
       testfile.ini
    """

    if len(argv) == 1:
        print(usage)
        sys.exit(2)
        
    try:
        opts, rest = getopt.getopt(argv[1:], "b:",
                                   ["before="])

    except getopt.GetoptError:
        print("invalid calling sequence")
        print(usage)
        sys.exit(2)

    # defaults
    gcdate = ""
    
    for o, a in opts:
        if o in ("--before", "-b"):
            gcdate = a

    try:
        testFile = rest[0]

    except IndexError:
        print("ERROR: a test file was not specified")
        print(usage)
        sys.exit(2)

    if not gcdate:
        print("ERROR: date was not specified")
        print(usage)
        sys.exit(2)
            
    gcd = valid_date(gcdate)
    if gcd == '':
        print("ERROR: invalid date", gcdate)
        print(usage)
        sys.exit(2)


    workdir = os.getcwd()

    print("loading ", testFile)

    args = test_util.get_args([testFile])

    suite, testList = params.load_params(args)
    activeTestList = [t.name for t in testList]

    benchmarkTestList = [t for t in testList if not (t.compileTest or t.restartTest)]
    benchmarkNotFound = {}
    for t in benchmarkTestList:
        benchmarkNotFound[t.name] = ''


    ### clean up the web dir
    print("\ncleaning ", suite.webTopDir)

    os.chdir(suite.webTopDir)
    validDirs = []
    for d in os.listdir(suite.webTopDir):
        if (d.startswith("20") and os.path.isdir(d)):
            statusFile = d + '/' + d + '.status'
            if (os.path.isfile(statusFile)):
                validDirs.append(d)
    validDirs.sort()
    validDirs.reverse()
    
    latestBMDate = {}

    for d in validDirs:
        bmtests = list(benchmarkNotFound.keys())  # snapshot so we can delete from the dict below
        if d >= gcd and bmtests:
            if isBenchmarkDir(d):
                for t in bmtests:
                    if findBenchmark(d,t):
                        del benchmarkNotFound[t]
                        latestBMDate[t] = d
        else:
            if isBenchmarkDir(d) and bmtests:
                found = False
                for t in bmtests:
                    if findBenchmark(d,t):
                        found = True
                        del benchmarkNotFound[t]
                        latestBMDate[t] = d
                if not found:
                    rmDir(d)
            else:
                rmDir(d)


    ### clean up the test dir
    testDirs = os.path.join(suite.testTopDir, suite.suiteName + "-tests")
    print("\ncleaning ", testDirs)

    os.chdir(testDirs)
    validDirs = []
    for d in os.listdir(testDirs):
        if (d.startswith("20") and os.path.isdir(d)):
            validDirs.append(d)
    validDirs.sort()
    validDirs.reverse()

    for d in validDirs:
        if d < gcd:
            tests = [t for t in os.listdir(d) if os.path.isdir(os.path.join(d,t))]
            found = False
            for t in tests:
                if t in latestBMDate.keys() and latestBMDate[t] == d:
                    found = True
                    break
            if not found:
                rmDir(d)
    
    print("\ncreating suite report...")
    report.report_all_runs(suite, activeTestList)

    print("\nGarbage cleaning finished.")
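
Example #3 calls a valid_date helper that is expected to return the date string when it is well formed and an empty string otherwise; a minimal sketch under that assumption, using the YYYY-MM-DD form suggested by the usage string:

import datetime

def valid_date(date_string):
    # return the date unchanged if it parses as YYYY-MM-DD, otherwise ""
    try:
        datetime.datetime.strptime(date_string, "%Y-%m-%d")
        return date_string
    except ValueError:
        return ""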
Example #4
def test_suite(argv):
    """
    the main test suite driver
    """

    # parse the commandline arguments
    args = test_util.get_args(arg_string=argv)

    # read in the test information
    suite, test_list = params.load_params(args)

    active_test_list = [t.name for t in test_list]

    test_list = suite.get_tests_to_run(test_list)

    suite.log.skip()
    suite.log.bold("running tests: ")
    suite.log.indent()
    for obj in test_list:
        suite.log.log(obj.name)
    suite.log.outdent()

    if not args.complete_report_from_crash == "":

        # make sure the web directory from the crash run exists
        suite.full_web_dir = "{}/{}/".format(suite.webTopDir,
                                             args.complete_report_from_crash)
        if not os.path.isdir(suite.full_web_dir):
            suite.log.fail("Crash directory does not exist")

        suite.test_dir = args.complete_report_from_crash

        # find all the tests that completed in that web directory
        tests = []
        test_file = ""
        was_benchmark_run = 0
        for sfile in os.listdir(suite.full_web_dir):
            if os.path.isfile(sfile) and sfile.endswith(".status"):
                index = sfile.rfind(".status")
                tests.append(sfile[:index])

                with open(suite.full_web_dir + sfile, "r") as f:
                    for line in f:
                        if line.find("benchmarks updated") >= 0:
                            was_benchmark_run = 1

            if os.path.isfile(sfile) and sfile.endswith(".ini"):
                test_file = sfile

        # create the report for this test run
        num_failed = report.report_this_test_run(
            suite, was_benchmark_run, "recreated report after crash of suite",
            "", tests, test_file)

        # create the suite report
        suite.log.bold("creating suite report...")
        report.report_all_runs(suite, active_test_list)
        suite.log.close_log()
        sys.exit("done")

    #--------------------------------------------------------------------------
    # check bench dir and create output directories
    #--------------------------------------------------------------------------
    all_compile = all([t.compileTest == 1 for t in test_list])

    if not all_compile:
        bench_dir = suite.get_bench_dir()

    if not args.copy_benchmarks is None:
        last_run = suite.get_last_run()

    suite.make_test_dirs()

    if suite.slack_post:
        msg = "> {} ({}) test suite started, id: {}\n> {}".format(
            suite.suiteName, suite.sub_title, suite.test_dir, args.note)
        suite.slack_post_it(msg)

    if not args.copy_benchmarks is None:
        old_full_test_dir = suite.testTopDir + suite.suiteName + "-tests/" + last_run
        copy_benchmarks(old_full_test_dir, suite.full_web_dir, test_list,
                        bench_dir, suite.log)

        # here, args.copy_benchmarks plays the role of make_benchmarks
        num_failed = report.report_this_test_run(
            suite, args.copy_benchmarks,
            "copy_benchmarks used -- no new tests run", "", test_list,
            args.input_file[0])
        report.report_all_runs(suite, active_test_list)

        if suite.slack_post:
            msg = "> copied benchmarks\n> {}".format(args.copy_benchmarks)
            suite.slack_post_it(msg)

        sys.exit("done")

    #--------------------------------------------------------------------------
    # figure out what needs updating and do the git updates, save the
    # current hash / HEAD, and make a ChangeLog
    # --------------------------------------------------------------------------
    now = time.localtime(time.time())
    update_time = time.strftime("%Y-%m-%d %H:%M:%S %Z", now)

    no_update = args.no_update.lower()
    if not args.copy_benchmarks is None:
        no_update = "all"

    # the default is to update everything, unless we specified a hash
    # when constructing the Repo object
    if no_update == "none":
        pass

    elif no_update == "all":
        for k in suite.repos:
            suite.repos[k].update = False

    else:
        nouplist = [k.strip() for k in no_update.split(",")]

        for repo in suite.repos.keys():
            if repo.lower() in nouplist:
                suite.repos[repo].update = False

    os.chdir(suite.testTopDir)

    for k in suite.repos:
        suite.log.skip()
        suite.log.bold("repo: {}".format(suite.repos[k].name))
        suite.log.indent()

        if suite.repos[k].update or suite.repos[k].hash_wanted:
            suite.repos[k].git_update()

        suite.repos[k].save_head()

        if suite.repos[k].update:
            suite.repos[k].make_changelog()

        suite.log.outdent()

    # keep track of whether we are running on any branch that is not the
    # suite default
    branches = [suite.repos[r].branch_wanted for r in suite.repos]
    if not all(suite.default_branch == b for b in branches):
        suite.log.warn("some git repos are not on the default branch")
        bf = open("{}/branch.status".format(suite.full_web_dir), "w")
        bf.write("branch different than suite default")
        bf.close()

    #--------------------------------------------------------------------------
    # build the tools and do a make clean, only once per build directory
    #--------------------------------------------------------------------------
    suite.build_tools(test_list)

    all_build_dirs = find_build_dirs(test_list)

    suite.log.skip()
    suite.log.bold("make clean in...")

    for d, source_tree in all_build_dirs:

        if not source_tree == "":
            suite.log.log("{} in {}".format(d, source_tree))
            os.chdir(suite.repos[source_tree].dir + d)
            suite.make_realclean(repo=source_tree)
        else:
            suite.log.log("{}".format(d))
            os.chdir(suite.source_dir + d)
            if suite.sourceTree in ["AMReX", "amrex"]:
                suite.make_realclean(repo="AMReX")
            else:
                suite.make_realclean()

    os.chdir(suite.testTopDir)

    #--------------------------------------------------------------------------
    # Setup Cmake if needed
    #--------------------------------------------------------------------------
    if (suite.useCmake):
        cmake_setup(suite)

    #--------------------------------------------------------------------------
    # main loop over tests
    #--------------------------------------------------------------------------
    for test in test_list:

        suite.log.outdent()  # just to make sure we have no indentation
        suite.log.skip()
        suite.log.bold("working on test: {}".format(test.name))
        suite.log.indent()

        if not args.make_benchmarks is None and (test.restartTest
                                                 or test.compileTest
                                                 or test.selfTest):
            suite.log.warn("benchmarks not needed for test {}".format(
                test.name))
            continue

        output_dir = suite.full_test_dir + test.name + '/'
        os.mkdir(output_dir)
        test.output_dir = output_dir

        #----------------------------------------------------------------------
        # compile the code
        #----------------------------------------------------------------------
        if not test.extra_build_dir == "":
            bdir = suite.repos[test.extra_build_dir].dir + test.buildDir
        else:
            bdir = suite.source_dir + test.buildDir

        # # For cmake builds, there is only one build dir
        # if ( suite.useCmake ): bdir = suite.source_build_dir

        os.chdir(bdir)

        if test.reClean == 1:
            # for one reason or another, multiple tests use different
            # build options, so make clean again to be safe
            suite.log.log("re-making clean...")
            if not test.extra_build_dir == "":
                suite.make_realclean(repo=test.extra_build_dir)
            else:
                suite.make_realclean()

        suite.log.log("building...")

        coutfile = "{}/{}.make.out".format(output_dir, test.name)

        if suite.sourceTree == "C_Src" or test.testSrcTree == "C_Src":
            if (suite.useCmake):
                comp_string, rc = suite.build_test_cmake(test=test,
                                                         outfile=coutfile)
            else:
                comp_string, rc = suite.build_c(test=test, outfile=coutfile)

            executable = test_util.get_recent_filename(bdir, "", ".ex")

        elif suite.sourceTree == "F_Src" or test.testSrcTree == "F_Src":
            comp_string, rc = suite.build_f(test=test, outfile=coutfile)
            executable = test_util.get_recent_filename(bdir, "main", ".exe")

        test.comp_string = comp_string

        # make return code is 0 if build was successful
        if rc == 0: test.compile_successful = True

        # copy the make.out into the web directory
        shutil.copy("{}/{}.make.out".format(output_dir, test.name),
                    suite.full_web_dir)

        if not test.compile_successful:
            error_msg = "ERROR: compilation failed"
            report.report_single_test(suite,
                                      test,
                                      test_list,
                                      failure_msg=error_msg)
            continue

        if test.compileTest:
            suite.log.log("creating problem test report ...")
            report.report_single_test(suite, test, test_list)
            continue

        #----------------------------------------------------------------------
        # copy the necessary files over to the run directory
        #----------------------------------------------------------------------
        suite.log.log("copying files to run directory...")

        needed_files = []
        if executable is not None:
            needed_files.append((executable, "move"))

        needed_files.append((test.inputFile, "copy"))
        # strip out any sub-directory from the build dir
        test.inputFile = os.path.basename(test.inputFile)

        if test.probinFile != "":
            needed_files.append((test.probinFile, "copy"))
            # strip out any sub-directory from the build dir
            test.probinFile = os.path.basename(test.probinFile)

        for auxf in test.auxFiles:
            needed_files.append((auxf, "copy"))

        # if any copy/move fail, we move onto the next test
        skip_to_next_test = 0
        for nfile, action in needed_files:
            if action == "copy":
                act = shutil.copy
            elif action == "move":
                act = shutil.move
            else:
                suite.log.fail("invalid action")

            try:
                act(nfile, output_dir)
            except IOError:
                error_msg = "ERROR: unable to {} file {}".format(action, nfile)
                report.report_single_test(suite,
                                          test,
                                          test_list,
                                          failure_msg=error_msg)
                skip_to_next_test = 1
                break

        if skip_to_next_test: continue

        skip_to_next_test = 0
        for lfile in test.linkFiles:
            if not os.path.exists(lfile):
                error_msg = "ERROR: link file {} does not exist".format(lfile)
                report.report_single_test(suite,
                                          test,
                                          test_list,
                                          failure_msg=error_msg)
                skip_to_next_test = 1
                break

            else:
                link_source = os.path.abspath(lfile)
                link_name = os.path.join(output_dir, os.path.basename(lfile))
                try:
                    os.symlink(link_source, link_name)
                except IOError:
                    error_msg = "ERROR: unable to symlink link file: {}".format(
                        lfile)
                    report.report_single_test(suite,
                                              test,
                                              test_list,
                                              failure_msg=error_msg)
                    skip_to_next_test = 1
                    break

        if skip_to_next_test: continue

        #----------------------------------------------------------------------
        # run the test
        #----------------------------------------------------------------------
        suite.log.log("running the test...")

        os.chdir(output_dir)

        test.wall_time = time.time()

        if suite.sourceTree == "C_Src" or test.testSrcTree == "C_Src":

            base_cmd = "./{} {} amr.plot_file={}_plt amr.check_file={}_chk".format(
                executable, test.inputFile, test.name, test.name)

            # keep around the checkpoint files only for the restart runs
            if test.restartTest:
                base_cmd += " amr.checkpoint_files_output=1 amr.check_int=%d" % \
                                (test.restartFileNum)
            else:
                base_cmd += " amr.checkpoint_files_output=0"

            base_cmd += " {} {}".format(suite.globalAddToExecString,
                                        test.runtime_params)

        elif suite.sourceTree == "F_Src" or test.testSrcTree == "F_Src":

            base_cmd = "./{} {} --plot_base_name {}_plt --check_base_name {}_chk ".format(
                executable, test.inputFile, test.name, test.name)

            # keep around the checkpoint files only for the restart runs
            if not test.restartTest: base_cmd += " --chk_int 0 "

            base_cmd += "{} {}".format(suite.globalAddToExecString,
                                       test.runtime_params)

        if args.with_valgrind:
            base_cmd = "valgrind " + args.valgrind_options + " " + base_cmd

        if test.customRunCmd is not None:
            base_cmd = test.customRunCmd

        suite.run_test(test, base_cmd)

        # if it is a restart test, then rename the final output file and
        # restart the test
        if test.restartTest:
            skip_restart = False

            last_file = test.get_last_plotfile(output_dir=output_dir)

            if last_file == "":
                error_msg = "ERROR: test did not produce output.  Restart test not possible"
                skip_restart = True

            if len(test.find_backtrace()) > 0:
                error_msg = "ERROR: test produced backtraces.  Restart test not possible"
                skip_restart = True

            if skip_restart:
                # copy what we can
                test.wall_time = time.time() - test.wall_time
                shutil.copy("{}.run.out".format(test.name), suite.full_web_dir)
                if os.path.isfile("{}.err.out".format(test.name)):
                    shutil.copy("{}.err.out".format(test.name),
                                suite.full_web_dir)
                    test.has_stderr = True
                suite.copy_backtrace(test)
                report.report_single_test(suite,
                                          test,
                                          test_list,
                                          failure_msg=error_msg)
                continue
            orig_last_file = "orig_{}".format(last_file)
            shutil.move(last_file, orig_last_file)

            if test.diffDir:
                orig_diff_dir = "orig_{}".format(test.diffDir)
                shutil.move(test.diffDir, orig_diff_dir)

            # get the file number to restart from
            restart_file = "%s_chk%5.5d" % (test.name, test.restartFileNum)

            suite.log.log("restarting from {} ... ".format(restart_file))

            if suite.sourceTree == "C_Src" or test.testSrcTree == "C_Src":

                base_cmd = "./{} {} amr.plot_file={}_plt amr.check_file={}_chk amr.checkpoint_files_output=0 amr.restart={}".format(
                    executable, test.inputFile, test.name, test.name,
                    restart_file)

            elif suite.sourceTree == "F_Src" or test.testSrcTree == "F_Src":

                base_cmd = "./{} {} --plot_base_name {}_plt --check_base_name {}_chk --chk_int 0 --restart {} {}".format(
                    executable, test.inputFile, test.name, test.name,
                    test.restartFileNum, suite.globalAddToExecString)

            suite.run_test(test, base_cmd)

        test.wall_time = time.time() - test.wall_time

        #----------------------------------------------------------------------
        # do the comparison
        #----------------------------------------------------------------------
        if not test.selfTest:

            if test.outputFile == "":
                if test.compareFile == "":
                    compare_file = test.get_last_plotfile(
                        output_dir=output_dir)
                else:
                    # we specified the name of the file we want to
                    # compare to -- make sure it exists
                    compare_file = test.compareFile
                    if not os.path.isdir(compare_file):
                        compare_file = ""

                output_file = compare_file
            else:
                output_file = test.outputFile
                compare_file = test.name + '_' + output_file

            # get the number of levels for reporting
            prog = "{} -l {}".format(suite.tools["fboxinfo"], output_file)
            stdout0, stderr0, rc = test_util.run(prog)
            test.nlevels = stdout0.rstrip('\n')
            if not type(params.convert_type(test.nlevels)) is int:
                test.nlevels = ""

            if args.make_benchmarks is None:

                suite.log.log("doing the comparison...")
                suite.log.indent()
                suite.log.log("comparison file: {}".format(output_file))

                test.compare_file_used = output_file

                if not test.restartTest:
                    bench_file = bench_dir + compare_file
                else:
                    bench_file = orig_last_file

                # see if it exists
                # note, with AMReX, the plotfiles are actually directories

                if not os.path.isdir(bench_file):
                    suite.log.warn("no corresponding benchmark found")
                    bench_file = ""

                    with open("{}.compare.out".format(test.name), 'w') as cf:
                        cf.write("WARNING: no corresponding benchmark found\n")
                        cf.write("         unable to do a comparison\n")

                else:
                    if not compare_file == "":

                        suite.log.log("benchmark file: {}".format(bench_file))

                        command = "{} -n 0 {} {}".format(
                            suite.tools["fcompare"], bench_file, output_file)

                        sout, serr, ierr = test_util.run(
                            command,
                            outfile="{}.compare.out".format(test.name),
                            store_command=True)

                        if ierr == 0:
                            test.compare_successful = True

                        if test.compareParticles:
                            for ptype in test.particleTypes.strip().split():
                                command = "{} {} {} {}".format(
                                    suite.tools["particle_compare"],
                                    bench_file, output_file, ptype)

                                sout, serr, ierr = test_util.run(
                                    command,
                                    outfile="{}.compare.out".format(test.name),
                                    store_command=True)

                                test.compare_successful = test.compare_successful and not ierr

                    else:
                        suite.log.warn("unable to do a comparison")

                        with open("{}.compare.out".format(test.name),
                                  'w') as cf:
                            cf.write(
                                "WARNING: run did not produce any output\n")
                            cf.write("         unable to do a comparison\n")

                suite.log.outdent()

                if not test.diffDir == "":
                    if not test.restartTest:
                        diff_dir_bench = bench_dir + '/' + test.name + '_' + test.diffDir
                    else:
                        diff_dir_bench = orig_diff_dir

                    suite.log.log("doing the diff...")
                    suite.log.log("diff dir: {}".format(test.diffDir))

                    command = "diff {} -r {} {}".format(
                        test.diffOpts, diff_dir_bench, test.diffDir)

                    outfile = "{}.compare.out".format(test.name)
                    sout, serr, diff_status = test_util.run(command,
                                                            outfile=outfile,
                                                            store_command=True)

                    if diff_status == 0:
                        diff_successful = True
                        with open("{}.compare.out".format(test.name),
                                  'a') as cf:
                            cf.write("\ndiff was SUCCESSFUL\n")
                    else:
                        diff_successful = False

                    test.compare_successful = test.compare_successful and diff_successful

            else:  # make_benchmarks

                suite.log.log(
                    "storing output of {} as the new benchmark...".format(
                        test.name))
                suite.log.indent()
                suite.log.warn("new benchmark file: {}".format(compare_file))
                suite.log.outdent()

                if not compare_file == "":
                    if not output_file == compare_file:
                        source_file = output_file
                    else:
                        source_file = compare_file

                    try:
                        shutil.rmtree("{}/{}".format(bench_dir, compare_file))
                    except:
                        pass
                    shutil.copytree(source_file,
                                    "{}/{}".format(bench_dir, compare_file))

                    with open("{}.status".format(test.name), 'w') as cf:
                        cf.write("benchmarks updated.  New file:  {}\n".format(
                            compare_file))

                else:
                    with open("{}.status".format(test.name), 'w') as cf:
                        cf.write("benchmarks failed")

                    # copy what we can
                    shutil.copy("{}.run.out".format(test.name),
                                suite.full_web_dir)
                    if os.path.isfile("{}.err.out".format(test.name)):
                        shutil.copy("{}.err.out".format(test.name),
                                    suite.full_web_dir)
                        test.has_stderr = True
                    suite.copy_backtrace(test)
                    error_msg = "ERROR: runtime failure during benchmark creation"
                    report.report_single_test(suite,
                                              test,
                                              test_list,
                                              failure_msg=error_msg)

                if not test.diffDir == "":
                    diff_dir_bench = "{}/{}_{}".format(bench_dir, test.name,
                                                       test.diffDir)
                    if os.path.isdir(diff_dir_bench):
                        shutil.rmtree(diff_dir_bench)
                        shutil.copytree(test.diffDir, diff_dir_bench)
                    else:
                        if os.path.isdir(test.diffDir):
                            shutil.copytree(test.diffDir, diff_dir_bench)
                        else:
                            shutil.copy(test.diffDir, diff_dir_bench)
                    suite.log.log("new diffDir: {}_{}".format(
                        test.name, test.diffDir))

        else:  # selfTest

            if args.make_benchmarks is None:

                suite.log.log(
                    "looking for selfTest success string: {} ...".format(
                        test.stSuccessString))

                try:
                    of = open("{}.run.out".format(test.name), 'r')
                except IOError:
                    suite.log.warn("no output file found")
                    out_lines = ['']
                else:
                    out_lines = of.readlines()

                    # successful comparison is indicated by presence
                    # of success string
                    for line in out_lines:
                        if line.find(test.stSuccessString) >= 0:
                            test.compare_successful = True
                            break

                    of.close()

                with open("{}.compare.out".format(test.name), 'w') as cf:
                    if test.compare_successful:
                        cf.write("SELF TEST SUCCESSFUL\n")
                    else:
                        cf.write("SELF TEST FAILED\n")

        #----------------------------------------------------------------------
        # do any requested visualization (2- and 3-d only) and analysis
        #----------------------------------------------------------------------
        if not test.selfTest:
            if output_file != "":
                if args.make_benchmarks is None:

                    # get any parameters for the summary table
                    job_info_file = "{}/job_info".format(output_file)
                    if os.path.isfile(job_info_file):
                        test.has_jobinfo = 1

                    try:
                        jif = open(job_info_file, "r")
                    except:
                        suite.log.warn("unable to open the job_info file")
                    else:
                        job_file_lines = jif.readlines()
                        jif.close()

                        if suite.summary_job_info_field1 != "":
                            for l in job_file_lines:
                                if l.startswith(suite.summary_job_info_field1.
                                                strip()) and l.find(":") >= 0:
                                    _tmp = l.split(":")[1]
                                    idx = _tmp.rfind("/") + 1
                                    test.job_info_field1 = _tmp[idx:]
                                    break

                        if suite.summary_job_info_field2 != "":
                            for l in job_file_lines:
                                if l.startswith(suite.summary_job_info_field2.
                                                strip()) and l.find(":") >= 0:
                                    _tmp = l.split(":")[1]
                                    idx = _tmp.rfind("/") + 1
                                    test.job_info_field2 = _tmp[idx:]
                                    break

                        if suite.summary_job_info_field3 != "":
                            for l in job_file_lines:
                                if l.startswith(suite.summary_job_info_field3.
                                                strip()) and l.find(":") >= 0:
                                    _tmp = l.split(":")[1]
                                    idx = _tmp.rfind("/") + 1
                                    test.job_info_field3 = _tmp[idx:]
                                    break

                    # visualization
                    if test.doVis:

                        if test.dim == 1:
                            suite.log.log(
                                "Visualization not supported for dim = {}".
                                format(test.dim))
                        else:
                            suite.log.log("doing the visualization...")
                            tool = suite.tools["fsnapshot{}d".format(test.dim)]
                            test_util.run(
                                '{} --palette {}/Palette -cname "{}" -p "{}"'.
                                format(tool, suite.f_compare_tool_dir,
                                       test.visVar, output_file))

                            # convert the .ppm files into .png files
                            ppm_file = test_util.get_recent_filename(
                                output_dir, "", ".ppm")
                            if ppm_file is not None:
                                png_file = ppm_file.replace(".ppm", ".png")
                                test_util.run("convert {} {}".format(
                                    ppm_file, png_file))
                                test.png_file = png_file

                    # analysis
                    if not test.analysisRoutine == "":

                        suite.log.log("doing the analysis...")
                        if not test.extra_build_dir == "":
                            tool = "{}/{}".format(
                                suite.repos[test.extra_build_dir].dir,
                                test.analysisRoutine)
                        else:
                            tool = "{}/{}".format(suite.source_dir,
                                                  test.analysisRoutine)

                        shutil.copy(tool, os.getcwd())

                        if test.analysisMainArgs == "":
                            option = ""
                        else:
                            option = eval("suite.{}".format(
                                test.analysisMainArgs))

                        cmd_name = os.path.basename(test.analysisRoutine)
                        cmd_string = "./{} {} {}".format(
                            cmd_name, option, output_file)
                        outfile = "{}.analysis.out".format(test.name)
                        _, _, rc = test_util.run(cmd_string,
                                                 outfile=outfile,
                                                 store_command=True)

                        if rc == 0:
                            analysis_successful = True
                        else:
                            analysis_successful = False
                            suite.log.warn("analysis failed...")

                        test.compare_successful = test.compare_successful and analysis_successful

            else:
                if test.doVis or test.analysisRoutine != "":
                    suite.log.warn("no output file.  Skipping visualization")

        #----------------------------------------------------------------------
        # move the output files into the web directory
        #----------------------------------------------------------------------
        if args.make_benchmarks is None:
            shutil.copy("{}.run.out".format(test.name), suite.full_web_dir)
            if os.path.isfile("{}.err.out".format(test.name)):
                shutil.copy("{}.err.out".format(test.name), suite.full_web_dir)
                test.has_stderr = True
            shutil.copy("{}.compare.out".format(test.name), suite.full_web_dir)
            try:
                shutil.copy("{}.analysis.out".format(test.name),
                            suite.full_web_dir)
            except:
                pass

            shutil.copy(
                test.inputFile, "{}/{}.{}".format(suite.full_web_dir,
                                                  test.name, test.inputFile))

            if test.has_jobinfo:
                shutil.copy(
                    job_info_file,
                    "{}/{}.job_info".format(suite.full_web_dir, test.name))

            if suite.sourceTree == "C_Src" and test.probinFile != "":
                shutil.copy(
                    test.probinFile,
                    "{}/{}.{}".format(suite.full_web_dir, test.name,
                                      test.probinFile))

            for af in test.auxFiles:

                # strip out any sub-directory under build dir for the aux file
                # when copying
                shutil.copy(
                    os.path.basename(af),
                    "{}/{}.{}".format(suite.full_web_dir, test.name,
                                      os.path.basename(af)))

            if test.png_file is not None:
                try:
                    shutil.copy(test.png_file, suite.full_web_dir)
                except IOError:
                    # visualization was not successful.  Reset image
                    test.png_file = None

            if not test.analysisRoutine == "":
                try:
                    shutil.copy(test.analysisOutputImage, suite.full_web_dir)
                except IOError:
                    # analysis was not successful.  Reset the output image
                    test.analysisOutputImage = ""

            # were any Backtrace files output (indicating a crash)
            suite.copy_backtrace(test)

        else:
            shutil.copy("{}.status".format(test.name), suite.full_web_dir)

        #----------------------------------------------------------------------
        # archive (or delete) the output
        #----------------------------------------------------------------------
        suite.log.log("archiving the output...")
        for pfile in os.listdir(output_dir):
            if (os.path.isdir(pfile)
                    and (pfile.startswith("{}_plt".format(test.name))
                         or pfile.startswith("{}_chk".format(test.name)))):

                if suite.purge_output == 1 and not pfile == output_file:
                    # delete the plt/chk file
                    if os.path.isdir(pfile):
                        try:
                            shutil.rmtree(pfile)
                        except:
                            suite.log.warn("unable to remove {}".format(pfile))

                else:
                    # tar it up
                    try:
                        tar = tarfile.open("{}.tgz".format(pfile), "w:gz")
                        tar.add("{}".format(pfile))
                        tar.close()

                    except:
                        suite.log.warn(
                            "unable to tar output file {}".format(pfile))

                    else:
                        try:
                            shutil.rmtree(pfile)
                        except OSError:
                            suite.log.warn("unable to remove {}".format(pfile))

        #----------------------------------------------------------------------
        # write the report for this test
        #----------------------------------------------------------------------
        if args.make_benchmarks is None:
            suite.log.log("creating problem test report ...")
            report.report_single_test(suite, test, test_list)

    #--------------------------------------------------------------------------
    # Clean Cmake build and install directories if needed
    #--------------------------------------------------------------------------
    if (suite.useCmake):
        suite.cmake_clean("AMReX", suite.amrex_dir)
        suite.cmake_clean(suite.suiteName, suite.source_dir)

    #--------------------------------------------------------------------------
    # write the report for this instance of the test suite
    #--------------------------------------------------------------------------
    suite.log.outdent()
    suite.log.skip()
    suite.log.bold("creating new test report...")
    num_failed = report.report_this_test_run(suite, args.make_benchmarks,
                                             args.note, update_time, test_list,
                                             args.input_file[0])

    # make sure that all of the files in the web directory are world readable
    for file in os.listdir(suite.full_web_dir):
        current_file = suite.full_web_dir + file

        if os.path.isfile(current_file):
            os.chmod(current_file, 0o644)

    # reset the branch to what it was originally
    suite.log.skip()
    suite.log.bold("reverting git branches/hashes")
    suite.log.indent()

    for k in suite.repos:
        if suite.repos[k].update or suite.repos[k].hash_wanted:
            suite.repos[k].git_back()

    suite.log.outdent()

    # For a temporary run, return now without creating the suite report.
    if args.do_temp_run:
        return num_failed

    # store an output file in the web directory that can be parsed easily by
    # external program
    name = "source"
    if suite.sourceTree in ["AMReX", "amrex"]: name = "AMReX"
    branch = ''
    if suite.repos[name].branch_wanted:
        branch = suite.repos[name].branch_wanted.strip("\"")

    with open("{}/suite.{}.status".format(suite.webTopDir, branch), "w") as f:
        f.write("{}; num failed: {}; source hash: {}".format(
            suite.repos[name].name, num_failed,
            suite.repos[name].hash_current))

    #--------------------------------------------------------------------------
    # generate the master report for all test instances
    #--------------------------------------------------------------------------
    suite.log.skip()
    suite.log.bold("creating suite report...")
    report.report_all_runs(suite, active_test_list)

    def email_developers():
        msg = email.message_from_string(suite.emailBody)
        msg['From'] = suite.emailFrom
        msg['To'] = ",".join(suite.emailTo)
        msg['Subject'] = suite.emailSubject

        server = smtplib.SMTP('localhost')
        server.sendmail(suite.emailFrom, suite.emailTo, msg.as_string())
        server.quit()

    if num_failed > 0 and suite.sendEmailWhenFail and not args.send_no_email:
        suite.log.skip()
        suite.log.bold("sending email...")
        email_developers()

    if suite.slack_post:
        suite.slack_post_it("> test complete, num failed = {}\n{}".format(
            num_failed, suite.emailBody))

    return num_failed
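
The archiving step near the end of the driver above either deletes each plot/checkpoint directory or tars it up and removes the original. A minimal standalone sketch of that pattern; output_dir, keep and purge are illustrative placeholders, not the suite's real options:

import os
import shutil
import tarfile

def archive_or_purge(output_dir, keep="", purge=False):
    # Tar up (or delete) every plot/checkpoint directory except `keep`,
    # mirroring the archiving loop in the driver above.
    for pfile in os.listdir(output_dir):
        path = os.path.join(output_dir, pfile)
        if not os.path.isdir(path) or pfile == keep:
            continue
        if purge:
            shutil.rmtree(path, ignore_errors=True)
        else:
            with tarfile.open(path + ".tgz", "w:gz") as tar:
                tar.add(path)
            shutil.rmtree(path, ignore_errors=True)
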
Example #5
0
def quench_single_inverse_power(coord_file_name, foldpath, sub_fold_name,
                                optimizer, opt_param_dict):
    """
    figures out the minimum corresponding to a set of particle coords
    Parameters
    ----------
    coord_file_name: string
        name of the path to the coordinates
    foldpath: str
        folder defining the run
    sub_fold_name:
        name of subfolder where the run data is stored
    optimizer: optimizer
        quench function to use
    opt_param_dict: dict
        dictionary of parameters for the optimizer
    """

    sysparams = load_params(foldpath)

    # path to quench coords
    quench_coords_path = (foldpath + "/" + sub_fold_name + "/" + "ensemble/" +
                          coord_file_name)
    quench_coords = np.loadtxt(quench_coords_path)
    radii = get_hs_radii(foldpath, sub_fold_name)
    box_length = get_box_length(radii, sysparams.ndim.value,
                                sysparams.phi.value)

    boxv = [box_length] * sysparams.ndim.value
    ncellx_scale = get_ncellsx_scale(radii, boxv)

    potential = InversePower(
        sysparams.power.value,
        sysparams.eps.value,
        use_cell_lists=False,
        ndim=sysparams.ndim.value,
        radii=radii * 1.0,
        boxvec=boxv,
    )

    boxv = [box_length] * sysparams.ndim.value
    # ncellx_scale = get_ncellsx_scale(radii, boxv)

    print(potential.getEnergy(quench_coords))
    try:
        ret = optimizer(quench_coords, potential, **opt_param_dict)
    except Exception:
        print("exception occurred")
        # If an exception occurs, treat the quench as a failure.  This mainly
        # covers rattlers; failures are rare enough that ignoring them would
        # barely change the statistics, but handle them just in case.
        return (quench_coords, False, 0, 0, 0, 0, 0)

    # This exists because some runs don't have hessian evaluations
    try:
        ret["nhev"]
    except:
        ret["nhev"] = 0

    # mixed optimizer statistics
    try:
        ret["n_phase_1"]
    except:
        ret["n_phase_1"] = 0
    # mixed optimizer statistics
    try:
        ret["n_phase_2"]
    except:
        ret["n_phase_2"] = 0

    print(ret.coords - quench_coords)
    print(opt_param_dict)
    results = (
        ret.coords,
        ret.success,
        ret.nfev,
        ret.nsteps,
        ret.nhev,
        ret.n_phase_1,
        ret.n_phase_2,
    )
    print(quench_coords_path)
    return results
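
The three try/except blocks above all do the same thing: default an optimizer statistic that this particular optimizer did not report. A small helper expressing that pattern, assuming `ret` supports dict-style indexing like the result objects used above:

def fill_missing_stats(ret, keys=("nhev", "n_phase_1", "n_phase_2")):
    # Default any statistic the optimizer did not report to zero so the
    # results tuple can be assembled uniformly afterwards.
    for key in keys:
        try:
            ret[key]
        except Exception:
            ret[key] = 0
    return ret
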
Example #6
0
def googlenet_train(train_batch_size=32, val_batch_size=50, image_size=(3, 224, 224), n_epochs=60):

    #mean_path = '/home/2T/caffe/data/ilsvrc12/imagenet_mean.binaryproto'
    train_lmdb_path = '/home/2T/imagenet/ilsvrc2012/lmdb/ilsvrc12_train_lmdb'
    val_lmdb_path = '/home/2T/imagenet/ilsvrc2012/lmdb/ilsvrc12_val_lmdb'

    train_input_shape = (train_batch_size,) + image_size
    val_input_shape = (val_batch_size,) + image_size
    training_model = googlenet(train_input_shape)
    validating_model = googlenet(val_input_shape)

    #####read lmdb
    train_lmdb_iterator = read_lmdb(train_batch_size, train_lmdb_path)
    train_data_size = train_lmdb_iterator.total_number
    n_train_batches = train_data_size / train_batch_size
    print('n_train_batches = '+ str(n_train_batches))

    val_lmdb_iterator = read_lmdb(val_batch_size, val_lmdb_path)
    val_data_size = val_lmdb_iterator.total_number
    n_val_batches = val_data_size / val_batch_size
    print('n_val_batches = '+ str(n_val_batches))
    
    ## COMPILE FUNCTIONS ##
    (train_model, train_error,
        train_shared_x, train_shared_y, shared_lr) = compile_train_model(training_model, batch_size=train_batch_size)

    (val_model, val_shared_x, val_shared_y) = compile_val_model(validating_model, batch_size=val_batch_size)
    
    all_costs = []
    all_errors = []

    ####load net state
    net_params = load_net_state()
    if net_params:
        load_params(model.params, net_params['model_params'])
        train_lmdb_iterator.set_cursor(net_params['minibatch_index'])
        all_errors = net_params['all_errors']
        all_costs = net_params['all_costs']
        epoch = net_params['epoch']
        minibatch_index = net_params['minibatch_index']
    else:
        all_costs = []
        all_errors = []
        epoch = 0
        minibatch_index = 0

    print('... training')
    while(epoch < n_epochs):

        while(minibatch_index < n_train_batches):
            ####training
            #print(minibatch_index)
            iter = epoch * n_train_batches + minibatch_index
            print('training @ epoch = %d : iter = %d : total_batches = %d' %(epoch, iter, n_train_batches))
            begin_time = time.time()
            train_data, train_label = train_lmdb_iterator.next()
            train_shared_x.set_value(train_data)
            train_shared_y.set_value(train_label)
            set_learning_rate(shared_lr, iter)

            #begin_time = time.time()
            cost_ij = train_model()
            error_ij = train_error()
            all_costs.append(cost_ij)
            all_errors.append(error_ij)
            print('train_error: %f %%' %(error_ij*100))
            print('train_cost: %f' %(cost_ij))
            end_time = time.time()
            print('Time per iteration: %f' % (end_time - begin_time))
            if math.isnan(cost_ij):
                nan_params = get_params(model.params)
                common_save(nan_params, './nan_params')
                sys.exit(0)

            ###validation		 
            if (iter+1) % (4*n_train_batches) == 0:
                validation_errors = []
                validating_model.set_dropout_off()

                for validation_index in xrange(0, n_val_batches):
                    #print('validation_index = %d : total_batches = %d' %(validation_index, n_val_batches))
                    val_data, val_label = val_lmdb_iterator.next()
                    val_shared_x.set_value(val_data)
                    val_shared_y.set_value(val_label)
                    cost, errors, errors_top_5 = val_model()
                    validation_errors.append(errors_top_5)
                validating_model.set_dropout_on()
                this_validation_error = np.mean(validation_errors)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_error * 100.))

            ###save params every epoch
            if (iter+1) % n_train_batches == 0:
                net_params['model_params'] = get_params(model.params)
                net_params['minibatch_index'] = minibatch_index
                net_params['all_costs'] = all_costs
                net_params['all_errors'] = all_errors
                net_params['epoch'] = epoch
                save_net_state(net_params)
                save_figure(all_costs, all_errors)
                
            minibatch_index += 1

        if minibatch_index == n_train_batches:
            minibatch_index = 0
        epoch = epoch + 1
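
The resume logic above depends on `load_net_state` and `save_net_state`, which are not shown. A minimal sketch of what such helpers could look like, assuming the state is a plain picklable dict; the checkpoint file name is an assumption:

import os
import pickle

STATE_FILE = "net_state.pkl"  # assumed checkpoint location

def save_net_state(net_params, state_file=STATE_FILE):
    # Persist weights, counters and the cost/error histories so an
    # interrupted run can resume at the same epoch and minibatch.
    with open(state_file, "wb") as f:
        pickle.dump(net_params, f)

def load_net_state(state_file=STATE_FILE):
    # Return the saved state dict, or None when no checkpoint exists yet.
    if not os.path.isfile(state_file):
        return None
    with open(state_file, "rb") as f:
        return pickle.load(f)
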
Example #7
0
from params import load_params, load_secondary_params
from pele.potentials import InversePower

# change according to which minima you want to compare
m1_arg = 148
m2_arg = 159
m3_arg = 194
m4_arg = 195

foldnameInversePower = "ndim=2phi=0.9seed=0n_part=16r1=1.0r2=1.4rstd1=0.05rstd2=0.06999999999999999use_cell_lists=0power=2.5eps=1.0"
minima_database_path = (BASE_DIRECTORY + "/" + foldnameInversePower + "/" +
                        MINIMA_DATABASE_NAME)
th = np.load(minima_database_path)

foldpath = BASE_DIRECTORY + "/" + foldnameInversePower
sysparams = load_params(foldpath)
(hs_radii, initial_coords, box_length) = load_secondary_params(foldpath)
ctol = 1e-3
ndim = 2
potential = InversePower(
    sysparams.power.value,
    sysparams.eps.value,
    use_cell_lists=False,
    ndim=sysparams.ndim.value,
    radii=hs_radii * 1.0,
    boxvec=[box_length, box_length],
)
minima_container = CheckSameMinimum(
    ctol,
    ndim,
    boxl=box_length,
apply_mask_l2 = theano.function(
    inputs=[],
    updates={model.models_stack[2].w : model.models_stack[2].w * mask_l2_theano}
)

apply_mask = [apply_mask_l0, apply_mask_l1, apply_mask_l2]
# deeper layers are fully connected. 

# pdb.set_trace()
"""
print "Done."

#############
# PRE-TRAIN #
#############
"""
for i in range(len(hid_layer_sizes)):
    print "\n\nPre-training layer %d:" % i
    trainer = GraddescentMinibatch(
        varin=model.varin, data=train_x, 
        cost=model.models_stack[i].cost(),
        params=model.models_stack[i].params_private,
        supervised=False,
        batchsize=batchsize, learningrate=pretrain_lr, momentum=momentum,
        rng=npy_rng
    )

    init_lr = trainer.learningrate
    prev_cost = numpy.inf
    epc_cost = 0.
    patience = 0
def map_binary_inversepower(
    foldername,
    particle_coords,
    optimizer,
    parameter_dict,
    random_coord_0=0,
    random_coord_1=-1,
    z=0,
):
    """
    Finds whether a point defined by particle_coords
    on the meshgrid corresponds to a minimum or not for a 2d
    case.
    """
    foldpath = BASE_DIRECTORY + "/" + foldername
    # import params
    sysparams = load_params(foldpath)
    (hs_radii, initial_coords, box_length) = load_secondary_params(foldpath)
    assert sysparams.ndim.value == 2
    minimum_coords = np.loadtxt(foldpath + "/coords_of_minimum.txt", delimiter=",")
    quench_coords = initial_coords.copy()

    if len(quench_coords) == 16:
        quench_coords = (
            quench_coords
            + particle_coords[0] * VEC_8_0
            + particle_coords[1] * VEC_8_1
            + z * VEC_8_2
        )
    elif len(quench_coords) == 32:
        quench_coords = (
            quench_coords
            + particle_coords[0] * VEC_16_0
            + particle_coords[1] * VEC_16_1
            + z * VEC_16_2
        )
    elif len(quench_coords) == 64:
        quench_coords = (
            quench_coords
            + particle_coords[0] * VEC_16_0
            + particle_coords[1] * VEC_16_1
            + z * VEC_16_2
        )
    else:
        raise Exception("error other random coords have not been generated")

    print(quench_coords, "quench")
    # print(quench_coords, 'quench coords')
    # box length
    box_length = float(box_length)
    boxv = [box_length] * sysparams.ndim.value
    ncellx_scale = get_ncellsx_scale(hs_radii, boxv)
    potential = InversePower(
        sysparams.power.value,
        sysparams.eps.value,
        use_cell_lists=False,
        ndim=sysparams.ndim.value,
        radii=hs_radii * 1.0,
        boxvec=boxv,
    )

    # ret = quench_mixed_optimizer(potential,
    #                              quench_coords,  # make sure right coords are being passed
    #                              T=10,
    #                              step=1,
    #                              nsteps=100000,
    #                              conv_tol=1e-8,
    #                              tol=1e-6, rtol=1e-4, atol=1e-4)
    # ret = quench_steepest(
    #     potential,
    #     quench_coords,  # make sure right coords are being passed
    #     nsteps=2000000,
    #     stepsize=5e-3,  # for steepest descent step size should be small
    #     tol=1e-4)
    # ret = quench_cvode_opt(potential, quench_coords, tol=1e-6, rtol=1e-4, atol=1e-4)
    try:
        ret = optimizer(quench_coords, potential, **parameter_dict)
    except:
        print(quench_coords, "failed here")
        print(initial_coords, "coords")
        print(len(quench_coords))
        raise Exception("failure")

    # ret = lbfgs_cpp(quench_coords, potential, tol=1e-8, M=1)

    # This exists because some runs don't have hessian evaluations
    try:
        ret["nhev"]
    except:
        ret["nhev"] = 0
    coordarg = 0
    results = (ret.coords, ret.success, coordarg, ret.nfev, ret.nsteps, ret.nhev)
    # the reason the potential is being passed is because quench coords needs the potential to figure out what to do
    return results
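
map_pointset_loop_xy below walks over a `pointset` of (a, b) coefficients for the two displacement vectors. One simple way to build such a point set as a regular mesh; the grid bounds and resolution here are illustrative, not the values used in the actual runs:

import numpy as np

def make_mesh_pointset(nmesh, lo=0.0, hi=1.0):
    # Regular nmesh x nmesh grid of (a, b) coefficients in the plane
    # spanned by the two displacement vectors (VEC_*_0, VEC_*_1).
    a_vals = np.linspace(lo, hi, nmesh)
    b_vals = np.linspace(lo, hi, nmesh)
    return [(a, b) for a in a_vals for b in b_vals]
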
def map_pointset_loop_xy(
    foldname,
    pointset,
    optimizer,
    parameter_dict,
    ctol=1e-2,
    ndim=2,
    use_minima_database=True,
    minima_database_path=None,
    coord_arg_0=0,
    coord_arg_1=1,
    z=0,
):
    """ Checks a bunch of points if they match to a minimum by using a for loop
    """
    is_same_minimum_list = []
    resultlist = []
    foldpath = BASE_DIRECTORY + "/" + foldname

    sysparams = load_params(foldpath)
    (hs_radii, initial_coords, box_length) = load_secondary_params(foldpath)
    minimum_coords = np.loadtxt(foldpath + "/coords_of_minimum.txt", delimiter=",")

    # Initialize CheckSameMinimum
    potential = InversePower(
        sysparams.power.value,
        sysparams.eps.value,
        use_cell_lists=False,
        ndim=sysparams.ndim.value,
        radii=hs_radii * 1.0,
        boxvec=[box_length, box_length],
    )
    minima_container = CheckSameMinimum(
        ctol,
        ndim,
        boxl=box_length,
        minimalist_max_len=200000,
        minima_database_location=minima_database_path,
        update_database=True,
        rattler_check=True,
        potential=potential,
        hs_radii=hs_radii,
    )

    if use_minima_database:
        try:
            minima_container.minimalist = [
                minima_container.box_reshape_coords(x)
                for x in np.load(minima_database_path)
            ]
        except:
            print("warning no minima data found. generating")
            minima_container.minimalist = [
                # minima_container.box_reshape_coords(minimum_coords)
            ]
    nfevlist = []
    nstepslist = []
    nhevlist = []
    for index, point in enumerate(pointset):
        res = map_binary_inversepower(
            foldname,
            point,
            optimizer,
            parameter_dict,
            random_coord_0=coord_arg_0,
            random_coord_1=coord_arg_1,
            z=z,
        )
        minima_container.add_minimum(res[0], point, res[2])
        # print(index)
        # print(minima_container.nrattlermin, 'nrattlermin')
        # print(minima_container.nfluidstates, 'nfluidstates')
        nfevlist.append(res[3])
        nstepslist.append(res[4])
        nhevlist.append(res[5])

    # print(np.average(nfevlist), 'number of function evaluations')
    # print(np.average(nstepslist), 'number of steps')
    # print(np.average(nstepslist), 'number of steps')
    # print(np.average(nhevlist), "number of hessian evaluations")

    # print(minima_container.orderparamlist)

    foldpathdata = foldpath + "/" + QUENCH_FOLDER_NAME + "/z_data_30_l6/" + str(z)
    os.makedirs(foldpathdata, exist_ok=True)
    minima_container.dump_map(foldpathdata)

    run_diagnostics = {}
    run_diagnostics["nfev"] = float(np.average(nfevlist))
    run_diagnostics["nhev"] = float(np.average(nhevlist))
    run_diagnostics["nsteps"] = float(np.average(nstepslist))

    # print(minima_container.initial_coords_list)
    # print(minima_container.orderparamlist)
    # print(minima_container.orderparamlist)
    return run_diagnostics, is_same_minimum_list, resultlist
            coord_arg_0=coord_arg_0,
            coord_arg_1=coord_arg_1,
            z=z,
        )

    # print(res)
    # # boollist = np.loadtxt(BASE_DIRECTORY + '/' + foldnameInversePower + '/' + 'quench_results_fire.txt')
    # np.savetxt(BASE_DIRECTORY + '/' + foldnameInversePower + '/' + 'quench_results_mxopt.txt', boollist)
    # boollistreshaped = np.reshape(boollist, (nmesh, nmesh))
    # print(boollistreshaped)
    # print(boollist)
    # plt.imshow(boollistreshaped)
    # plt.show()
    # # print(reslist)

    sysparams = load_params(data_location)
    # save all parameters from run
    run_diagnostics = res[0]
    run_diagnostics["identification tolerance"] = identification_tolerance
    run_diagnostics["nmesh"] = nmesh
    run_diagnostics["ndim"] = sysparams.ndim.value
    run_diagnostics["nparticles"] = sysparams.n_part.value
    run_diagnostics["run data location"] = data_location + "/" + QUENCH_FOLDER_NAME

    opt_name = parameter_dict["name"].replace(" ", "_")
    # write run data to two different locations
    write_run_data_to_file(
        parameter_dict,
        run_diagnostics,
        folder_location=data_location,
        name=opt_name + ".yaml",
Example #12
0
			map1.shelter_policy({"school":0.00,"shop":0.05,"entertainment":0.0,"home":1,"hospital":0.00})		
		'''
		print('\n'+"Day "+str(map1.date)+" statistics:")
		if len(stats)==0:
			stats=simulate_one_day(map1)
		else:
			stats= np.vstack((stats,simulate_one_day(map1)))
		
		if map1.date>=10:
			map1.quarantine_policy({"school":0.0,"shop":0.0,"entertainment":0.0,"home":0.0,"hospital":0},detect_prob=0.15)
		
	print("R0 values",get_R0(stats))
	end_time=time.time()
	print("Simulation time:",end_time-start_time)
	plot_stats(stats)
	print("peak value",(np.argmax(stats[:,0]),np.max(stats[:,0])))
if __name__ == "__main__":
	Jkantor_param=load_params()
	print("parameters",Jkantor_param)
	simulate()
	'''
	map1.print_people()
	'''







           model_ft.models_stack[-1].params)
)
def return_grad(test_params, input_x, truth_y):
    tmp = get_params(model_ft.models_stack[-1])
    set_params(model_ft.models_stack[-1], test_params)
    result = numpy.concatenate([numpy.array(i).flatten() for i in fun_grad(input_x, truth_y)])
    set_params(model_ft.models_stack[-1], tmp)
    return result
p, g, numlinesearches = minimize(
    get_params(model_ft.models_stack[-1]), return_cost, return_grad,
    (train_x.get_value(), train_y.get_value()), logreg_epc, verbose=False
)
set_params(model_ft.models_stack[-1], p)
save_params(model_ft, 'ZLIN_4000_1000_4000_10_normhid_nolinb_cae1_dtagmt2_dropout.npy')

load_params(model_ft, 'ZLIN_4000_1000_4000_10_normhid_nolinb_cae1_dtagmt2_dropout.npy')
print "***error rate: train: %f, test: %f" % (
    train_set_error_rate(), test_set_error_rate()
)

#############
# FINE-TUNE #
#############

"""
print "\n\n... fine-tuning the whole network"
truth = T.lmatrix('truth')
trainer = GraddescentMinibatch(
    varin=model_ft.varin, data=train_x, 
    truth=model_ft.models_stack[-1].vartruth, truth_data=train_y,
    supervised=True,
Example #14
0
def quench_single_mxopt_inverse_power_julia(
    coord_file_name, foldpath, sub_fold_name, optimizer, opt_param_dict
):
    """
    quenches a single system through mxopt
    Parameters
    ----------
    coord_file_name: string
        name of the path to the coordinates
    foldpath: str
        folder defining the run
    sub_fold_name:
        name of subfolder where the run data is stored
    optimizer: optimizer
        quench
    opt_param_dict: dict
        dictionary of parameters for the optimizer
    """

    sysparams = load_params(foldpath)

    # path to quench coords
    quench_coords_path = (
        foldpath + "/" + sub_fold_name + "/" + "ensemble/" + coord_file_name
    )
    quench_coords = np.loadtxt(quench_coords_path)
    radii = get_hs_radii(foldpath, sub_fold_name)

    box_length = get_box_length(radii, sysparams.ndim.value, sysparams.phi.value)

    boxv = np.array([box_length] * sysparams.ndim.value)

    ncellx_scale = get_ncellsx_scale(radii, boxv)

    # potential = InversePower(sysparams.power.value,
    #                          sysparams.eps.value,
    #                          use_cell_lists=False,
    #                          ndim=sysparams.ndim.value,
    #                          radii=radii * 1.0,
    #                          boxvec=boxv)

    # build the Julia-side potential and its wrapper first, then construct the
    # mixed-descent optimizer on the coordinates to quench
    pot = Main.pot.InversePower(
        sysparams.power.value,
        sysparams.eps.value,
        radii,
        ndim=sysparams.ndim.value,
        boxvec=boxv,
        use_cell_lists=False,
        ncellx_scale=ncellx_scale,
    )
    ppot = Main.PythonPotential(pot)

    mxd = Main.Mixed_Descent(
        ppot,
        Main.solver,
        Main.nls,
        quench_coords,
        opt_param_dict["T"],
        opt_param_dict["rtol"],
        opt_param_dict["conv_tol"],
        opt_param_dict["tol"],
    )

    try:
        Main.run_b(mxd, 10000)
    except Exception:
        print("exception occurred")
        # If an exception occurs, treat the quench as a failure.  This mainly
        # covers rattlers; failures are rare enough that ignoring them would
        # barely change the statistics, but handle them just in case.
        return (quench_coords, False, 0, 0, 0, 0, 0)

    results = (mxd.optimizer.x0, mxd.converged, 0, mxd.iter_number, 0, 0, 0)
    return results
Example #15
0
File: reg_test_gc.py Project: zliu72/amrex
def reg_test_gc(argv):
    usage = """
    ./reg_test_gc [--before|-b 2000-00-00]
       testfile.ini
    """

    if len(sys.argv) == 1:
        print usage
        sys.exit(2)

    try:
        opts, next = getopt.getopt(argv[1:], "b:", ["before="])

    except getopt.GetoptError:
        print "invalid calling sequence"
        print usage
        sys.exit(2)

    # defaults
    gcdate = ""

    for o, a in opts:
        if o == "--before" or o == "-b":
            gcdate = a

    try:
        testFile = next[0]

    except IndexError:
        print "ERROR: a test file was not specified"
        print usage
        sys.exit(2)

    if not gcdate:
        print "ERROR: date was not specified"
        print usage
        sys.exit(2)

    gcd = valid_date(gcdate)
    if gcd == '':
        print "ERROR: invalid date", gcdate
        print usage
        sys.exit(2)

    workdir = os.getcwd()

    print "loading ", testFile

    args = test_util.get_args([testFile])

    suite, testList = params.load_params(args)
    activeTestList = [t.name for t in testList]

    benchmarkTestList = [
        t for t in testList if not (t.compileTest or t.restartTest)
    ]
    benchmarkNotFound = {}
    for t in benchmarkTestList:
        benchmarkNotFound[t.name] = ''

    ### clean up the web dir
    print "\ncleaning ", suite.webTopDir

    os.chdir(suite.webTopDir)
    validDirs = []
    for d in os.listdir(suite.webTopDir):
        if (d.startswith("20") and os.path.isdir(d)):
            statusFile = d + '/' + d + '.status'
            if (os.path.isfile(statusFile)):
                validDirs.append(d)
    validDirs.sort()
    validDirs.reverse()

    latestBMDate = {}

    for d in validDirs:
        bmtests = benchmarkNotFound.keys()
        if d >= gcd and bmtests:
            if isBenchmarkDir(d):
                for t in bmtests:
                    if findBenchmark(d, t):
                        del benchmarkNotFound[t]
                        latestBMDate[t] = d
        else:
            if isBenchmarkDir(d) and bmtests:
                found = False
                for t in bmtests:
                    if findBenchmark(d, t):
                        found = True
                        del benchmarkNotFound[t]
                        latestBMDate[t] = d
                if not found:
                    rmDir(d)
            else:
                rmDir(d)

    ### clean up the test dir
    testDirs = os.path.join(suite.testTopDir, suite.suiteName + "-tests")
    print "\ncleaning ", testDirs

    os.chdir(testDirs)
    validDirs = []
    for d in os.listdir(testDirs):
        if (d.startswith("20") and os.path.isdir(d)):
            validDirs.append(d)
    validDirs.sort()
    validDirs.reverse()

    for d in validDirs:
        if d < gcd:
            tests = [
                t for t in os.listdir(d) if os.path.isdir(os.path.join(d, t))
            ]
            found = False
            for t in tests:
                if t in latestBMDate.keys() and latestBMDate[t] == d:
                    found = True
                    break
            if not found:
                rmDir(d)

    print "\ncreating suite report..."
    report.report_all_runs(suite, activeTestList)

    print "\nGarbage cleaning finished."
Example #16
0
def train(train_data, dicts, save_to, save_frequency, valid_data, valid_frequency, patience,
          encoder, decoder, params_dtype, dim_emb, dim_rnn, n_words_source, n_words_target, maxlen,
          decay_c, alpha_c, clip_c, dropout, l_rate, epochs, batch_size, optimizer, devices, characters,
          resume_training, log_file, display_frequency):
    """
    Trains a Neural Machine Translation model with the specified parameters.
    Provides asynchronous optimization algorithms, see option --optimizer.

    Training (and validation) data must provide tokenization which can be recovered
    with `str.split`. Not necessary for character based models.
    """

    if log_file:
        f_handler = logging.FileHandler(log_file)
        f_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s %(module)s - %(message)s",
                                                 datefmt="%Y-%m-%d %H:%M:%S"))
        logging.getLogger().addHandler(f_handler)
        logging.info("logging to {}".format(log_file))

    logging.info("loading dictionaries from {}, {}".format(*dicts))
    with open(dicts[0], 'r') as f1, open(dicts[1], 'r') as f2:
        dictionaries = [json.load(f1), json.load(f2)]

    logging.info("preparing training data streams from {}, {}".format(*train_data))
    train_data_iter = TextIterator(train_data[0], train_data[1], dictionaries[0], dictionaries[1],
                                   n_words_source=n_words_source, n_words_target=n_words_target,
                                   batch_size=batch_size, maxlen=maxlen, raw_characters=characters)
    valid_data_iter = None
    if valid_data:
        logging.info("preparing validation data streams from {}, {}".format(*valid_data))
        valid_data_iter = TextIterator(valid_data[0], valid_data[1], dictionaries[0], dictionaries[1],
                                       n_words_source=n_words_source, n_words_target=n_words_target,
                                       batch_size=batch_size, maxlen=maxlen, raw_characters=characters)

    logging.info("initializing weights")
    if resume_training:
        params = load_params(resume_training[0])
        # check if model options saved in json format match the current settings
        with open(resume_training[1], "r") as f:
            resume_options = json.load(f)
        if not all([encoder == resume_options["encoder"],
                    decoder == resume_options["decoder"],
                    dim_emb == resume_options["dim_emb"],
                    dim_rnn == resume_options["dim_rnn"],
                    n_words_source == resume_options["n_words_source"],
                    n_words_target == resume_options["n_words_target"],
                    maxlen == resume_options["maxlen"],
                    decay_c == resume_options["decay_c"],
                    alpha_c == resume_options["alpha_c"],
                    dropout == resume_options["dropout"],
                    characters == resume_options["characters"]]):
            raise ValueError("Option mismatch!")
    else:
        params = init_params(n_words_source, n_words_target, dim_emb, dim_rnn, dtype=params_dtype)

    if optimizer in ["hogwild", "async_agrad", "async_da"]:
        logging.info("selected parallelizable optimizing algorithm {}, handing over to async-train".format(optimizer))
        # saving, validation and logging is taken care of by async_train.train_params # TODO: set noise to 0 if used
        trained_params = train_params(params, build_model, data=train_data_iter,
                                      devices=devices.split(","), update_scheme=optimizer,
                                      num_epochs=epochs, l_rate=l_rate, log_level=30, log_file=log_file,
                                      valid_data=valid_data_iter, valid_freq=valid_frequency, patience=patience,
                                      save_to=save_to, save_freq=save_frequency,
                                      dim_emb=dim_emb, dim_rnn=dim_rnn, encoder=encoder, decoder=decoder,
                                      n_words_target=n_words_target, n_words_source=n_words_source, maxlen=maxlen,
                                      params_dtype=params_dtype, dropout=dropout,
                                      decay_c=decay_c, alpha_c=alpha_c, clip_c=clip_c,
                                      display_freq=display_frequency, characters=characters)

    elif optimizer in ["sgd", "adagrad", "adadelta", "adam", "rmsprop"]:
        logging.info("selected sequential optimizer {}".format(optimizer))

        dir_path = os.path.dirname(save_to)
        if dir_path and not os.path.exists(dir_path):
            os.makedirs(dir_path)
        save_file = save_params(params, save_to, epoch_update=(0, 0))
        logging.info("update {}, saving current model parameters to {}".format(0, save_file))
        train_options_file = os.path.splitext(save_to)[0] + ".json"
        logging.info("saving training options to {}".format(train_options_file))
        train_options = {"devices": devices,
                         "optimizer": optimizer,
                         "l_rate": l_rate,
                         "save_to": save_to,
                         "patience": patience,
                         "dim_emb": dim_emb,
                         "dim_rnn": dim_rnn,
                         "encoder": encoder,
                         "decoder": decoder,
                         "dropout": dropout,
                         "n_words_target": n_words_target,
                         "n_words_source": n_words_source,
                         "maxlen": maxlen,
                         "decay_c": decay_c,
                         "alpha_c": alpha_c,
                         "clip_c": clip_c,
                         "characters": characters}
        with open(train_options_file, "w") as f:
            json.dump(train_options, f, indent=4)

        # get callable optimizer function
        import seq_optimizers
        opt_func = getattr(seq_optimizers, optimizer)
        import theano
        import theano.tensor as T
        import theano.sandbox.cuda
        theano.sandbox.cuda.use(devices.split(",")[0])
        tparams = OrderedDict()
        for param_name, param in params.items():
            tparams[param_name] = theano.shared(param)

        def pull_from_tparams(param_dict):
            params = OrderedDict()
            for param_name, param in param_dict.items():
                params[param_name] = param.value()
            return params

        logging.info("building model")
        inputs, cost, _ = build_model(tparams, dim_emb=dim_emb, encoder=encoder, decoder=decoder,
                                      dropout=dropout, n_words_target=n_words_target,
                                      decay_c=decay_c, alpha_c=alpha_c)
        grads = T.grad(cost, wrt=list(tparams.values()))

        if clip_c > 0.:
            grads_squared_sum = 0.
            for g in grads:
                grads_squared_sum += (g**2).sum()
            grads = [T.switch(grads_squared_sum > (clip_c**2),
                              g / T.sqrt(grads_squared_sum) * clip_c,
                              g)
                     for g in grads]

        logging.info("compiling model")
        learning_rate = T.scalar("learning_rate")
        f_grad_shared, f_update = opt_func(learning_rate, tparams, grads, inputs, cost)

        update_idx = 0
        early_stop = False
        best_params = params
        best_valid_error = np.inf
        patience_left = patience

        logging.info("starting training")
        for epoch_idx in range(1, epochs+1):

            for train_batch in train_data_iter:

                logging.debug("processing next data sample")
                cost = f_grad_shared(*train_batch)
                f_update(l_rate)
                update_idx += 1

                if update_idx % display_frequency == 0:
                    logging.info("epoch {} update {}, cost of last processed batch: {}"
                                 .format(epoch_idx, update_idx, cost))

                if update_idx % valid_frequency == 0 and valid_data:
                    # TODO: set noise to 0 if used
                    cur_valid_error = np.mean([f_grad_shared(*d) for d in valid_data_iter])
                    if cur_valid_error < best_valid_error:
                        best_params = pull_from_tparams(tparams)
                        best_valid_error = cur_valid_error
                        patience_left = patience
                    else:
                        patience_left -= 1

                    if patience_left == 0:
                        early_stop = True
                        break

                if update_idx % save_frequency == 0:
                    save_file = save_params(pull_from_tparams(tparams), save_to, epoch_update=(epoch_idx, update_idx))
                    logging.info("epoch {}, update {} saved to {}".format(epoch_idx, update_idx, save_file))

            if early_stop:
                break

        save_file = save_params(best_params, save_to)
        logging.info("saved best parameters to {}".format(save_file))

    logging.info("done!")
Example #17
0
def map_binary_inversepower_mxopt_jl(
    foldername,
    particle_coords,
    optimizer,
    opt_param_dict,
    random_coord_0=0,
    random_coord_1=-1,
    z=0,
):
    """
    Finds whether a point defined by particle_coords
    on the meshgrid corresponds to a minimum or not for a 2d
    case.
    """
    foldpath = BASE_DIRECTORY + "/" + foldername
    # import params
    sysparams = load_params(foldpath)
    (hs_radii, initial_coords, box_length) = load_secondary_params(foldpath)
    assert sysparams.ndim.value == 2
    minimum_coords = np.loadtxt(foldpath + "/coords_of_minimum.txt",
                                delimiter=",")
    quench_coords = initial_coords.copy()

    quench_coords = (quench_coords + particle_coords[0] * VEC_16_0 +
                     particle_coords[1] * VEC_16_1 + z * VEC_16_2)

    # quench_coords = quench_coords + \
    #     particle_coords[0]*VEC_8_0 + particle_coords[1]*VEC_8_1 + z*VEC_8_2
    # print(quench_coords, 'quench coords')
    # box length
    box_length = float(box_length)
    boxv = [box_length] * sysparams.ndim.value
    ncellx_scale = get_ncellsx_scale(hs_radii, boxv)
    print(hs_radii, "hs_radii")
    print(quench_coords, "quench_coords")
    print(boxv)
    potential = Main.pot.InversePower(
        sysparams.power.value,
        sysparams.eps.value,
        use_cell_lists=False,
        ndim=sysparams.ndim.value,
        radii=hs_radii * 1.0,
        boxvec=boxv,
    )

    ppot = Main.PythonPotential(potential)
    nls = Main.NewtonLinesearch(ppot, quench_coords, opt_param_dict["tol"])
    mxd = Main.Mixed_Descent(
        ppot,
        Main.CVODE_BDF(),
        nls,
        quench_coords,
        opt_param_dict["T"],
        opt_param_dict["rtol"],
        opt_param_dict["conv_tol"],
        opt_param_dict["tol"],
    )
    try:
        Main.run_b(mxd, 10000)
    except:
        print(quench_coords, "failed here")
        print(initial_coords, "coords")
        print(len(quench_coords))
    # ret = lbfgs_cpp(quench_coords, potential, tol=1e-8, M=1)

    # This exists because some runs don't have hessian evaluations
    coordarg = 0
    print(mxd.converged, "converged")
    results = (
        mxd.optimizer.x0,
        mxd.converged,
        0,
        mxd.n_g_evals,
        mxd.iter_number,
        mxd.n_h_evals,
    )
    # the reason the potential is being passed is because quench coords needs the potential to figure out what to do
    return results
Example #18
0
def train_mnist():
	nv, nhs = 28*28, [500] #, 200, 200, 200]
	batch_size = 20
	train_size = 10000
	n_epochs = 15
	learning_rate = 0.001
	decay = 1.
	L2_reg = 0.01
	L1_reg = 0.000
	momi, momf, momsw = 0.5, 0.9, 10
	k=1
	sample_every = None

	LOAD_PARAMS = False

	data_path = '/export/mlrg/ebuchman/datasets/mnistX_binary_lowres.pkl'
	data_path = '/export/mlrg/ebuchman/datasets/mnist_binary.pkl'
	data_path = '/mnt/data/datasets/mnist_binary.pkl'
	print 'opening data'
	f = open(data_path)
	d = pickle.load(f)
	f.close()
	if len(d) == 3:
		tr, val, ts = d
		X = tr[0]
	else:
		X = d

	print X.shape

	X0 = X
	optimizer = 'cd'

	n_layers = len(nhs)
	layer_params = []


	X = X[:100]
	#	X = X[random.sample(xrange(len(X0)), train_size)]


	w, bh, bv, t = test_rbm(X)

	print 'took', t
	# open the results file for writing and dump the fitted parameters to it
	f = open('data/results/mnist_cd_deeplearning_ref.pkl', 'wb')
	pickle.dump([None, [w, bh, bv]], f)
	f.close()
	quit()



	# train each layer of DBN
	for layer in xrange(n_layers):
		
		if layer == 0:
			nv, nh = nv, nhs[0]
		else:
			nv, nh = nhs[layer-1], nhs[layer]	

		if LOAD_PARAMS: # this needs to be fixed...
			param_init = load_params(nv, nh, k, 10, learning_rate)
		else:
			theta_init = random_theta(nv, nh, k=k)
			param_init = split_theta(theta_init, nv, nh, k=k)
			param_init[0] = param_init[0].reshape(nv*nh)

		# fit rbm
		params_fit = _train(X, optimizer, param_init, nv, nh, batch_size, n_epochs, learning_rate, decay, momi, momf, momsw, L1_reg, L2_reg, k)
		layer_params.append(params_fit)
	#	sample_and_save(param_first_layer, nh, n_epochs, learning_rate, k, optimizer)

		# get data for next layer by propagating mnist up to current layer
		if k == 1:
			X = X0[random.sample(xrange(len(X0)), train_size)]
			for i in xrange(layer+1):
				W, bh, bv = layer_params[i]
				mean = False if i == layer else True
				X = sample_h_given_v_np(X, W, bh, nh, mean=mean)
		elif k == 2:
			X = X0[random.sample(xrange(len(X0)), train_size)]
			for i in xrange(layer+1):
				W, Wh, bh, bv = layer_params[i]
				mean = False if i == layer else True
				X = sample_h_given_v_2wise_np(X, W, Wh, bh, nh, mean = mean)

	#save_name = "mnist_


	#params_fit = split_theta(model.mpf.theta.get_value(), nv, nh, k=k)
	deep_samples(layer_params, nsamps=50, opt=optimizer)
Example #19
def map_binary_inversepower(
    foldername,
    particle_coords,
    optimizer,
    parameter_dict,
    random_coord_0=0,
    random_coord_1=-1,
    z=0,
    index=None,
    mesh_length=None,
):
    """
    Finds whether the point defined by particle_coords
    on the meshgrid corresponds to a minimum for the 2D case.
    """
    foldpath = BASE_DIRECTORY + "/" + foldername
    # import params
    sysparams = load_params(foldpath)
    (hs_radii, initial_coords, box_length) = load_secondary_params(foldpath)
    assert sysparams.ndim.value == 2
    quench_coords = initial_coords.copy()
    print("initial_coords", initial_coords)

    print(particle_coords[0], particle_coords[1],
          "vector coefficients as passed")
    print("z value")
    quench_coords = (quench_coords + particle_coords[0] * VEC_16_0 +
                     particle_coords[1] * VEC_16_1 + z * VEC_16_2)

    # TODO: save this as a unit meshgrid
    # print(quench_coords, 'quench coords')
    # box length
    box_length = float(box_length)
    boxv = [box_length] * sysparams.ndim.value
    ncellx_scale = get_ncellsx_scale(hs_radii, boxv)
    print(hs_radii)
    potential = InversePower(
        sysparams.power.value,
        sysparams.eps.value,
        use_cell_lists=False,
        ndim=sysparams.ndim.value,
        radii=hs_radii * 1.0,
        boxvec=boxv,
    )
    # save potential parameters
    # (written out explicitly because converting the Enum to a dict does not
    #  give the mapping we want)
    potential_params_fname = "potential_params.yaml"
    potential_param_dict = {
        "ndim": sysparams.ndim.value,
        "phi": sysparams.phi.value,
        "seed": 0,
        "n_part": sysparams.n_part.value,
        "r1": sysparams.r1.value,
        "r2": sysparams.r2.value,
        "rstd1": sysparams.rstd1.value,
        "rstd2": sysparams.rstd2.value,
        "use_cell_lists": int(False),
        "power": sysparams.power.value,
        "eps": sysparams.eps.value,
    }

    param_str = generate_param_str_from_dict(potential_param_dict)
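    # param_str (built by generate_param_str_from_dict, defined elsewhere in
    # this codebase) presumably encodes the potential parameters as a folder
    # name; it is used below to emulate the directory layout of the
    # refactored code.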

    # make directory to emulate structure by refactored code
    emulation_folder = COMPARE_FOLDER_NAME + "/" + param_str
    # add README to emulation folder
    os.makedirs(emulation_folder, exist_ok=True)
    with open(COMPARE_FOLDER_NAME + "/README.org", "w") as f:
        f.write(" \#+AUTHOR Praharsh Suryadevara .\n")
        f.write("* Read First.")
        f.write(
            "This dirctory has been auto-generated by old code before refactoring. Do not change.\n"
        )

    # emulates path of the refactored code
    ensemble_folder_path = emulation_folder + "/ensemble/random_plane"
    os.makedirs(ensemble_folder_path, exist_ok=True)

    # the trailing /0 emulates the seed subdirectory added by the refactored code
    sec_param_folder_path = emulation_folder + "/sec_params/0"
    os.makedirs(sec_param_folder_path, exist_ok=True)

    old_code_results_folder_path = emulation_folder + "/old_data"
    os.makedirs(old_code_results_folder_path, exist_ok=True)
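    # At this point the emulated layout under COMPARE_FOLDER_NAME/<param_str>
    # is: ensemble/random_plane (inputs), sec_params/0 (secondary parameters)
    # and old_data (results from this old code path).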

    with open(emulation_folder + "/params.yaml", "w") as param_file:
        yaml.dump(potential_param_dict, param_file)

    opt_param_fname = "minima_finder_params.yaml"
    with open(emulation_folder + "/" + opt_param_fname, "w") as param_file:
        yaml.dump(parameter_dict, param_file)

    initial_coords_fname = str(index) + ".txt"
    np.savetxt(ensemble_folder_path + "/" + initial_coords_fname,
               quench_coords,
               delimiter=",")

    mesh_coords_fname = str(index) + "_mesh.txt"
    np.savetxt(ensemble_folder_path + "/" + mesh_coords_fname,
               particle_coords,
               delimiter=",")

    if mesh_length is not None:
        # map [particle_coords[0], particle_coords[1]] onto a normalized mesh
        # grid via 2 * coords / mesh_length - 0.5 (this overwrites the raw
        # particle_coords file saved above under the same filename)
        mesh_coords = (2 * np.array([particle_coords[0], particle_coords[1]]) /
                       mesh_length - 0.5)
        mesh_coords_fname = str(index) + "_mesh.txt"
        np.savetxt(ensemble_folder_path + "/" + mesh_coords_fname,
                   mesh_coords,
                   delimiter=",")

    np.savetxt(
        sec_param_folder_path + "/" + "initial_coords.txt",
        initial_coords,
        delimiter=",",
    )
    np.savetxt(
        sec_param_folder_path + "/" + "box_length.txt",
        np.array([box_length]),
        delimiter=",",
    )
    np.savetxt(sec_param_folder_path + "/" + "hs_radii.txt",
               hs_radii,
               delimiter=",")

    # ret = quench_mixed_optimizer(potential,
    #                              quench_coords,  # make sure right coords are being passed
    #                              T=10,
    #                              step=1,
    #                              nsteps=100000,
    #                              conv_tol=1e-8,
    #                              tol=1e-6, rtol=1e-4, atol=1e-4)
    # ret = quench_steepest(
    #     potential,
    #     quench_coords,  # make sure right coords are being passed
    #     nsteps=2000000,
    #     stepsize=5e-3,  # for steepest descent step size should be small
    #     tol=1e-4)
    # ret = quench_cvode_opt(potential, quench_coords, tol=1e-6, rtol=1e-4, atol=1e-4)
    try:
        ret = optimizer(quench_coords, potential, **parameter_dict)
    except Exception as err:
        raise Exception("optimizer failed") from err
    # ret = lbfgs_cpp(quench_coords, potential, tol=1e-8, M=1)

    # This exists because some runs don't have hessian evaluations
    try:
        ret["nhev"]
    except Exception:
        ret["nhev"] = 0
    coordarg = 0

    final_coords = ret["coords"]
    final_coords_fname = str(index) + "_coords.txt"
    np.savetxt(
        old_code_results_folder_path + "/" + final_coords_fname,
        final_coords,
        delimiter=",",
    )

    energy, grad, hess = potential.getEnergyGradientHessian(final_coords)

    hessian_fname = str(index) + "_hessian.txt"
    np.savetxt(old_code_results_folder_path + "/" + hessian_fname,
               hess,
               delimiter=",")
    grad_fname = str(index) + "_grad.txt"
    np.savetxt(old_code_results_folder_path + "/" + grad_fname,
               grad,
               delimiter=",")

    last_step_fname = str(index) + "_step.txt"
    # np.savetxt(old_code_results_folder_path + '/' +
    #            last_step_fname, ret['step'], delimiter=',')

    # check that the minimum hessian eigenvalue is positive
    # print out the eigenvector corresponding to it
    eigvals, eigvecs = np.linalg.eigh(hess)
    print(eigvals[1])
    print(eigvecs[:, 1])
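    # np.linalg.eigh returns eigenvalues in ascending order, so eigvals[0] is
    # the smallest; index 1 is printed above, presumably to skip a near-zero
    # (e.g. translational) mode of the periodic system.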

    # step_in_eigvec_basis = np.matmul(eigvecs.T, ret['step'])
    # step_in_eigvec_basis_normalized = step_in_eigvec_basis/np.linalg.norm(step_in_eigvec_basis)

    # allow slightly negative eigenvalues (down to -1e-3) to absorb numerical noise
    if np.min(eigvals) < -1e-3:
        print("minimum Eigenvalue: ", np.min(eigvals))
        print("Eigenvalues: ", eigvals)
        raise Exception("negative eigenvalue")

    # collect results in the same tuple layout as the other mapping functions
    results = (
        ret.coords,
        ret.success,
        coordarg,
        ret.nfev,
        ret.nsteps,
        ret.nhev,
        eigvecs,
    )

    # simplified dictionary since we're only using the success variable
    res_dict = {
        "success": bool(ret.success),
        "nsteps": ret.nsteps,
        "energy": ret.energy,
        "nfev": ret.nfev,
    }
    # print("-------------------------------------------------steppp", step_in_eigvec_basis_normalized)
    # print("eigenvalues", eigvals)

    yaml_fname = str(index) + ".yaml"
    # save heuristics as yaml
    with open(old_code_results_folder_path + "/" + yaml_fname, "w") as f:
        yaml.dump(res_dict, f)

    print(ret.nsteps, "nsteps")
    # the potential is passed along because the quench coordinates need it to
    # determine how they are relaxed
    return results
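# Illustrative usage sketch (an assumption, not part of the original example):
# drive map_binary_inversepower over a square mesh of plane coefficients. The
# run folder, mesh_length and optimizer settings are placeholders; an existing
# run directory under BASE_DIRECTORY is required, and lbfgs_cpp is assumed to
# be the quench function referenced in the commented-out call above.
if __name__ == "__main__":
    mesh_length = 8                         # hypothetical mesh resolution
    opt_params = {"tol": 1e-8, "M": 1}      # mirrors the commented lbfgs_cpp call
    for i in range(mesh_length):
        for j in range(mesh_length):
            map_binary_inversepower(
                "example_run_folder",       # placeholder folder under BASE_DIRECTORY
                np.array([i, j]),           # mesh point -> coefficients of VEC_16_0 / VEC_16_1
                lbfgs_cpp,                  # any quench(coords, potential, **params) callable
                opt_params,
                index=i * mesh_length + j,
                mesh_length=mesh_length,
            )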