Example #1
0
def basinhopping():
    """Run one iteration of the basin-hopping server loop.

    Registers finished results, creates new searches, and persists the
    work-unit id counter (wuid.dat) and PRNG state to disk.
    """
    logger.info('Eon version %s', version())
    # First of all, does the root directory even exist?
    if not os.path.isdir(config.path_root):
        logger.critical("Root directory does not exist")
        sys.exit(1)

    # load metadata
    bhstates = BHStates()

    # Restore the work-unit id counter, or start from zero on a fresh run.
    # Context managers guarantee the handle is closed even if parsing fails
    # (the original open()/close() pair leaked the handle on exception).
    if os.path.isfile("wuid.dat"):
        with open("wuid.dat") as wuid_file:
            wuid = int(wuid_file.readline().strip())
    else:
        wuid = 0

    # get communicator
    comm = communicator.get_communicator()

    # Register all the results. There is no need to ever discard found
    # processes like we do with akmc. There is no confidence to calculate.
    register_results(comm, bhstates)

    wuid = make_searches(comm, wuid, bhstates)

    # Persist the updated counter so the next invocation continues from it.
    with open("wuid.dat", "w") as wuid_file:
        wuid_file.write("%i\n" % wuid)

    io.save_prng_state()
Example #2
0
def basinhopping():
    """One pass of the basin-hopping server: collect results, make searches."""
    logger.info('Eon version %s', version())
    # Refuse to run when the simulation root directory is missing.
    if not os.path.isdir(config.path_root):
        logger.critical("Root directory does not exist")
        sys.exit(1)

    # load metadata
    bhstates = BHStates()

    # Recover the work-unit id counter from disk if a previous run left one.
    wuid = 0
    if os.path.isfile("wuid.dat"):
        fh = open("wuid.dat")
        wuid = int(fh.readline().strip())
        fh.close()

    # get communicator
    comm = communicator.get_communicator()

    # Register all the results. There is  no need to ever discard found
    # processes like we do with akmc. There is no confidence to calculate.
    register_results(comm, bhstates)

    wuid = make_searches(comm, wuid, bhstates)

    # Store the counter so the next invocation picks up where we left off.
    fh = open("wuid.dat", "w")
    fh.write("%i\n" % wuid)
    fh.close()

    io.save_prng_state()
Example #3
0
    def __init__(self, states, previous_state, state, superbasin=None):
        """Init MinModeExplorer.

        If superbasin is passed, all states in the superbasin will be
        searched until all of them have the required confidence. It
        will also lead to a different confidence definition being
        used, which takes only processes exiting the SB into account.

        """
        Explorer.__init__(self, superbasin)
        self.states = states
        self.state = state
        self.previous_state = previous_state
        self.comm = communicator.get_communicator()

        if config.recycling_on:
            self.nrecycled = 0
            self.recycler = recycling.Recycling(self.states,
                                                self.previous_state,
                                                self.state,
                                                config.recycling_move_distance,
                                                config.recycling_save_sugg)

        # If we plan to only displace atoms that moved getting to the current state.
        if config.disp_moved_only and self.state.number != 0:
            moved_atoms = self.recycler.process_atoms
        else:
            moved_atoms = None

        if config.kdb_on:
            if not os.path.isdir(config.kdb_scratch_path):
                os.makedirs(config.kdb_scratch_path)
            queried_path = os.path.join(config.kdb_scratch_path, "queried")
            # A missing or unparsable "queried" file simply means no states
            # have been queried yet. Catch only the errors reading/parsing
            # can raise -- the original bare except also swallowed
            # KeyboardInterrupt and SystemExit.
            try:
                with open(queried_path, 'r') as fh:
                    queried = [int(q) for q in fh.readlines()]
            except (IOError, OSError, ValueError):
                queried = []
            if self.state.number not in queried:
                queried.append(self.state.number)
                # Record this state as queried, then ask the kdb for
                # suggested processes.
                with open(queried_path, 'w') as fh:
                    for q in queried:
                        fh.write("%d\n" % q)
                kdb.query(self.state)

        self.reactant = self.state.get_reactant()
        self.displace = displace.DisplacementManager(self.reactant,
                                                     moved_atoms)
Example #4
0
    def __init__(self, states, previous_state, state, superbasin=None):
        """Init MinModeExplorer.

        If superbasin is passed, all states in the superbasin will be
        searched until all of them have the required confidence. It
        will also lead to a different confidence definition being
        used, which takes only processes exiting the SB into account.

        """
        Explorer.__init__(self, superbasin)
        self.states = states
        self.state = state
        self.previous_state = previous_state
        self.comm = communicator.get_communicator()

        if config.recycling_on:
            self.nrecycled = 0
            self.recycler = recycling.Recycling(self.states,
                                                self.previous_state,
                                                self.state,
                                                config.recycling_move_distance,
                                                config.recycling_save_sugg)

        # If we plan to only displace atoms that moved getting to the current state.
        if config.disp_moved_only and self.state.number != 0:
            moved_atoms = self.recycler.process_atoms
        else:
            moved_atoms = None

        if config.kdb_on:
            if not os.path.isdir(config.kdb_scratch_path):
                os.makedirs(config.kdb_scratch_path)
            queried_file = os.path.join(config.kdb_scratch_path, "queried")
            try:
                # Read the list of state numbers already sent to the kdb.
                fh = open(queried_file, 'r')
                try:
                    queried = [int(q) for q in fh.readlines()]
                finally:
                    fh.close()
            except (IOError, OSError, ValueError):
                # Missing or corrupt file: treat as "nothing queried yet".
                # (The original bare except also hid KeyboardInterrupt.)
                queried = []
            if self.state.number not in queried:
                queried.append(self.state.number)
                fh = open(queried_file, 'w')
                for q in queried:
                    fh.write("%d\n" % q)
                fh.close()
                kdb.query(self.state)

        self.reactant = self.state.get_reactant()
        self.displace = displace.DisplacementManager(self.reactant, moved_atoms)
Example #5
0
def main():
    """Top-level driver: run the configured main job, or submit a generic job.

    For unrecognized job types, every regular file in the current directory
    that has an extension is bundled into a single job and submitted via the
    communicator; previous output is rotated to ``output_old``.
    """
    config.init()

    # Should we have some kind of sanity-check module/function somewhere?
    fnames = [
        os.path.basename(f)
        for f in glob.glob(os.path.join(config.path_pot, '*'))
    ]
    if 'pos.con' in fnames:
        # Parenthesized single-argument print behaves identically under
        # Python 2 and keeps the syntax valid under Python 3.
        print("WARNING: pos.con found in potfiles path. Are you sure you want this? It will overwrite the pos.con in the calculation directory when your jobs are being run.")

    job = config.main_job.lower()
    if job == 'akmc':
        akmc.main()
    elif job == 'parallel_replica' or job == 'unbiased_parallel_replica':
        parallelreplica.main()
    elif job == 'basin_hopping':
        basinhopping.main()
    elif job == 'escape_rate':
        escaperate.main()
    else:
        import communicator
        import shutil
        config.path_scratch = config.path_root
        comm = communicator.get_communicator()

        invariants = {}

        # Merge potential files into invariants
        invariants = dict(invariants, **io.load_potfiles(config.path_pot))

        job = {}
        files = [f for f in os.listdir(".") if os.path.isfile(f)]
        for f in files:
            # Only files with an extension are shipped with the job; split
            # once instead of three times, and only open files we will use.
            parts = f.split('.')
            if len(parts) > 1:
                f_passed = parts[0] + "." + parts[1]
                with open(f) as fh:
                    job[f_passed] = StringIO(fh.read())
        job["id"] = "output"
        # Keep at most one generation of previous output around.
        if os.path.isdir("output_old"):
            shutil.rmtree("output_old")
        if os.path.isdir("output"):
            shutil.move("output", "output_old")
        comm.submit_jobs([job], invariants)
Example #6
0
def main():
    """Dispatch to the configured job type, or submit the current directory
    as a single generic job (rotating any previous ``output`` directory).
    """
    config.init()

    # Should we have some kind of sanity-check module/function somewhere?
    fnames = [os.path.basename(f)
              for f in glob.glob(os.path.join(config.path_pot, '*'))]
    if 'pos.con' in fnames:
        # Single-argument parenthesized print is identical in Python 2 and
        # stays valid under Python 3.
        print("WARNING: pos.con found in potfiles path. Are you sure you want this? It will overwrite the pos.con in the calculation directory when your jobs are being run.")

    job = config.main_job.lower()
    if job == 'akmc':
        akmc.main()
    elif job == 'parallel_replica' or job == 'unbiased_parallel_replica':
        parallelreplica.main()
    elif job == 'basin_hopping':
        basinhopping.main()
    elif job == 'escape_rate':
        escaperate.main()
    else:
        import communicator
        import shutil
        config.path_scratch = config.path_root
        comm = communicator.get_communicator()

        invariants = {}

        # Merge potential files into invariants
        invariants = dict(invariants, **io.load_potfiles(config.path_pot))

        job = {}
        files = [f for f in os.listdir(".") if os.path.isfile(f)]
        for f in files:
            parts = f.split('.')
            # Only ship files that have an extension.
            # NOTE: the original indented the assignment below with a TAB
            # (a SyntaxError under Python 3); normalized to spaces here.
            if len(parts) > 1:
                f_passed = parts[0] + "." + parts[1]
                fh = open(f)
                job[f_passed] = StringIO(fh.read())
                fh.close()
        job["id"] = "output"
        if os.path.isdir("output_old"):
            shutil.rmtree("output_old")
        if os.path.isdir("output"):
            shutil.move("output", "output_old")
        comm.submit_jobs([job], invariants)
Example #7
0
def parallelreplica():
    """Run one iteration of the parallel-replica dynamics server.

    Registers finished results, advances the simulation on a transition,
    creates new searches, and writes the metadata file and PRNG state.
    """
    logger.info('Eon version: %s', version())
    # First of all, does the root directory even exist?
    if not os.path.isdir(config.path_root):
        logger.critical("Root directory does not exist")
        sys.exit(1)

    # load metadata
    start_state_num, time, wuid = get_pr_metadata()
    logger.info("Simulation time: %e s", time)
    states = get_statelist()
    current_state = states.get_state(start_state_num)

    # get communicator
    comm = communicator.get_communicator()

    # Register all the results. There is no need to ever discard processes
    # like we do with akmc. There is no confidence to calculate.
    num_registered, transition, sum_spdup = register_results(
        comm, current_state, states)

    if num_registered >= 1:
        avg_spdup = sum_spdup / num_registered
        logger.info("Total speedup: %f", avg_spdup)

    if transition:
        # A transition was found: move to the new state and advance time.
        current_state, previous_state = step(time, current_state, states,
                                             transition)
        time += transition['time']

    logger.info("Time in current state: %e s", current_state.get_time())
    logger.info("Simulation time: %e s", time)
    wuid = make_searches(comm, current_state, wuid)

    # Write out metadata. XXX:ugly
    metafile = os.path.join(config.path_results, 'info.txt')
    parser = ConfigParser.RawConfigParser()
    write_pr_metadata(parser, current_state.number, time, wuid)
    # Use a context manager so the metadata file is flushed and closed
    # deterministically (the original leaked the handle from open()).
    with open(metafile, 'w') as fh:
        parser.write(fh)
    io.save_prng_state()
Example #8
0
def parallelreplica():
    """Advance the parallel-replica simulation by one server iteration."""
    logger.info('Eon version: %s', version())
    # Abort early if the simulation root directory is missing.
    if not os.path.isdir(config.path_root):
        logger.critical("Root directory does not exist")
        sys.exit(1)

    # load metadata
    start_state_num, time, wuid = get_pr_metadata()
    logger.info("Simulation time: %e s", time)
    states = get_statelist()
    current_state = states.get_state(start_state_num)

    # get communicator
    comm = communicator.get_communicator()

    # Register all the results. There is no need to ever discard processes
    # like we do with akmc. There is no confidence to calculate.
    num_registered, transition, sum_spdup = register_results(comm, current_state, states)

    if num_registered >= 1:
        avg_spdup = sum_spdup / num_registered
        logger.info("Total speedup: %f", avg_spdup)

    if transition:
        current_state, previous_state = step(time, current_state, states, transition)
        time += transition['time']

    logger.info("Time in current state: %e s", current_state.get_time())
    logger.info("Simulation time: %e s", time)
    wuid = make_searches(comm, current_state, wuid)

    # Write out metadata. XXX:ugly
    metafile = os.path.join(config.path_results, 'info.txt')
    parser = ConfigParser.RawConfigParser()
    write_pr_metadata(parser, current_state.number, time, wuid)
    # Close the handle explicitly; the original passed a bare open() to
    # parser.write and relied on refcounting to flush and close the file.
    fh = open(metafile, 'w')
    try:
        parser.write(fh)
    finally:
        fh.close()
    io.save_prng_state()
Example #9
0
def main():
    """Command-line entry point for the aKMC server.

    Parses the command-line options, initializes configuration and logging,
    then either performs a one-shot maintenance action (--movie, --status,
    --reset, --restart) and exits, or acquires the lock file and runs the
    aKMC loop -- once, or repeatedly with --continuous / the MPI
    communicator.
    """
    optpar = optparse.OptionParser(usage = "usage: %prog [options] config.ini")
    optpar.add_option("-C", "--continuous", action="store_true", dest="continuous", default=False, help="don't quit")
    optpar.add_option("-R", "--reset", action="store_true", dest="reset", default = False, help="reset the aKMC simulation, discarding all data")
    optpar.add_option("-f", "--force", action="store_true", dest="force", default = False, help="force a reset, no questions asked")
    optpar.add_option("-r", "--restart", action="store_true", dest="restart", default = False, help="restart the aKMC simulations from a clean dynamics.txt file")
    optpar.add_option("-s", "--status", action="store_true", dest="print_status", default = False, help = "print the status of the simulation and currently running jobs")
    optpar.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False,help="only write to the log file")
    optpar.add_option("-m", "--movie", action="store", dest="movie_type", default = "", help="Specify the type of movie to make [dynamics, states, fastestpath, fastestfullpath, graph, processes]. Process movies are specified like so: --movie processes,statenumber,processlimit. Where processes is the string processes, statenumber is the number of the state that you want to view, and process limit is the maximum number of processes you would like in the movie. The returned processes are reverse sorted by rate such that the fastest processes is the first in the movie.")
    optpar.add_option("-M", "--separate-movie-files", action="store_true", dest="separate_movie_files", default=False, help="Do not write the movie into a single file but use a separate POSCAR for every state. These are created in the \"movies\" directory. Only useful in conjunction with --movie.")
    optpar.add_option("-n", "--no-submit", action="store_true", dest="no_submit", default=False,help="don't submit searches; only register finished results")
    (options, args) = optpar.parse_args()

    # Only one positional (the config file) is expected; warn on extras but
    # continue anyway.
    if len(args) > 1:
        print "akmc.py takes only one positional argument"
    sys.argv = sys.argv[0:1]
    if len(args) == 1:
        sys.argv += args
        #always run from the directory where the config file is
        #os.chdir(os.path.dirname(args[0]))

    #XXX: config is ugly as it finds out where the config file is directly from
    #     sys.argv instead of being passed it.
    #import sys
    if len(sys.argv) > 1:
        config.init(sys.argv[-1])
    else:
        config.init()
    #set options.path_root to be where the config file is if given as an arg
    if config.path_root.strip() == '.' and len(args) == 1:
        config.path_root = os.path.abspath(os.path.dirname(args[0]))
        os.chdir(config.path_root)

    # A zero-sized job buffer means no new searches get submitted.
    if options.no_submit:
        config.comm_job_buffer_size = 0

    #setup logging
    logging.basicConfig(level=logging.DEBUG,
            filename=os.path.join(config.path_results, "akmc.log"),
            format="%(asctime)s %(levelname)s:%(name)s: %(message)s",
            datefmt="%F %T")
    logging.raiseExceptions = False

    # Mirror INFO-and-above log messages to the console unless --quiet.
    if not options.quiet:
        rootlogger = logging.getLogger('')
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter("%(message)s")
        console.setFormatter(formatter)
        rootlogger.addHandler(console)

    # The lock file keeps two servers from running in the same directory
    # (checked with lock.aquirelock() further below).
    lock = locking.LockFile(os.path.join(config.path_results, "lockfile"))

    # Some options are mutually exclusive. Let's check them now.
    exclusive_options = {}
    if len(options.movie_type) > 0:
        exclusive_options['movie_type'] = True
    else:
        exclusive_options['movie_type'] = False
    exclusive_options['print_status'] = options.print_status
    exclusive_options['reset'] = options.reset

    if sum(exclusive_options.values()) > 1:
        offending_options = [ k for k,v in exclusive_options.iteritems() if v ]
        optpar.error("Options %s are mutually exclusive" % ", ".join(offending_options))

    # --movie: generate the requested movie and exit.
    if len(options.movie_type) > 0:
        # 11604.5 appears to convert main_temperature from Kelvin to eV
        # (1 eV ~ 11604.5 K) -- TODO confirm against get_statelist.
        states = get_statelist(config.main_temperature / 11604.5)
        movie.make_movie(options.movie_type, config.path_root, states,
                         options.separate_movie_files)
        sys.exit(0)

    # From the config file: The Novotny and C&V (ASKMC) methods should not be used together.
    if config.sb_on and config.askmc_on:
        logger.error("Both superbasin methods should not be used at the same time")
        sys.exit(1)

    # --status: print a summary of the simulation state and exit.
    if options.print_status:
        states = get_statelist(config.main_temperature / 11604.5)
        start_state_num, time, previous_state_num, first_run, previous_temperature =\
            get_akmc_metadata()
        current_state = states.get_state(start_state_num)
        if config.sb_on:
            sb_scheme = get_superbasin_scheme(states)
            sb = sb_scheme.get_containing_superbasin(current_state)
        else:
            sb = None
        print
        print "General"
        print "-------"
        if not sb:
            print "Current state:", start_state_num
        else:
            print "Current state:", start_state_num, "in superbasin", sb.id
        print "Number of states:",states.get_num_states()
        print "Time simulated: %.3e seconds" % time
        print

        print "Current State"
        print "-------------"
        if not sb:
            print "Confidence: %.4f" % current_state.get_confidence()
        else:
            # With superbasins on, report the SB confidence plus a per-state
            # breakdown, flagging states with no known exit from the SB.
            print "Superbasin Confidence: %.4f" % sb.get_confidence()
            non_ignored_states = set(sb._get_filtered_states())
            for s in sb.states:
                if s in non_ignored_states:
                    ignore_string = ""
                else:
                    ignore_string = " (no exit from superbasin found)"
                print "       %4i: %.4f%s" % (s.number, s.get_confidence(sb), ignore_string)
        print "Unique Saddles:", current_state.get_unique_saddle_count()
        print "Good Saddles:", current_state.get_good_saddle_count()
        print "Bad Saddles:", current_state.get_bad_saddle_count()
        print "Percentage bad saddles: %.1f" % (float(current_state.get_bad_saddle_count())/float(max(current_state.get_bad_saddle_count() + current_state.get_good_saddle_count(), 1)) * 100)
        print

        comm = communicator.get_communicator()
        print "Saddle Searches"
        print "---------------"
        print "Searches in queue:", comm.get_queue_size()
        print
        if config.sb_on:
            print "Superbasins"
            print "-----------"
            for i in sb_scheme.superbasins:
                print "%s: %s" % (i.id, i.state_numbers)
        sys.exit(0)
    elif options.reset:
        # --reset: wipe all simulation data (interactive unless --force).
        if options.force:
            res = 'y'
        else:
            res = raw_input("Are you sure you want to reset (all data files will be lost)? (y/N) ").lower()
        if len(res)>0 and res[0] == 'y':
                # Helper: remove a file, or a directory tree together with
                # any now-empty parent directories; silently skips paths
                # that are None or do not exist.
                def attempt_removal(thing):
                    if thing is None:
                        return
                    if os.path.isdir(thing):
                        shutil.rmtree(thing)
                        # mkdir+removedirs also prunes empty parent dirs.
                        os.mkdir(thing)
                        os.removedirs(thing)
                    elif os.path.isfile(thing):
                        os.remove(thing)
                # Everything a full reset deletes.
                rmthings = [config.path_jobs_out,
                            config.path_jobs_in,
                            config.path_incomplete,
                            config.path_states,
                            config.path_scratch,
                            config.kdb_name,
                            config.kdb_scratch_path,
                            config.sb_path,
                            config.sb_recycling_path,
                            config.debug_results_path,
                            os.path.join(config.path_root, "searchdata"),
                            os.path.join(config.path_results, "askmc_data.txt"),
                            os.path.join(config.path_results, "searches.log"),
                            os.path.join(config.path_results, "dynamics.txt"),
                            os.path.join(config.path_results, "info.txt"),
                            os.path.join(config.path_results, "akmc.log"),
                            os.path.join(config.path_results, "jobs.tbl"),
                            os.path.join(config.path_root, "results"),
                            os.path.join(config.path_root, "prng.pkl"),
                            os.path.join(config.path_root, "explorer.pickle"),
                            os.path.join(config.path_root, "temperatures.dat"),
                            os.path.join(config.path_root, "client.log"),
                            os.path.join(config.path_root, "lockfile"),
                            ]
                for thing in rmthings:
                    attempt_removal(thing)
                if not options.quiet:
                    print "Reset"
                sys.exit(0)
        else:
            print "Not resetting"
            sys.exit(1)

    elif options.restart:
        # --restart: keep the state database but clear the trajectory-
        # specific files (and optionally the superbasins).
        string_sb_clear = ""

        if options.force:
            res = 'y'
        else:
            res = raw_input("Are you sure you want to restart (remove dynamics.txt, info.txt and akmc.log)? (y/N) ").lower()
        if len(res)>0 and res[0] == 'y':

            # remove akmc data that are specific for a trajectory
            dynamics_path = os.path.join(config.path_results, "dynamics.txt")
            info_path = os.path.join(config.path_results, "info.txt")
            log_path = os.path.join(config.path_results, "akmc.log")
            jobs_path = os.path.join(config.path_results, "jobs.tbl")
            for i in [info_path, dynamics_path, log_path, jobs_path]:
                if os.path.isfile(i):
                    os.remove(i)

            if config.sb_on:
                if options.force:
                    res = 'y'
                else:
                    res = raw_input("Should the superbasins be removed? (y/N) ").lower()

                # remove superbasin data (specific for a trajectory)
                if len(res)>0 and res[0] == 'y':
                    # remove directory superbasins
                    if os.path.isdir(config.sb_path):
                        shutil.rmtree(config.sb_path)
                        #XXX: ugly way to remove all empty directories containing this one
                        os.mkdir(config.sb_path)
                        os.removedirs(config.sb_path)

                    # remove superbasins files from states dirctories
                    state_dirs = os.listdir(config.path_states)
                    for i in state_dirs:
                        if i != 'state_table':
                            superbasin_file = os.path.join(config.path_states, i)
                            superbasin_file = os.path.join(superbasin_file, config.sb_state_file)
                            if os.path.isfile(superbasin_file):
                                os.remove(superbasin_file)

                    string_sb_clear = " with directory 'superbasins' and files named '"
                    string_sb_clear += str(config.sb_state_file) + "' removed"

            if not options.quiet:
                print "Restart"+string_sb_clear+"."
            sys.exit(0)
        else:
            print "Not restarting"
            sys.exit(1)

    # Normal operation: run the aKMC loop while holding the lock file.
    if lock.aquirelock():
        if options.continuous or config.comm_type == 'mpi':
            # define a wait method.
            if config.comm_type == 'mpi':
                from mpiwait import mpiwait
                wait = mpiwait
            elif options.continuous:
                if config.comm_type == "local":
                    # In local, everything is synchronous, so no need to wait here.
                    wait = lambda: None
                else:
                    wait = lambda: sleep(10.0)
            else:
                raise RuntimeError("You have found a bug in EON!")
            # Run a specified number of steps or forever.
            steps = 0
            while True:
                steps = akmc(config, steps)
                if (config.akmc_max_kmc_steps > 0 and
                    steps >= config.akmc_max_kmc_steps):
                    break
                wait()
            # In MPI mode we need to signal exit to all processes.
            # TODO: This is the sledgehammer method, it would be cleaner to
            #       communicate to all clients that they should exit.
            if config.comm_type == 'mpi':
                from mpi4py import MPI
                MPI.COMM_WORLD.Abort(0)
        else:
            akmc(config)
    else:
        # Another server instance holds the lock; refuse to run.
        logger.info("Server is locked by pid %i" % lock.pid)
        sys.exit(1)