# Connect to the redis server client_queues, server_queues = make_queue_pairs(args.redishost, args.redisport, use_pickle=True) # Apply wrappers to function to affix static settings my_generate_molecules = partial(generate_molecules, episodes=args.rl_episodes) my_generate_molecules = update_wrapper(my_generate_molecules, generate_molecules) # Create the method server and task generator doer = ParslMethodServer([ my_generate_molecules, compute_score, compute_atomization_energy, compute_reference_energy ], server_queues, default_executors=['htex']) # Select a list of initial molecules with open('qm9-smiles.json') as fp: initial_mols = np.random.choice(json.load(fp), size=(args.initial_count, ), replace=False) thinker = Thinker(client_queues, output_dir=out_dir, initial_molecules=initial_mols, n_parallel=args.parallel_guesses, n_molecules=args.search_size) logging.info('Created the method server and task generator')
# NOTE(review): fragment — opens inside a Parsl Config(...) whose beginning is
# outside this view, and ends inside the try block; reflowed without changing tokens.
        ThreadPoolExecutor(label="local_threads", max_threads=4)
    ],
    strategy=None,
)
parsl.load(config)
parsl.set_stream_logger(level=logging.INFO)

# Connect to the redis server
# (clean_slate=True presumably flushes stale queue entries — verify against make_queue_pairs)
client_queues, server_queues = make_queue_pairs(args.redishost, args.redisport,
                                                clean_slate=True, use_pickle=True)

# Create the method server and task generator
doer = ParslMethodServer([target_fun, generate, score, select], server_queues,
                         default_executors=['htex'])
thinker = Thinker(client_queues)
logging.info('Created the method server and task generator')

try:
    # Launch the servers
    # The method server is a Thread, so that it can access the Parsl DFK
    # The task generator is a Thread, so that all debugging methods get cast to screen
    doer.start()
    thinker.start()
    logging.info('Launched the servers')

    # Wait for the task generator to complete
    thinker.join()
    logging.info('Task generator has completed')
# NOTE(review): fragment — begins inside a call (`code=code)`) whose opening is
# outside this view and ends inside a try block; reflowed without changing tokens.
    code=code)
my_compute_atomization = update_wrapper(my_compute_atomization, compute_atomization_energy)
# Fix inference batch size and parallelism for the MPNN evaluator
# NOTE(review): batch_size=512 / n_jobs=64 are hard-coded — TODO confirm they suit the target machine
my_evaluate_mpnn = partial(evaluate_mpnn, atom_types=atom_types, bond_types=bond_types,
                           batch_size=512, n_jobs=64)
my_evaluate_mpnn = update_wrapper(my_evaluate_mpnn, evaluate_mpnn)

# Create the method server and task generator
# Route ML inference to the 'ml' executor and DFT to the 'qc' executor
ml_cfg = {'executors': ['ml']}
dft_cfg = {'executors': ['qc']}
doer = ParslMethodServer([(my_evaluate_mpnn, ml_cfg), (my_compute_atomization, dft_cfg)],
                         server_queues, config)

# Compute the number of excess tasks
excess_tasks = nnodes * args.ml_prefetch + args.ml_excess_queue

# Configure the "thinker" application
thinker = Thinker(client_queues, args.search_space, args.mpnn_model_files, out_dir,
                  qc_workers, nnodes, args.molecules_per_ml_task, args.search_size,
                  args.sampling_fraction, excess_tasks)
logging.info('Created the method server and task generator')

try:
    # Launch the servers
    # The method server is a Thread, so that it can access the Parsl DFK
    # The task generator is a Thread, so that all debugging methods get cast to screen
# NOTE(review): fragment of a script main(); reflowed without changing tokens.
# `my_generate_molecules`, `my_compute_atomization`, `atom_types`, etc. are
# bound earlier, outside this view.
my_evaluate_mpnn = partial(evaluate_mpnn, atom_types=atom_types, bond_types=bond_types)
my_evaluate_mpnn = update_wrapper(my_evaluate_mpnn, evaluate_mpnn)
my_update_mpnn = partial(update_mpnn, atom_types=atom_types, bond_types=bond_types)
my_update_mpnn = update_wrapper(my_update_mpnn, update_mpnn)

# Create the method server and task generator
# ML tasks (generation, inference, training) go to the 'ml' executor; DFT to 'qc'
ml_cfg = {'executors': ['ml']}
dft_cfg = {'executors': ['qc']}
doer = ParslMethodServer([(my_generate_molecules, ml_cfg), (my_evaluate_mpnn, ml_cfg),
                          (my_update_mpnn, ml_cfg), (my_compute_atomization, dft_cfg)],
                         server_queues)

# Configure the "thinker" application
thinker = Thinker(client_queues, initial_database, initial_search_space, agent, mpnn,
                  output_dir=out_dir, n_parallel=args.parallel_guesses,
                  n_molecules=args.search_size)
logging.info('Created the method server and task generator')

try:
# NOTE(review): fragment of a script main(); reflowed without changing tokens.
# `search_space` is presumably a pandas DataFrame with a 'smiles' column
# (it uses .sample/.iloc/.tolist) — TODO confirm against the loader above this view.
search_space = search_space.sample(args.max_evals, random_state=args.random_seed)
logging.info(
    f'Read {len(search_space)} from {args.search_space} and shuffled them.'
    f' First: {search_space["smiles"].iloc[:5].values}')

# Connect to the redis server
client_queues, server_queues = make_queue_pairs(
    args.redishost,
    args.redisport,
    serialization_method="pickle",
    topics=['simulate', 'infer', 'train'],
    keep_inputs=False)

# Create the method server and task generator
doer = ParslMethodServer([run_simulation], server_queues, config)

# Configure the "thinker" application
thinker = Thinker(client_queues, out_dir, search_space['smiles'].tolist(), args.mode,
                  nnodes, args.nodes_per_task)
logging.info('Created the method server and task generator')

# Start the usage tracker
# Daemon thread: sampled every 15 s, exits with the process without cleanup
thr = Thread(target=track_memory_usage, args=(os.path.join(out_dir, 'usage.json'), 15),
             daemon=True)
thr.start()

try:
    # Launch the servers
    # The method server is a Thread, so that it can access the Parsl DFK
# NOTE(review): fragment of a script main(); reflowed without changing tokens.
# `my_evaluate_mpnn`, `my_update_mpnn`, `models`, `nnodes`, `config` are bound
# earlier, outside this view.
# bootstrap=True presumably resamples the training set per model — verify against retrain_mpnn
my_retrain_mpnn = partial(retrain_mpnn, num_epochs=args.num_epochs, atom_types=atom_types,
                          bond_types=bond_types, learning_rate=args.learning_rate,
                          bootstrap=True)
my_retrain_mpnn = update_wrapper(my_retrain_mpnn, retrain_mpnn)

# Create the method server and task generator
# Separate executors for inference, training, and quantum chemistry
inf_cfg = {'executors': ['ml-inference']}
tra_cfg = {'executors': ['ml-train']}
dft_cfg = {'executors': ['qc']}
doer = ParslMethodServer([(my_evaluate_mpnn, inf_cfg), (run_simulation, dft_cfg),
                          (my_update_mpnn, tra_cfg), (my_retrain_mpnn, tra_cfg)],
                         server_queues, config)

# Connect to MongoDB
database = MoleculePropertyDB.from_connection_info(args.mongohost, args.mongoport)

# Configure the "thinker" application
thinker = Thinker(client_queues, database, args.search_space, args.search_size,
                  args.retrain_frequency, args.retrain_from_scratch, models,
                  args.molecules_per_ml_task, nnodes, args.nodes_per_task, out_dir,
                  args.beta)
logging.info('Created the method server and task generator')
# NOTE(review): fragment — opens inside a LocalProvider(...) / Config(...) whose
# beginning is outside this view; reflowed without changing tokens.
            max_blocks=1,
        ),
    ),
    ThreadPoolExecutor(label="local_threads", max_threads=4)
],
strategy=None,
)
parsl.load(config)

# Connect to the redis server
# (builds the two queue endpoints explicitly rather than via make_queue_pairs)
client_queues = ClientQueues(args.redishost, args.redisport)
server_queues = MethodServerQueues(args.redishost, args.redisport)

# Create the method server and task generator
doer = ParslMethodServer([target_fun], server_queues, default_executors=['htex'])
thinker = Thinker(client_queues)
logging.info('Created the method server and task generator')

try:
    # Launch the servers
    # The method server is a Thread, so that it can access the Parsl DFK
    # The task generator is a Thread, so that all debugging methods get cast to screen
    doer.start()
    thinker.start()
    logging.info('Launched the servers')

    # Wait for the task generator to complete
    thinker.join()
    logging.info('Task generator has completed')
# NOTE(review): fragment — opens inside a Config(...) executor list whose
# beginning is outside this view; reflowed without changing tokens.
        ),
    ),
    ThreadPoolExecutor(label="local_threads", max_threads=4)
],
strategy=None,
)
config.run_dir = os.path.join(out_dir, 'run-info')
parsl.load(config)

# Create the method server and task generator
# Fix the synthetic-runtime distribution of the ackley test function;
# mean/std are log-transformed, so the runtime is presumably lognormal — TODO confirm
my_ackley = partial(ackley, mean_rt=np.log(args.runtime), std_rt=np.log(args.runtime_var))
# update_wrapper mutates my_ackley in place, so the return value need not be kept
update_wrapper(my_ackley, ackley)
doer = ParslMethodServer([my_ackley], server_queues, default_executors=['htex'])
thinker = Thinker(client_queues, out_dir, dim=args.dim, n_guesses=args.num_guesses,
                  batch_size=args.num_parallel)
logging.info('Created the method server and task generator')

try:
    # Launch the servers
    # The method server is a Thread, so that it can access the Parsl DFK
    # The task generator is a Thread, so that all debugging methods get cast to screen
    doer.start()
    thinker.start()
    logging.info('Launched the servers')
# NOTE(review): fragment — opens inside a make_queue_pairs(...) call whose
# beginning is outside this view; reflowed without changing tokens.
    args.redisport,
    use_pickle=True,
    topics=['simulator', 'ML'])

# Apply wrappers to function to affix static settings
my_generate_molecules = partial(generate_molecules, episodes=args.rl_episodes)
my_generate_molecules = update_wrapper(my_generate_molecules, generate_molecules)

# Create the method server and task generator
# ML tasks to the 'ml' executor, quantum chemistry to 'qc'
ml_cfg = {'executors': ['ml']}
dft_cfg = {'executors': ['qc']}
doer = ParslMethodServer([(my_generate_molecules, ml_cfg), (compute_score, ml_cfg),
                          (compute_atomization_energy, dft_cfg),
                          (compute_reference_energy, dft_cfg)],
                         server_queues)

# Select a list of initial molecules
# NOTE(review): 'qm9-smiles.json' is resolved relative to the CWD — TODO confirm intended
with open('qm9-smiles.json') as fp:
    initial_mols = np.random.choice(json.load(fp), size=(args.initial_count, ), replace=False)

thinker = Thinker(client_queues,
                  output_dir=out_dir,
                  initial_molecules=initial_mols,
                  n_parallel=args.parallel_guesses,
                  n_molecules=args.search_size)
logging.info('Created the method server and task generator')
def cli_run():
    """Command-line entry point: start an MPI method server fed by Redis queues.

    Parses Redis connection options and platform flags, loads a Parsl
    configuration (thread-based on Mac, HighThroughputExecutor elsewhere),
    then runs a ``ParslMethodServer`` that serves ``target_fun`` requests
    from the Redis-backed method-server queues. Blocks in ``mms.run()``
    until the server stops.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--redishost", default="127.0.0.1",
                        help="Address at which the redis server can be reached")
    # NOTE(review): default is a string; MethodServerQueues presumably accepts
    # a string port — confirm, or convert with type=int
    parser.add_argument("--redisport", default="6379",
                        help="Port on which redis is available")
    # Fixed: help text was copy-pasted from another option ("Count of apps to launch")
    parser.add_argument("-d", "--debug", action='store_true',
                        help="Enable debug-level logging")
    parser.add_argument("-m", "--mac", action='store_true',
                        help="Configure for Mac")
    args = parser.parse_args()

    if args.debug:
        parsl.set_stream_logger()

    if args.mac:
        # Mac: threads only (HighThroughputExecutor is not used on this platform)
        config = Config(
            executors=[
                ThreadPoolExecutor(label="htex"),
                ThreadPoolExecutor(label="local_threads")
            ],
            strategy=None,
        )
    else:
        config = Config(
            executors=[
                HighThroughputExecutor(
                    label="htex",
                    # Max workers limits the concurrency exposed via mom node
                    max_workers=2,
                    provider=LocalProvider(
                        init_blocks=1,
                        max_blocks=1,
                    ),
                ),
                ThreadPoolExecutor(label="local_threads")
            ],
            strategy=None,
        )
    parsl.load(config)

    # Fixed: "outout" -> "output" in the usage text
    print(
        '''This program creates an "MPI Method Server" that listens on an inputs queue and write on an output queue:

        input_queue --> mpi_method_server --> queues

To send it a request, add an entry to the inputs queue:
     run "pipeline-pump -p N" where N is an integer request
To access a value, remove it from the output queue:
     run "pipeline-pull" (blocking) or "pipeline-pull -t T" (T an integer) to time out after T seconds
     TODO: Timeout does not work yet!
''')

    # Get the queues for the method server
    method_queues = MethodServerQueues(args.redishost, port=args.redisport)

    # Start the method server (blocks until shutdown)
    mms = ParslMethodServer([target_fun], method_queues, default_executors=['htex'])
    mms.run()