def test_simulated(self):
    """Run a short simulated ProtoDFTCampaign end-to-end on the
    Mn-Ni-O-Sb fixture and check the finalized hull plot is written."""
    exp_dataframe = pd.read_pickle(
        os.path.join(CAMD_TEST_FILES, "mn-ni-o-sb.pickle"))
    experiment = ATFSampler(exp_dataframe)
    # Last 11 columns are experiment results; candidates are features only
    candidate_data = exp_dataframe.iloc[:, :-11]
    # Set up agents and loop parameters
    agent = AgentStabilityAdaBoost(
        model=MLPRegressor(hidden_layer_sizes=(84, 50)),
        n_query=2,
        hull_distance=0.2,
        exploit_fraction=1.0,
        uncertainty=True,
        alpha=0.5,
        diversify=True,
        n_estimators=20)
    analyzer = StabilityAnalyzer(hull_distance=0.2)
    # Reduce seed_data
    icsd_data = load_dataframe("oqmd1.2_exp_based_entries_featurized_v2")
    seed_data = filter_dataframe_by_composition(icsd_data, "MnNiOSb")
    leftover = ~icsd_data.index.isin(seed_data.index)
    # Add some random other data to test compositional flexibility.
    # FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # pandas 2.0 -- use pd.concat instead.
    seed_data = pd.concat([seed_data, icsd_data.loc[leftover].sample(30)])
    del icsd_data  # free the large frame before the campaign runs

    with ScratchDir('.'):
        campaign = ProtoDFTCampaign(
            candidate_data=candidate_data,
            agent=agent,
            experiment=experiment,
            analyzer=analyzer,
            seed_data=seed_data,
            heuristic_stopper=5)
        campaign.autorun()
        self.assertTrue(os.path.isfile('hull_finalized.png'))
def from_chemsys(cls, chemsys):
    """
    Class factory method for constructing campaign from chemsys.

    Args:
        chemsys (str): chemical system for the campaign

    Returns:
        (ProtoDFTCampaign): Standard proto-dft campaign from the
            chemical system

    """
    s3_prefix = "proto-dft-2/runs/{}".format(chemsys)

    # Initialize s3
    dumpfn({
        "started": datetime.now().isoformat(),
        "version": __version__
    }, "start.json")
    s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')

    # Get structure domain
    element_list = chemsys.split('-')
    max_coeff, charge_balanced = heuristic_setup(element_list)
    domain = StructureDomain.from_bounds(
        element_list,
        charge_balanced=charge_balanced,
        n_max_atoms=20,
        **{'grid': range(1, max_coeff)})
    candidate_data = domain.candidates()

    # Dump structure/candidate data
    with open('candidate_data.pickle', 'wb') as f:
        pickle.dump(candidate_data, f)

    # Set up agents and loop parameters
    agent = AgentStabilityAdaBoost(
        model=MLPRegressor(hidden_layer_sizes=(84, 50)),
        n_query=10,
        hull_distance=0.2,
        exploit_fraction=1.0,
        uncertainty=True,
        alpha=0.5,
        diversify=True,
        n_estimators=20)
    analyzer = StabilityAnalyzer(hull_distance=0.2)
    experiment = OqmdDFTonMC1(timeout=30000)
    seed_data = load_dataframe("oqmd1.2_exp_based_entries_featurized_v2")

    # Construct and start loop.
    # FIX: the campaign was previously constructed with a hard-coded
    # "proto-dft/runs/{}" prefix while the state above was synced to
    # "proto-dft-2/runs/{}" -- reuse s3_prefix so the campaign writes
    # to the same location it initialized.
    return cls(
        candidate_data=candidate_data,
        agent=agent,
        experiment=experiment,
        analyzer=analyzer,
        seed_data=seed_data,
        heuristic_stopper=5,
        s3_prefix=s3_prefix)
def test_adaboost_loop(self):
    """Drive a short Campaign with an AdaBoost stability agent over
    the <=3-species subset of the test dataframe."""
    full_df = pd.read_csv(os.path.join(CAMD_TEST_FILES, 'test_df.csv'))
    subset = full_df[full_df['N_species'] <= 3]
    seed_size = 200  # Starting sample size

    campaign = Campaign(
        subset,  # candidates drawn from the same frame the sampler serves
        AgentStabilityAdaBoost(
            model=MLPRegressor(hidden_layer_sizes=(84, 50)),
            n_query=10,
            exploit_fraction=1.0,
            alpha=0.5,
            n_estimators=10),
        ATFSampler(subset),
        StabilityAnalyzer(hull_distance=0.05, parallel=False),
        create_seed=seed_size)

    campaign.initialize()
    self.assertTrue(campaign.initialized)
    campaign.auto_loop(6)
    self.assertTrue(True)
def from_chemsys(cls, chemsys, prefix="proto-dft-2/runs",
                 n_max_atoms=20, agent=None, analyzer=None,
                 experiment=None, log_file="campaign.log",
                 cloudwatch_group="/camd/worker/dev/"):
    """
    Class factory method for constructing campaign from chemsys.

    Args:
        chemsys (str): chemical system for the campaign
        prefix (str): prefix for s3
        n_max_atoms (int): number of maximum atoms
        agent (Agent): agent for stability campaign
        analyzer (Analyzer): analyzer for stability campaign
        experiment (Agent): experiment for stability campaign
        log_file (str): log filename
        cloudwatch_group (str): cloudwatch group to log to

    Returns:
        (ProtoDFTCampaign): Standard proto-dft campaign from the
            chemical system

    """
    # Log to a local file, AWS CloudWatch, and the console
    logger = logging.Logger("camd")
    logger.setLevel("INFO")
    for handler in (
            logging.FileHandler(log_file),
            CloudWatchLogHandler(log_group=cloudwatch_group,
                                 stream_name=chemsys),
            logging.StreamHandler()):
        logger.addHandler(handler)
    logger.info(
        "Starting campaign factory from_chemsys {}".format(chemsys))
    s3_prefix = "{}/{}".format(prefix, chemsys)

    # Record start metadata and push initial state to s3
    dumpfn({
        "started": datetime.now().isoformat(),
        "version": __version__
    }, "start.json")
    s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')

    # Enumerate the candidate structure domain for this system
    elements = chemsys.split('-')
    max_coefficient, charge_balanced = heuristic_setup(elements)
    structure_domain = StructureDomain.from_bounds(
        elements,
        charge_balanced=charge_balanced,
        n_max_atoms=n_max_atoms,
        **{'grid': range(1, max_coefficient)})
    candidate_data = structure_domain.candidates()

    # Persist candidates locally, then sync to s3
    with open('candidate_data.pickle', 'wb') as f:
        pickle.dump(candidate_data, f)
    s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')
    logger.info("Candidates generated")

    # Fill in defaults for any component the caller did not supply
    agent = agent or AgentStabilityAdaBoost(
        model=MLPRegressor(hidden_layer_sizes=(84, 50)),
        n_query=10,
        hull_distance=0.2,
        exploit_fraction=1.0,
        uncertainty=True,
        alpha=0.5,
        diversify=True,
        n_estimators=20)
    analyzer = analyzer or StabilityAnalyzer(hull_distance=0.2)
    experiment = experiment or OqmdDFTonMC1(timeout=30000)
    seed_data = load_dataframe("oqmd1.2_exp_based_entries_featurized_v2")

    # Construct and start loop
    return cls(
        candidate_data=candidate_data,
        agent=agent,
        experiment=experiment,
        analyzer=analyzer,
        seed_data=seed_data,
        heuristic_stopper=5,
        s3_prefix=s3_prefix,
        logger=logger)
def from_chemsys(cls, chemsys, prefix="proto-dft-2/runs",
                 n_max_atoms=20, agent=None, analyzer=None,
                 experiment=None, log_file="campaign.log",
                 cloudwatch_group="/camd/worker/dev/"):
    """
    Class factory method for constructing campaign from chemsys.

    Reuses a cached protosearch candidate domain and any cached
    experiment results from s3 when available, folding cached
    experiments into the seed before the campaign starts.

    Args:
        chemsys (str): chemical system for the campaign
        prefix (str): prefix for s3
        n_max_atoms (int): number of maximum atoms
        agent (Agent): agent for stability campaign
        analyzer (Analyzer): analyzer for stability campaign
        experiment (Agent): experiment for stability campaign
        log_file (str): log filename
        cloudwatch_group (str): cloudwatch group to log to

    Returns:
        (ProtoDFTCampaign): Standard proto-dft campaign from the
            chemical system

    """
    # Log to a local file, CloudWatch (one stream per chemsys), and console
    logger = logging.Logger("camd")
    logger.setLevel("INFO")
    file_handler = logging.FileHandler(log_file)
    cw_handler = CloudWatchLogHandler(log_group=cloudwatch_group,
                                      stream_name=chemsys)
    logger.addHandler(file_handler)
    logger.addHandler(cw_handler)
    logger.addHandler(logging.StreamHandler())
    logger.info(
        "Starting campaign factory from_chemsys {}".format(chemsys))
    s3_prefix = "{}/{}".format(prefix, chemsys)

    # Initialize s3: record start metadata, then push local state
    dumpfn({
        "started": datetime.now().isoformat(),
        "version": __version__
    }, "start.json")
    s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')

    # Get structure domain
    # Check cache -- key includes n_max_atoms so differently-sized
    # domains for the same chemsys do not collide
    cache_key = "protosearch_cache/v1/{}/{}/candidates.pickle".format(
        chemsys, n_max_atoms)
    # TODO: create test of isfile
    if s3_key_exists(bucket=CAMD_S3_BUCKET, key=cache_key):
        logger.info("Found cached protosearch domain.")
        # read_pickle with an s3:// URL fetches directly from the bucket
        candidate_data = pd.read_pickle("s3://{}/{}".format(
            CAMD_S3_BUCKET, cache_key))
        logger.info("Loaded cached {}.".format(cache_key))
    else:
        # No cache: enumerate the domain from scratch and cache it
        logger.info(
            "Generating domain with max {} atoms.".format(n_max_atoms))
        element_list = chemsys.split('-')
        max_coeff, charge_balanced = heuristic_setup(element_list)
        domain = StructureDomain.from_bounds(
            element_list,
            charge_balanced=charge_balanced,
            n_max_atoms=n_max_atoms,
            **{'grid': range(1, max_coeff)})
        candidate_data = domain.candidates()
        logger.info("Candidates generated")
        candidate_data.to_pickle("s3://{}/{}".format(
            CAMD_S3_BUCKET, cache_key))
        logger.info("Cached protosearch domain at {}.".format(cache_key))

    # Dump structure/candidate data locally and sync to the run prefix
    candidate_data.to_pickle("candidate_data.pickle")
    s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')

    # Set up agents and loop parameters (defaults used unless supplied)
    agent = agent or AgentStabilityAdaBoost(
        model=MLPRegressor(hidden_layer_sizes=(84, 50)),
        n_query=10,
        hull_distance=0.2,
        exploit_fraction=1.0,
        uncertainty=True,
        alpha=0.5,
        diversify=True,
        n_estimators=20)
    analyzer = analyzer or StabilityAnalyzer(hull_distance=0.2)
    experiment = experiment or OqmdDFTonMC1(timeout=30000,
                                            prefix_append="proto-dft")
    seed_data = load_dataframe("oqmd1.2_exp_based_entries_featurized_v2")

    # Load cached experiments: fold prior results for these candidates
    # into the seed so they are not recomputed
    logger.info("Loading cached experiments")
    cached_experiments = experiment.fetch_cached(candidate_data)
    logger.info("Found {} experiments.".format(len(cached_experiments)))
    if len(cached_experiments) > 0:
        summary, seed_data = analyzer.analyze(cached_experiments,
                                              seed_data)
        # Remove cached experiments from candidate_data
        candidate_space = candidate_data.index.difference(
            cached_experiments.index, sort=False).tolist()
        candidate_data = candidate_data.loc[candidate_space]
        logger.info("Cached experiments added to seed.")

    # Construct and start loop
    return cls(
        candidate_data=candidate_data,
        agent=agent,
        experiment=experiment,
        analyzer=analyzer,
        seed_data=seed_data,
        heuristic_stopper=5,
        s3_prefix=s3_prefix,
        logger=logger)
# Example: simulated (after-the-fact) stable-material discovery campaign.
# FIX: the script used MLPRegressor, AgentStabilityAdaBoost,
# StabilityAnalyzer and Campaign without importing them.
# NOTE(review): module paths below follow camd's package layout --
# confirm against the installed version.
from sklearn.neural_network import MLPRegressor

from camd.agent.stability import AgentStabilityAdaBoost
from camd.analysis import StabilityAnalyzer
from camd.campaigns.base import Campaign
from camd.experiment.base import ATFSampler
from camd.utils.data import load_default_atf_data

##########################################################
# Load dataset and filter by N_species of 2 or less
##########################################################
df = load_default_atf_data()

##########################################################
# Binary stable material discovery with a 75:25
# exploit/explore agent (exploit_fraction=0.75)
##########################################################
n_seed = 5000  # Starting sample size - a seed of this size will be randomly chosen.
n_query = 200  # This many new candidates are "calculated with DFT" (i.e. requested from Oracle -- DFT)
agent = AgentStabilityAdaBoost(
    model=MLPRegressor(hidden_layer_sizes=(84, 50)),
    n_query=n_query,
    hull_distance=0.05,
    uncertainty=True,
    exploit_fraction=0.75,
    n_estimators=20)
analyzer = StabilityAnalyzer(hull_distance=0.05)
experiment = ATFSampler(dataframe=df)
candidate_data = df

new_loop = Campaign(
    candidate_data, agent, experiment, analyzer, create_seed=n_seed)
new_loop.auto_loop(n_iterations=4, initialize=True)
def test_agent_stability_adaboost(self):
    """Smoke-test hypothesis generation with a default AdaBoost agent."""
    agent = AgentStabilityAdaBoost()
    hypotheses = agent.get_hypotheses(
        candidate_data=self.candidate_data, seed_data=self.seed_data)
    # FIX: the test previously made no assertion at all, so it only
    # detected crashes; at minimum verify a hypothesis set was produced.
    self.assertIsNotNone(hypotheses)