def test_random_agent_loop(self):
        """
        Run a Loop with a RandomAgent/AnalyzeStability/ATFSampler stack
        against the featurized OQMD dataset for six iterations, checking
        that the seed is consumed on initialize and runs do not raise.
        """
        df = load_dataframe("oqmd1.2_exp_based_entries_featurized_v2")
        n_seed = 5000
        n_query = 200
        agent = RandomAgent
        agent_params = {
            'hull_distance': 0.05,
            'n_query': n_query,
        }
        analyzer = AnalyzeStability
        analyzer_params = {'hull_distance': 0.05}
        experiment = ATFSampler
        experiment_params = {'params': {'dataframe': df}}
        candidate_data = df
        # removed unused local `path = '.'` -- it was never referenced

        new_loop = Loop(candidate_data,
                        agent,
                        experiment,
                        analyzer,
                        agent_params=agent_params,
                        analyzer_params=analyzer_params,
                        experiment_params=experiment_params,
                        create_seed=n_seed)

        new_loop.initialize()
        # After initialization the seed flag should be cleared
        self.assertFalse(new_loop.create_seed)

        # Each run should complete without raising
        for _ in range(6):
            new_loop.run()
            self.assertTrue(True)
Example #2
0
 def test_simulated(self):
     """
     Run a short simulated AdaBoost stability campaign against the
     Mn-Ni-O-Sb after-the-fact dataset and check the final hull plot
     is written.
     """
     exp_dataframe = pd.read_pickle(
         os.path.join(CAMD_TEST_FILES, "mn-ni-o-sb.pickle"))
     experiment = ATFSampler(exp_dataframe)
     # The trailing 11 columns hold experiment outputs; the rest are features
     candidate_data = exp_dataframe.iloc[:, :-11]
     # Set up agents and loop parameters
     agent = AgentStabilityAdaBoost(
         model=MLPRegressor(hidden_layer_sizes=(84, 50)),
         n_query=2,
         hull_distance=0.2,
         exploit_fraction=1.0,
         uncertainty=True,
         alpha=0.5,
         diversify=True,
         n_estimators=20)
     analyzer = StabilityAnalyzer(hull_distance=0.2)
     # Reduce seed_data to the campaign chemistry
     icsd_data = load_dataframe("oqmd1.2_exp_based_entries_featurized_v2")
     seed_data = filter_dataframe_by_composition(icsd_data, "MnNiOSb")
     leftover = ~icsd_data.index.isin(seed_data.index)
     # Add some random other data to test compositional flexibility.
     # DataFrame.append was removed in pandas 2.0 -- use pd.concat instead.
     seed_data = pd.concat(
         [seed_data, icsd_data.loc[leftover].sample(30)])
     del icsd_data
     with ScratchDir('.'):
         campaign = ProtoDFTCampaign(candidate_data=candidate_data,
                                     agent=agent,
                                     experiment=experiment,
                                     analyzer=analyzer,
                                     seed_data=seed_data,
                                     heuristic_stopper=5)
         campaign.autorun()
         self.assertTrue(os.path.isfile('hull_finalized.png'))
Example #3
0
    def test_partition(self):
        """Partitioning must not lose rows, and n_elements=1 yields a seed."""
        name = self.smallfile.rsplit('.', 1)[0]
        dataframe = load_dataframe(name)

        # Default partition and single-element partition both conserve rows
        for kwargs in ({}, {'n_elements': 1}):
            cand, seed = partition_intercomp(dataframe, **kwargs)
            self.assertEqual(len(dataframe), len(cand) + len(seed))
        # The n_elements=1 partition should place something in the seed
        self.assertGreater(len(seed), 0)
Example #4
0
    def from_chemsys(cls, chemsys):
        """
        Class factory method for constructing campaign from
        chemsys.

        Args:
            chemsys (str): chemical system for the campaign

        Returns:
            (ProtoDFTCampaign): Standard proto-dft campaign from
                the chemical system

        """
        s3_prefix = "proto-dft-2/runs/{}".format(chemsys)

        # Record the campaign start marker and sync it to S3
        start_doc = {
            "started": datetime.now().isoformat(),
            "version": __version__,
        }
        dumpfn(start_doc, "start.json")
        s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')

        # Enumerate the candidate structure domain for this chemistry
        elements = chemsys.split('-')
        max_coeff, charge_balanced = heuristic_setup(elements)
        domain = StructureDomain.from_bounds(
            elements,
            charge_balanced=charge_balanced,
            n_max_atoms=20,
            grid=range(1, max_coeff))
        candidate_data = domain.candidates()

        # Persist structure/candidate data locally
        with open('candidate_data.pickle', 'wb') as f:
            pickle.dump(candidate_data, f)

        # Agent, analyzer, experiment, and seed for the stability campaign
        agent = AgentStabilityAdaBoost(
            model=MLPRegressor(hidden_layer_sizes=(84, 50)),
            n_query=10,
            hull_distance=0.2,
            exploit_fraction=1.0,
            uncertainty=True,
            alpha=0.5,
            diversify=True,
            n_estimators=20)
        analyzer = StabilityAnalyzer(hull_distance=0.2)
        experiment = OqmdDFTonMC1(timeout=30000)
        seed_data = load_dataframe("oqmd1.2_exp_based_entries_featurized_v2")

        # NOTE(review): this prefix ("proto-dft") differs from the
        # "proto-dft-2" prefix synced above -- confirm intended
        return cls(candidate_data=candidate_data,
                   agent=agent,
                   experiment=experiment,
                   analyzer=analyzer,
                   seed_data=seed_data,
                   heuristic_stopper=5,
                   s3_prefix="proto-dft/runs/{}".format(chemsys))
    def test_random_agent_loop(self):
        """Random-agent Campaign should initialize and run six iterations."""
        dataframe = load_dataframe("oqmd1.2_exp_based_entries_featurized_v2")
        seed_size = 5000
        agent = RandomAgent(n_query=200)
        analyzer = StabilityAnalyzer(hull_distance=0.05, parallel=False)
        experiment = ATFSampler(dataframe=dataframe)

        campaign = Campaign(dataframe, agent, experiment, analyzer,
                            create_seed=seed_size)

        campaign.initialize()
        # Seed flag should be consumed by initialization
        self.assertFalse(campaign.create_seed)

        # Each run should complete without raising
        for _ in range(6):
            campaign.run()
            self.assertTrue(True)
Example #6
0
 def test_simulated(self):
     """Random-agent ProtoDFTCampaign runs to completion in a scratch dir."""
     exp_dataframe = pd.read_pickle(
         os.path.join(CAMD_TEST_FILES, "mn-ni-o-sb.pickle"))
     experiment = ATFSampler(exp_dataframe)
     # All but the trailing 11 (result) columns are candidate features
     candidate_data = exp_dataframe.iloc[:, :-11]
     agent = RandomAgent(n_query=2)
     analyzer = StabilityAnalyzer(hull_distance=0.2)
     # Restrict the seed to the campaign chemistry
     seed_data = filter_dataframe_by_composition(
         load_dataframe("oqmd1.2_exp_based_entries_featurized_v2"),
         "MnNiOSb")
     with ScratchDir('.'):
         campaign = ProtoDFTCampaign(candidate_data=candidate_data,
                                     agent=agent,
                                     experiment=experiment,
                                     analyzer=analyzer,
                                     seed_data=seed_data,
                                     heuristic_stopper=5)
         campaign.autorun()
Example #7
0
 def test_load_dataframe(self):
     """Loading the small test file should yield the full 36581-row frame."""
     name = self.smallfile.rsplit('.', 1)[0]
     loaded = load_dataframe(name)
     self.assertEqual(len(loaded), 36581)
Example #8
0
    def from_chemsys(cls,
                     chemsys,
                     prefix="proto-dft-2/runs",
                     n_max_atoms=20,
                     agent=None,
                     analyzer=None,
                     experiment=None,
                     log_file="campaign.log",
                     cloudwatch_group="/camd/worker/dev/"):
        """
        Class factory method for constructing campaign from
        chemsys.

        Args:
            chemsys (str): chemical system for the campaign
            prefix (str): prefix for s3
            n_max_atoms (int): number of maximum atoms
            agent (Agent): agent for stability campaign
            analyzer (Analyzer): analyzer for stability campaign
            experiment (Agent): experiment for stability campaign
            log_file (str): log filename
            cloudwatch_group (str): cloudwatch group to log to

        Returns:
            (ProtoDFTCampaign): Standard proto-dft campaign from
                the chemical system

        """
        # Log simultaneously to a local file, CloudWatch (stream per
        # chemsys), and stderr.
        # NOTE(review): logging.Logger("camd") constructs a fresh, unmanaged
        # logger each call rather than logging.getLogger("camd") -- confirm
        # this is intended (it avoids duplicate handlers on repeated calls).
        logger = logging.Logger("camd")
        logger.setLevel("INFO")
        file_handler = logging.FileHandler(log_file)
        cw_handler = CloudWatchLogHandler(log_group=cloudwatch_group,
                                          stream_name=chemsys)
        logger.addHandler(file_handler)
        logger.addHandler(cw_handler)
        logger.addHandler(logging.StreamHandler())

        logger.info(
            "Starting campaign factory from_chemsys {}".format(chemsys))
        s3_prefix = "{}/{}".format(prefix, chemsys)

        # Initialize s3: write a start marker with timestamp and code
        # version, then sync the working directory to the campaign prefix
        dumpfn({
            "started": datetime.now().isoformat(),
            "version": __version__
        }, "start.json")
        s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')

        # Get structure domain: enumerate candidate structures for the
        # elements in the chemical system up to n_max_atoms
        element_list = chemsys.split('-')
        max_coeff, charge_balanced = heuristic_setup(element_list)
        domain = StructureDomain.from_bounds(element_list,
                                             charge_balanced=charge_balanced,
                                             n_max_atoms=n_max_atoms,
                                             **{'grid': range(1, max_coeff)})
        candidate_data = domain.candidates()

        # Dump structure/candidate data locally and push to S3
        with open('candidate_data.pickle', 'wb') as f:
            pickle.dump(candidate_data, f)
        s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')
        logger.info("Candidates generated")

        # Set up agents and loop parameters; caller-supplied components
        # take precedence over these defaults
        agent = agent or AgentStabilityAdaBoost(
            model=MLPRegressor(hidden_layer_sizes=(84, 50)),
            n_query=10,
            hull_distance=0.2,
            exploit_fraction=1.0,
            uncertainty=True,
            alpha=0.5,
            diversify=True,
            n_estimators=20)
        analyzer = analyzer or StabilityAnalyzer(hull_distance=0.2)
        experiment = experiment or OqmdDFTonMC1(timeout=30000)
        seed_data = load_dataframe("oqmd1.2_exp_based_entries_featurized_v2")

        # Construct and start loop
        return cls(candidate_data=candidate_data,
                   agent=agent,
                   experiment=experiment,
                   analyzer=analyzer,
                   seed_data=seed_data,
                   heuristic_stopper=5,
                   s3_prefix=s3_prefix,
                   logger=logger)
Example #9
0
    def from_chemsys(cls,
                     chemsys,
                     prefix="proto-dft-2/runs",
                     n_max_atoms=20,
                     agent=None,
                     analyzer=None,
                     experiment=None,
                     log_file="campaign.log",
                     cloudwatch_group="/camd/worker/dev/"):
        """
        Class factory method for constructing campaign from
        chemsys.

        Args:
            chemsys (str): chemical system for the campaign
            prefix (str): prefix for s3
            n_max_atoms (int): number of maximum atoms
            agent (Agent): agent for stability campaign
            analyzer (Analyzer): analyzer for stability campaign
            experiment (Agent): experiment for stability campaign
            log_file (str): log filename
            cloudwatch_group (str): cloudwatch group to log to

        Returns:
            (ProtoDFTCampaign): Standard proto-dft campaign from
                the chemical system

        """
        # Log simultaneously to a local file, CloudWatch (stream per
        # chemsys), and stderr.
        # NOTE(review): logging.Logger("camd") constructs a fresh, unmanaged
        # logger each call rather than logging.getLogger("camd") -- confirm
        # this is intended (it avoids duplicate handlers on repeated calls).
        logger = logging.Logger("camd")
        logger.setLevel("INFO")
        file_handler = logging.FileHandler(log_file)
        cw_handler = CloudWatchLogHandler(log_group=cloudwatch_group,
                                          stream_name=chemsys)
        logger.addHandler(file_handler)
        logger.addHandler(cw_handler)
        logger.addHandler(logging.StreamHandler())

        logger.info(
            "Starting campaign factory from_chemsys {}".format(chemsys))
        s3_prefix = "{}/{}".format(prefix, chemsys)

        # Initialize s3: write a start marker with timestamp and code
        # version, then sync the working directory to the campaign prefix
        dumpfn({
            "started": datetime.now().isoformat(),
            "version": __version__
        }, "start.json")
        s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')

        # Get structure domain
        # Check cache: candidate enumeration is expensive, so previously
        # generated domains are cached on S3 keyed by chemsys/n_max_atoms
        cache_key = "protosearch_cache/v1/{}/{}/candidates.pickle".format(
            chemsys, n_max_atoms)
        # TODO: create test of isfile
        if s3_key_exists(bucket=CAMD_S3_BUCKET, key=cache_key):
            logger.info("Found cached protosearch domain.")
            # NOTE(review): pandas "s3://" paths require s3fs -- confirm
            # it is available in the deployment environment
            candidate_data = pd.read_pickle("s3://{}/{}".format(
                CAMD_S3_BUCKET, cache_key))
            logger.info("Loaded cached {}.".format(cache_key))
        else:
            # Cache miss: enumerate the domain and store it for next time
            logger.info(
                "Generating domain with max {} atoms.".format(n_max_atoms))
            element_list = chemsys.split('-')
            max_coeff, charge_balanced = heuristic_setup(element_list)
            domain = StructureDomain.from_bounds(
                element_list,
                charge_balanced=charge_balanced,
                n_max_atoms=n_max_atoms,
                **{'grid': range(1, max_coeff)})
            candidate_data = domain.candidates()
            logger.info("Candidates generated")
            candidate_data.to_pickle("s3://{}/{}".format(
                CAMD_S3_BUCKET, cache_key))
            logger.info("Cached protosearch domain at {}.".format(cache_key))

        # Dump structure/candidate data locally and push to S3
        candidate_data.to_pickle("candidate_data.pickle")
        s3_sync(s3_bucket=CAMD_S3_BUCKET, s3_prefix=s3_prefix, sync_path='.')

        # Set up agents and loop parameters; caller-supplied components
        # take precedence over these defaults
        agent = agent or AgentStabilityAdaBoost(
            model=MLPRegressor(hidden_layer_sizes=(84, 50)),
            n_query=10,
            hull_distance=0.2,
            exploit_fraction=1.0,
            uncertainty=True,
            alpha=0.5,
            diversify=True,
            n_estimators=20)
        analyzer = analyzer or StabilityAnalyzer(hull_distance=0.2)
        experiment = experiment or OqmdDFTonMC1(timeout=30000,
                                                prefix_append="proto-dft")
        seed_data = load_dataframe("oqmd1.2_exp_based_entries_featurized_v2")

        # Load cached experiments: fold any already-completed experiments
        # for these candidates into the seed so they are not re-run
        logger.info("Loading cached experiments")
        cached_experiments = experiment.fetch_cached(candidate_data)
        logger.info("Found {} experiments.".format(len(cached_experiments)))
        if len(cached_experiments) > 0:
            summary, seed_data = analyzer.analyze(cached_experiments,
                                                  seed_data)
            # Remove cached experiments from candidate_data
            candidate_space = candidate_data.index.difference(
                cached_experiments.index, sort=False).tolist()
            candidate_data = candidate_data.loc[candidate_space]
            logger.info("Cached experiments added to seed.")

        # Construct and start loop
        return cls(candidate_data=candidate_data,
                   agent=agent,
                   experiment=experiment,
                   analyzer=analyzer,
                   seed_data=seed_data,
                   heuristic_stopper=5,
                   s3_prefix=s3_prefix,
                   logger=logger)