Exemplo n.º 1
0
    def predictor_from_file(config_path, model_path):
        """Rebuild a ``BanditPredictor`` from a JSON config and a model file.

        Parameters
        ----------
        config_path : str
            Path to the JSON file describing the predictor (model type/spec,
            feature config, transform and imputer specs, feature orderings).
        model_path : str
            Path to the trained model: a torch ``state_dict`` when the config's
            ``model_type`` is ``"neural_bandit"``, otherwise a pickle.

        Returns
        -------
        BanditPredictor
            Predictor wired up with the deserialized model, transforms and
            imputers.
        """
        with open(config_path, "rb") as config_file:
            config = json.load(config_file)

        if config["model_type"] == "neural_bandit":
            # Rebuild the torch network from its spec and freeze it for
            # inference.
            model = embed_dnn.EmbedDnn(**config["model_spec"])
            model.load_state_dict(torch.load(model_path))
            model.eval()
        else:
            # Non-neural models are stored as pickles.
            # NOTE(review): pickle.load is unsafe on untrusted files — only
            # load model files from trusted sources.
            with open(model_path, "rb") as model_file:
                model = pickle.load(model_file)

        def build_transform(spec):
            # Reconstruct a fitted sklearn transform from its serialized
            # attributes; id-list features have no transform (spec is None).
            if spec is None:
                return None
            if spec["name"] == "StandardScaler":
                scaler = preprocessing.StandardScaler()
                scaler.mean_ = np.array(spec["mean"])
                scaler.scale_ = np.array(spec["scale"])
                scaler.var_ = np.array(spec["var"])
                return scaler
            if spec["name"] == "OneHotEncoder":
                encoder = preprocessing.OneHotEncoder()
                encoder.sparse = spec["sparse"]
                encoder.categories_ = np.array(spec["categories"])
                return encoder
            raise Exception(
                f"Don't know how to load transform_spec of type {spec['name']}"
            )

        def build_imputer(spec):
            # Reconstruct a fitted SimpleImputer; categoricals & id lists
            # have no imputer (spec is None).
            if spec is None:
                return None
            imputer = SimpleImputer()
            imputer.set_params(**spec["parameters"])
            imputer.statistics_ = np.array(spec["statistics"])
            return imputer

        transforms = {
            feature_name: build_transform(spec)
            for feature_name, spec in config["transforms"].items()
        }
        imputers = {
            feature_name: build_imputer(spec)
            for feature_name, spec in config["imputers"].items()
        }

        return BanditPredictor(
            feature_config=config["feature_config"],
            float_feature_order=config["float_feature_order"],
            id_feature_order=config["id_feature_order"],
            id_feature_str_to_int_map=config["id_feature_str_to_int_map"],
            transforms=transforms,
            imputers=imputers,
            model=model,
            model_type=config["model_type"],
            reward_type=config["reward_type"],
            model_spec=config["model_spec"],
            dense_features_to_use=config["dense_features_to_use"],
        )
Exemplo n.º 2
0
def predict_catkit_demo(images):
    """Return a prediction of adsorption energies for structures generated with
    CatKitDemo.

    Parameters
    ----------
    images : list
        List of atoms objects representing adsorbate-surface structures.

    Returns
    -------
    dict
        Keys ``'mean'``, ``'uncertainty'`` (per-image predictions from the
        module-level ``gp`` model) and ``'references'`` (display strings of
        gas-phase references). An empty dict is returned if any image lacks
        the required ``'species'`` annotation.
    """
    # Gas-phase reference molecules per chemisorbed element.
    model_ref = {'H': 'H2', 'O': 'H2O, H2', 'C': 'CH4, H2'}

    # Make list of strings showing the references, e.g. "*, CH4, H2".
    display_ref = []
    for atoms in images:
        try:
            initial_state = [
                model_ref[s] for s in ase.atoms.string2symbols(
                    atoms.info['key_value_pairs']['species'])
            ]
        except KeyError:
            # Image is missing the 'species' key_value_pair (or contains an
            # element outside model_ref): bail out with an empty result.
            return {}
        display_ref.append('*, ' + ', '.join(list(np.unique(initial_state))))

    images = autogen_info(images)

    gen = FeatureGenerator(nprocs=1)
    # NOTE(review): a preceding call to default_fingerprinters(gen,
    # 'adsorbates') was removed — its result was immediately overwritten by
    # the explicit fingerprinter list below and never used.
    train_fpv = [
        gen.mean_chemisorbed_atoms, gen.count_chemisorbed_fragment,
        gen.count_ads_atoms, gen.count_ads_bonds, gen.ads_av, gen.ads_sum,
        gen.bulk, gen.term, gen.strain, gen.mean_surf_ligands, gen.mean_site,
        gen.median_site, gen.max_site, gen.min_site, gen.sum_site,
        gen.generalized_cn, gen.en_difference_ads, gen.en_difference_chemi,
        gen.en_difference_active, gen.db_size, gen.delta_energy
    ]
    matrix = gen.return_vec(images, train_fpv)

    # clean_index_name / clean_mean are module-level paths to precomputed
    # feature-selection indices and means — presumably saved at training
    # time; verify they match the fingerprinter list above.
    feature_index = np.load(clean_index_name)
    clean_feature_mean = np.load(clean_mean)

    # Mean-impute missing feature values using the training-set statistics.
    impute = SimpleImputer(strategy='mean')
    impute.statistics_ = clean_feature_mean
    new_data = impute.transform(matrix[:, feature_index])

    prediction = gp.predict(new_data,
                            get_validation_error=False,
                            get_training_error=False,
                            uncertainty=True)

    output = {
        'mean': list(prediction['prediction']),
        'uncertainty': list(prediction['uncertainty']),
        'references': display_ref
    }
    return output
Exemplo n.º 3
0
    def predictor_from_file(config_path, net_path):
        """Rebuild a ``BanditPredictor`` from a JSON config and a torch net.

        Parameters
        ----------
        config_path : str
            Path to the JSON config (net spec, experiment params, transform
            and imputer specs, feature orderings).
        net_path : str
            Path to the saved torch ``state_dict`` for the EmbedDnn net.

        Returns
        -------
        BanditPredictor
            Predictor wired up with the deserialized net, transforms and
            imputers.
        """
        with open(config_path, "rb") as cfg_file:
            cfg = json.load(cfg_file)

        # Rebuild the torch network from its spec and freeze it for inference.
        net = embed_dnn.EmbedDnn(**cfg["net_spec"])
        net.load_state_dict(torch.load(net_path))
        net.eval()

        # Reconstruct fitted sklearn transforms from serialized attributes.
        transforms = {}
        for feature_name, spec in cfg["transforms"].items():
            if spec is None:
                # id-list features carry no transform
                transforms[feature_name] = None
                continue
            kind = spec["name"]
            if kind == "StandardScaler":
                scaler = preprocessing.StandardScaler()
                scaler.mean_ = np.array(spec["mean"])
                scaler.scale_ = np.array(spec["scale"])
                scaler.var_ = np.array(spec["var"])
                transforms[feature_name] = scaler
            elif kind == "OneHotEncoder":
                encoder = preprocessing.OneHotEncoder()
                encoder.sparse = spec["sparse"]
                encoder.categories_ = np.array(spec["categories"])
                transforms[feature_name] = encoder
            else:
                raise Exception(
                    f"Don't know how to load transform_spec of type {spec['name']}"
                )

        # Reconstruct fitted imputers; id-list features carry no imputer.
        imputers = {}
        for feature_name, spec in cfg["imputers"].items():
            if spec is None:
                imputers[feature_name] = None
            else:
                imp = SimpleImputer()
                imp.set_params(**spec["parameters"])
                imp.statistics_ = np.array(spec["statistics"])
                imputers[feature_name] = imp

        return BanditPredictor(
            experiment_specific_params=cfg["experiment_specific_params"],
            float_feature_order=cfg["float_feature_order"],
            id_feature_order=cfg["id_feature_order"],
            transforms=transforms,
            imputers=imputers,
            net=net,
            net_spec=cfg["net_spec"],
        )