# Filter the jobs that were unsuccessful
# Keep only rows with a parsed electronic energy (failed/unfinished jobs have
# NaN there), then restrict to surface-coverage-energy calculations.
# `.notna()` replaces the original list-comprehension inversion of
# pd.isna(...tolist()); the boolean mask comes from the same frame, so the
# selection is identical.
df_m = df_m[df_m["elec_energy"].notna()]
df_m = df_m[df_m["job_type"] == "surface_coverage_energy"]

# Columns of `df_m` needed downstream; everything else is discarded by the
# `keep_or_drop="keep"` call below.
cols_to_keep = [
    'facet',
    'job_type',
    'layers',
    'surface_type',
    'elec_energy',
    'atoms_object',
    'bulk_system',
    'coverage_type',
]

# With keep_or_drop="keep", `drop_columns` retains only the listed columns.
df_m = drop_columns(df=df_m, columns=cols_to_keep, keep_or_drop="keep")

# #############################################################################
# Read bulk data ##############################################################
# NOTE(review): pickle.load is only safe on trusted, repo-generated files.
with open(oer_bulk_structures_path, "rb") as fle:
    bulk_data = pickle.load(fle)

# + attributes={"classes": [], "id": "", "n": "5"}
# Load bulk Pourbaix transition data produced by the 07_bulk_pourbaix
# workflow; the path is rooted at the $PROJ_irox environment variable.
path_i = os.path.join(os.environ["PROJ_irox"],
                      "workflow/07_bulk_pourbaix/01_pourbaix_scripts",
                      "out_data/bulk_pourb_transitions.pickle")
with open(path_i, "rb") as fle:
    pourb_trans = pickle.load(fle)

# Bare expression: displays the loaded object when run as a notebook cell.
pourb_trans
# -
# ---------------------------------------------------------------------------
# Ejemplo n.º 2 (scraped-snippet separator; commented out so the file parses)
# Unpack the dataframes needed here from the pre-loaded DF_dict container.
# df_ccf = DF_dict["df_ccf"]
df_dij = DF_dict["df_dij"]
# ids_to_discard__too_many_atoms = DF_dict["ids_to_discard__too_many_atoms"]
# df_dft_final_final = DF_dict["df_dft_final_final"]

# NOTE(review): despite the `_path` suffix these appear to hold dataframes,
# not filesystem paths -- they are passed directly to drop_columns; confirm.
df_prototype_dft_path = DF_dict["df_prototype_dft"]
df_prototype_static_path = DF_dict["df_prototype_static"]

# +
from misc_modules.pandas_methods import drop_columns

# Remove structure-identification columns that are not needed for the
# analysis; commented entries were deliberately kept in the dataframe.
df_prototype_dft_path = drop_columns(
    df=df_prototype_dft_path,
    columns=[
        # 'p_name',
        'structure_name',
        # 'spacegroup',
        'wyckoffs',
        'species',
    ],
    keep_or_drop="drop")

# NOTE(review): this call is truncated in the source -- the end of the
# columns list and the closing parenthesis are cut off, so this fragment is
# not runnable as-is.
df_bulk_dft = drop_columns(
    df=df_bulk_dft,
    columns=[
        "atoms",
        "form_e_chris",
        "id",
        "id_old",
        "path",
        "volume",
        # "",
# ---------------------------------------------------------------------------
# Ejemplo n.º 3 (scraped-snippet separator; commented out so the file parses)
# Relabel the "O-2_OH-2" coverage as "h_covered".
df_m.loc[df_m["coverage_type"] == "O-2_OH-2", "coverage_type"] = "h_covered"

# #############################################################################
# Drop unnecessary columns from dataframe #####################################
# NOTE(review): despite the name `drop_cols`, this list is passed with
# keep_or_drop="keep", so these are the columns that are RETAINED.
drop_cols = [
    'bulk_system',
    'facet',
    'adsorbate',
    'coverage_type',
    'ads_e',
    'elec_energy',
    'surface_type',
    "ooh_direction",
]

df_m = drop_columns(df=df_m, columns=drop_cols, keep_or_drop="keep")

# Resetting index to have unique id to pass to OXR module
df_m = df_m.reset_index()
# __|
# -

# # | - Fitting only O-covered data

# +
# Restrict to the O-covered subset before building the free-energy plot.
df_o = df_m[df_m["coverage_type"] == "o_covered"]
df_m_tmp = df_o

# NOTE(review): this constructor call is truncated in the source -- its
# remaining arguments and closing parenthesis are cut off mid-call.
ORR_PLT = ORR_Free_E_Plot(
    free_energy_df=None,
    state_title="adsorbate",
# Reduce `df` to just the columns used downstream; everything not listed
# here is dropped by drop_columns(..., keep_or_drop="keep").
columns_retained = [
    "bulk_system",
    "facet",
    "adsorbate",
    "coverage_type",
    "ads_e",
    "surface_type",
]
df = drop_columns(
    df=df,
    columns=columns_retained,
    keep_or_drop="keep",
    )
# For each job id, keep the latest completed revision, but only if an
# ISIF=2 run completed for that id.
data_list = []
grouped = df_iro2_long.groupby("id")
for name, group in grouped:

    # df_succ = group[group["job_state"] == "SUCCEEDED"]
    # Rows whose calculation finished; .eq(True) replaces the `== True`
    # comparison (flake8 E712) with identical semantics.
    df_succ = group[group["completed"].eq(True)]
    # Any of the successful revisions must be an ISIF=2 run
    # (avoids materializing the column as a Python list).
    isif_2_done = (df_succ["isif"] == 2).any()

    if not df_succ.empty and isif_2_done:
        # Highest revision number among the successful runs.
        latest_succ_rev = df_succ.sort_values("revision").iloc[-1]
        data_list.append(latest_succ_rev)
df_iro2 = pd.DataFrame(data_list)

# Dropping all unnecessary columns
# (keep_or_drop="keep": only `atoms` and `path` are retained).
df_iro2 = drop_columns(df=df_iro2,
                       columns=["atoms", "path"],
                       keep_or_drop="keep")

# Adding stoich column: tag every row with the AB2 stoichiometry label.
df_iro2["stoich"] = "AB2"
# -

# # IrO3 Bulk Data

# +
# #############################################################################
# Parsing NERSC IrO3 DFT Data
# Build the path to the pickled NERSC results (rooted at $PROJ_DATA).
path_i = os.path.join(os.environ["PROJ_DATA"],
                      "04_IrOx_surfaces_OER/ml_bulk_irox_dft/iro3",
                      "df_dict_nersc.pickle")
# "df_dict.pickle")
# ---------------------------------------------------------------------------
# Ejemplo n.º 6 (scraped-snippet separator; commented out so the file parses)
# # Main Loop

# +
from misc_modules.pandas_methods import drop_columns

# print(list(df_jobs_anal.columns))

# ['job_id_max', 'timed_out', 'completed', 'brmix_issue', 'job_understandable', 'decision', 'dft_params_new', 'job_completely_done']
# Only the job id and the overall "done" flag are needed for the loop below.
cols_to_keep = [
    "job_id_max",
    "job_completely_done",
]
df_jobs_anal_i = drop_columns(
    df=df_jobs_anal,
    columns=cols_to_keep,
    keep_or_drop="keep",
)

# +
# assert False

# +
# Names of the (Multi)Index levels of df_jobs_anal_i.
index_keys = list(df_jobs_anal_i.index.names)

# #########################################################
data_dict_list = []
# #########################################################
# NOTE(review): the body of this loop is truncated in the source -- only its
# first statement survives.
for index_i, row_i in df_jobs_anal_i.iterrows():
    # #####################################################
    data_dict_i = dict()
# ---------------------------------------------------------------------------
# Ejemplo n.º 7 (scraped-snippet separator; commented out so the file parses)
# #########################################################
# Load the standardized bulk-DFT dataframe written by the
# process_bulk_dft/standardize_bulks workflow step ($PROJ_irox_oer root).
dir_i = os.path.join(os.environ["PROJ_irox_oer"],
                     "workflow/process_bulk_dft/standardize_bulks", "out_data")
file_name_i = os.path.join(dir_i, "df_dft_stan.pickle")
with open(file_name_i, "rb") as fle:
    df_dft_stan = pickle.load(fle)

# +
# #####################################################
# Drop bookkeeping/raw-data columns that are not needed downstream.
unneeded_cols = [
    "atoms",
    "form_e_chris",
    "id_old",
    "path",
    "id",
    "source",
    "energy",
]
df_dft = drop_columns(
    df=df_dft,
    columns=unneeded_cols,
    keep_or_drop="drop",
)

# Sort rows by dH (ascending).
df_dft = df_dft.sort_values("dH")
# -

# Recreate the `atoms` column by looking up each index value in `atoms_dict`.
df_dft["atoms"] = df_dft.index.map(atoms_dict)

# +