# Pickling data ###########################################
import os
import pickle
directory = os.path.join(
    os.environ["PROJ_irox_oer"],
    "dft_workflow/job_analysis/create_oh_slabs",
    "out_data")
os.makedirs(directory, exist_ok=True)
with open(os.path.join(directory, "df_slabs_oh.pickle"), "wb") as fle:
    pickle.dump(df_slabs_oh, fle)
# #########################################################

# +
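# Read the pickle back through the project helper to verify it loads cleanly.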
from methods import get_df_slabs_oh

df_slabs_oh_tmp = get_df_slabs_oh()
# df_slabs_oh_tmp
# -
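
# For reference, a helper like `get_df_slabs_oh` presumably just reverses the
# dump above. A minimal sketch, assuming the same pickle path; the real
# implementation lives in `methods.py` and may differ:

# +
import os
import pickle

def get_df_slabs_oh_sketch():
    """Illustrative loader: read df_slabs_oh back from the pickle written above."""
    path_i = os.path.join(
        os.environ["PROJ_irox_oer"],
        "dft_workflow/job_analysis/create_oh_slabs",
        "out_data/df_slabs_oh.pickle")
    with open(path_i, "rb") as fle:
        return pickle.load(fle)
# -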

# #########################################################
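# NOTE: "np" (numpy) and "ti" (the notebook start time, ti = time.time()) are
# assumed to be defined earlier in the notebook's setup cell.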
print(20 * "# # ")
print("All done!")
print("Run time:", np.round((time.time() - ti) / 60, 3), "min")
print("create_oh_slabs.ipynb")
print(20 * "# # ")
# #########################################################

# + active=""
#
#
#

# # Import Modules

from methods import (
    get_df_dft,
    get_df_job_ids,
    get_df_jobs,
    get_df_jobs_data,
    get_df_jobs_data_clusters,
    get_df_slab,
    get_df_slab_ids,
    get_df_jobs_anal,
    get_df_jobs_paths,
    get_df_slabs_oh,
    get_df_init_slabs,
    get_df_magmoms,
)

# # Read Data

df_dft = get_df_dft()
df_job_ids = get_df_job_ids()
df_jobs = get_df_jobs(exclude_wsl_paths=True)
df_jobs_data = get_df_jobs_data(exclude_wsl_paths=True)
df_jobs_data_clusters = get_df_jobs_data_clusters()
df_slab = get_df_slab()
df_slab_ids = get_df_slab_ids()
df_jobs_anal = get_df_jobs_anal()
df_jobs_paths = get_df_jobs_paths()
df_slabs_oh = get_df_slabs_oh()
df_init_slabs = get_df_init_slabs()
df_magmoms = get_df_magmoms()
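
# As a quick sanity check (a sketch, not part of the original workflow), print
# the shape of each loaded DataFrame to confirm everything read in:

# +
for df_name, df_i in [
    ("df_dft", df_dft),
    ("df_job_ids", df_job_ids),
    ("df_jobs", df_jobs),
    ("df_jobs_data", df_jobs_data),
    ("df_jobs_data_clusters", df_jobs_data_clusters),
    ("df_slab", df_slab),
    ("df_slab_ids", df_slab_ids),
    ("df_jobs_anal", df_jobs_anal),
    ("df_jobs_paths", df_jobs_paths),
    ("df_slabs_oh", df_slabs_oh),
    ("df_init_slabs", df_init_slabs),
    ("df_magmoms", df_magmoms),
    ]:
    print(df_name, df_i.shape)
# -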

# # Writing finished *O slabs to file

# +
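# Keep only jobs flagged completely done, then select the *O (ads == "o") rows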
df_jobs_anal_i = df_jobs_anal[df_jobs_anal.job_completely_done == True]

var = "o"
df_jobs_anal_i = df_jobs_anal_i.query('ads == @var')

for i_cnt, (name_i, row_i) in enumerate(df_jobs_anal_i.iterrows()):

    # #####################################################
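    # name_i is the row's MultiIndex tuple; level 0 is the compute environment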
    compenv_i = name_i[0]