verbose = False

# # Read data

# +
# #########################################################
df_jobs = get_df_jobs()

# #########################################################
df_slab = get_df_slab()

# #########################################################
df_jobs_paths = get_df_jobs_paths()

# #########################################################
df_jobs_data = get_df_jobs_data()

# #########################################################
df_jobs_anal = get_df_jobs_anal()
df_jobs_anal_completed = df_jobs_anal[df_jobs_anal.job_completely_done == True]

# #########################################################
df_init_slabs = get_df_init_slabs()
# -

# # Removing rows whose required files aren't present locally
#
# These may need to be downloaded with rclone (a rough sketch follows the cell below)

# +
indices_tmp = [
    ('sherlock', 'ripirefu_15', 'bare', 62.0, 1),
    ]
# -
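
# As noted above, some of these jobs may be missing their files locally and need to be pulled with rclone. The cell below is a rough sketch, not part of the original pipeline: the `path_job_root_w_att_rev` column and the `gdrive` remote name are assumptions, not the project's actual names.

# +
import os
import subprocess

local_missing = []
for job_id_j, row_j in df_jobs_paths.iterrows():
    # Assumed column holding the local job directory for this row
    path_j = row_j.get("path_job_root_w_att_rev")
    if path_j is not None and not os.path.isdir(str(path_j)):
        local_missing.append(job_id_j)

if verbose:
    print(len(local_missing), "job(s) have no files present locally")

# Pulling one missing directory from a hypothetical rclone remote would look like:
# subprocess.run(["rclone", "copy", "gdrive:" + path_j, path_j], check=True)
# -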

# ### Read Data

# +
# #########################################################
df_jobs_paths = get_df_jobs_paths()

# #########################################################
df_jobs = get_df_jobs(exclude_wsl_paths=True)

# #########################################################
df_jobs_data_clusters = get_df_jobs_data_clusters()

# #########################################################
df_jobs_data_old = get_df_jobs_data(exclude_wsl_paths=True, drop_cols=False)

# #########################################################
# Checking if in local env
if compenv == "wsl":
    df_jobs_i = df_jobs
else:
    df_jobs_i = df_jobs[df_jobs.compenv == compenv]
# -
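
# A quick sanity check of the filter above (sketch): count jobs per compute environment and confirm how many rows remain for the current `compenv`.

# +
if verbose:
    print(df_jobs.compenv.value_counts())
    print("df_jobs_i.shape:", df_jobs_i.shape)
# -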

# ### Getting job state loop

# +
data_dict_list = []
for job_id_i, row_i in df_jobs_i.iterrows():
    # Per-job dict that will hold the queried job-state info
    data_dict_i = dict()
from local_methods import M
# -
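
# The loop above is only a stub; the sketch below shows the kind of per-job record such a job-state loop typically collects. The `job_state` placeholder stands in for the project's actual cluster/filesystem query.

# +
import pandas as pd

data_dict_list = []
for job_id_i, row_i in df_jobs_i.iterrows():
    data_dict_i = dict()
    data_dict_i["job_id"] = job_id_i
    data_dict_i["compenv"] = row_i.compenv
    # Placeholder for the real job-state lookup (e.g. parsing the job directory)
    data_dict_i["job_state"] = None
    data_dict_list.append(data_dict_i)

df_jobs_state_tmp = pd.DataFrame(data_dict_list)
# -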

from methods import isnotebook
isnotebook_i = isnotebook()
if isnotebook_i:
    from tqdm.notebook import tqdm
    verbose = True
else:
    from tqdm import tqdm
    verbose = False
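
# With the appropriate `tqdm` imported, the long per-job loops can be wrapped in a progress bar, e.g. (sketch):

# +
for job_id_i, row_i in tqdm(df_jobs_i.iterrows(), total=df_jobs_i.shape[0]):
    # Placeholder body; the real per-job processing happens in the loops below
    pass
# -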

# ### Read data objects with methods

# +
df_jobs_data = get_df_jobs_data(exclude_wsl_paths=True)

df_jobs_anal = get_df_jobs_anal()

df_active_sites = get_df_active_sites()

df_atoms_sorted_ind = get_df_atoms_sorted_ind()
# -

# ### Filtering down to only `oer_adsorbate` jobs

# +
df_ind = df_jobs_anal.index.to_frame()

df_jobs_anal = df_jobs_anal.loc[
    df_ind[df_ind.job_type == "oer_adsorbate"].index
    ]
# -
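
# The same filter can be written directly against the index level, without building the intermediate index frame; the assert just confirms the two forms agree.

# +
df_jobs_anal_oer = df_jobs_anal[
    df_jobs_anal.index.get_level_values("job_type") == "oer_adsorbate"
    ]

# df_jobs_anal was already filtered above, so the shapes must match
assert df_jobs_anal_oer.shape == df_jobs_anal.shape
# -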
# ### Script Inputs

# +
# TEST_no_file_ops = True  # True if just testing around, False for production mode
# TEST_no_file_ops = False
# -
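
# When enabled, a flag like `TEST_no_file_ops` is typically used to guard anything that writes to disk. A minimal sketch follows; the helper and paths here are hypothetical, not the notebook's actual file operations.

# +
import shutil

def maybe_copy(src_path, dest_path, no_file_ops=True):
    """Copy src_path to dest_path unless running in test (no-file-ops) mode."""
    if no_file_ops:
        print("TEST mode, skipping copy:", src_path, "->", dest_path)
    else:
        shutil.copy(src_path, dest_path)
# -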

# ### Read Data

# +
df_jobs = get_df_jobs()
if verbose:
    print("df_jobs.shape:", 2 * "\t", df_jobs.shape)

df_jobs_data = get_df_jobs_data(drop_cols=False)
if verbose:
    print("df_jobs_data.shape:", 1 * "\t", df_jobs_data.shape)

df_jobs_paths = get_df_jobs_paths()

# +
group_cols = ["compenv", "slab_id", "att_num", "ads", "active_site"]
# group_cols = ["compenv", "slab_id", "att_num", ]
grouped = df_jobs.groupby(group_cols)
max_job_row_list = []
data_dict_list = []
for name, group in grouped:
    data_dict_i = dict()

    # Row(s) in this group with the highest revision number (latest job revision)
    max_job = group[group.rev_num == group.rev_num.max()]
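
# The explicit loop above picks the highest `rev_num` row within each group; the same selection can be done without a loop (sketch; ties and NaN group keys are handled slightly differently than with groupby).

# +
df_latest_rev = (
    df_jobs
    .sort_values("rev_num")
    .drop_duplicates(subset=group_cols, keep="last")
    )

if verbose:
    print("df_latest_rev.shape:", df_latest_rev.shape)
# -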