import pandas as pd

from an_data_processing import load_df
from proj_data_irox import data_dir

from layout__v1 import layout, axis_num_list

from plotting.my_plotly import my_plotly_plot, get_xy_axis_info

# #############################################################################
from IPython.display import Image
# -

# ## Read data

# + attributes={"classes": [], "id": "", "n": "4"}
dataframe_dir = data_dir

df_pourbaix, df_ads, df_surf = load_df(from_file=True,
                                       root_dir=dataframe_dir,
                                       data_dir=dataframe_dir,
                                       file_name="df_master.pickle",
                                       process_df=True)
df_m = df_surf

# Drop the jobs that were unsuccessful (no electronic energy)
df_m = df_m[df_m["elec_energy"].notna()]
df_m = df_m[df_m["job_type"] == "surface_coverage_energy"]

cols_to_keep = [
    'facet',
    'job_type',
    'layers',
    'surface_type',
    'elec_energy',
    'atoms_object',
    ]
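
# Presumed usage of the column list above (the original fragment is cut off
# here, so this line is an assumption):
df_m = df_m[cols_to_keep]
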
# # Read/Process DataFrame

# +
# /mnt/c/Users/raulf/Dropbox/01_acad_folder/01_grad_school/01_norskov/04_comp_clusters/02_DATA/04_IrOx_surfaces_OER

import os

from an_data_processing import load_df

data_dir = os.path.join(
    os.environ["dropbox"],
    "01_acad_folder/01_grad_school/01_norskov/04_comp_clusters/02_DATA/04_IrOx_surfaces_OER",
)
# -

df_master = load_df(
    from_file=False,
    root_dir="../data",
    data_dir="../../data",
    file_name="df_master.pickle",
    process_df=False,
    filter_early_revisions=False,
)

df_master

len(df_master)


def parse_cpu_time(row_i):
    outcar_list = row_i.outcar
    search_lines = [i for i in outcar_list if "Total CPU time used" in i]
    if len(search_lines) == 1:
        # The OUTCAR line reads e.g. "Total CPU time used (sec): 1234.5"
        # (body completed here; the original fragment is cut off at this point)
        return float(search_lines[0].split(":")[-1])
    return None
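
# Hypothetical usage of the helper above (assumes rows of df_master carry an
# `outcar` list of OUTCAR lines; not part of the original fragment):
df_master["cpu_time"] = df_master.apply(parse_cpu_time, axis=1)
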
# # Example #3

    "cv_ooh": cv_ooh,
    "Umin": Umin,
    "Umax": Umax,
    "print_out": False,
    "save_dir": save_dir,
#     "file_name": "_".join(list(key_i)) + ".pdf",
    "close_plt": close_plt,
    }

# # Read/Process DataFrame

# +
df_pourbaix, df_ads, df_surf = load_df(
    from_file=False,
    root_dir="../data",
    data_dir="../data",
    file_name="df_master.pickle",
    process_df=True,
    )

# df_m = df_pourbaix

# # Eliminate structures that aren't converged w.r.t. forces
# df_m = df_m[df_m["max_force"] < 0.05]

# df_m["name_i"] = df_m["name_i"].str.replace("_", " ")
# df_m["name_i"] = df_m["name_i"].str.replace("|", ",")

# grouped = df_m.groupby(["facet", "bulk_system"])
# group_dict = {}
# for i_ind, (name, group) in enumerate(grouped):
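# A possible completion of the commented-out grouping loop above (assumed
# intent: collect each (facet, bulk_system) group into group_dict by name):
#     group_dict[name] = group
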
# # Example #4

# *****************************************************************************

# Local Imports ***************************************************************
# *****************************************************************************
from an_data_processing import load_df

# Project Data
from proj_data_irox import (
    data_dir,
    )
# -

df_pourbaix, df_ads, df_surf = load_df(
    from_file=False,
    root_dir=data_dir,
    data_dir=data_dir + "/190103_new_job_df",
    file_name="df_master.pickle",
    process_df=True,
    )

# +
df_oh = df_ads[df_ads["adsorbate"] == "oh"]


# Collect the final image from each *OH relaxation trajectory
atoms_list = [traj_i[-1] for traj_i in df_oh["atoms_object"].tolist()]
# -

view(atoms_list)
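
# Optional alternative to the interactive viewer: write the final images to a
# trajectory file (the filename here is an assumption, not from the original)
from ase import io
io.write("oh_final_images.traj", atoms_list)
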
# # Example #5

import os

import pandas as pd
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)

from ase import io
from ase.visualize import view

import numpy as np

from an_data_processing import load_df
# -

# # Read/Process DataFrame

df_pourbaix, df_ads, df_surf = load_df(
    from_file=False,
    root_dir=os.path.join(os.environ["PROJ_irox"], "workflow/data"),
    data_dir=os.path.join(os.environ["PROJ_irox"], "workflow/data"),
    file_name="df_master.pickle",
    process_df=True,
    )

df_surf

from vasp.vasp_methods import parse_incar

parse_incar(df_ads.iloc[0].incar)

df_ads.iloc[0].incar_parsed
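
# Hypothetical follow-up: parse the INCAR settings for every adsorbate
# calculation at once (assumes each row's `incar` entry has the same format
# as the one passed above)
incar_tags_all = df_ads["incar"].apply(parse_incar)
incar_tags_all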
