def get_all_active_sites(
    slab=None,
    slab_id=None,
    bulk_id=None,
    df_coord_slab_i=None,
    ):
    """Find surface *O atoms that are plausible active sites on a slab.

    An O atom is counted as an active site when it lies within `dz` (4, in
    the position units of `slab`) of the topmost atom and is coordinated to
    exactly one metal (Ir) atom per the slab coordination dataframe.

    Args:
        slab: atoms object of the slab (`.positions`, `.symbol`, `.index`
            are used — presumably an ASE Atoms; confirm against callers).
        slab_id: key used to fetch the slab df_coord when not supplied.
        bulk_id: key used to fetch the bulk df_coord.
        df_coord_slab_i: optional precomputed slab coordination dataframe;
            fetched via `get_df_coord` when None.

    Returns:
        list: atom indices of the identified active *O sites.
    """
    #| - get_all_active_sites
    data_dict_i = dict()

    #| - Collecting df_coord objects
    if df_coord_slab_i is None:
        df_coord_slab_i = get_df_coord(slab_id=slab_id, mode="slab")
    # BUG FIX: the bulk df_coord must be fetched unconditionally. It was
    # previously fetched only when df_coord_slab_i was None, so passing a
    # precomputed slab df_coord raised NameError at `df_i = df_coord_bulk_i`.
    df_coord_bulk_i = get_df_coord(bulk_id=bulk_id, mode="bulk")
    #__|

    # #########################################################
    def method(row_i, metal_elem=None):
        # Number of `metal_elem` neighbors for this row (None if absent).
        neighbor_count = row_i.neighbor_count
        elem_num = neighbor_count.get(metal_elem, None)
        return (elem_num)

    df_i = df_coord_bulk_i
    df_i["num_metal"] = df_i.apply(method, axis=1, metal_elem="Ir")

    df_i = df_coord_slab_i
    df_i["num_metal"] = df_i.apply(method, axis=1, metal_elem="Ir")

    # #########################################################
    # Only O atoms within dz of the slab top are considered surface sites.
    dz = 4

    positions = slab.positions
    z_min = np.min(positions[:, 2])
    z_max = np.max(positions[:, 2])

    # #########################################################
    active_sites = []
    for atom in slab:
        if atom.symbol == "O":
            if atom.position[2] > z_max - dz:
                df_row_i = df_coord_slab_i[
                    df_coord_slab_i.structure_index == atom.index]
                df_row_i = df_row_i.iloc[0]

                num_metal = df_row_i.num_metal
                if num_metal == 1:
                    active_sites.append(atom.index)

    data_dict_i["active_sites"] = active_sites
    data_dict_i["num_active_sites"] = len(active_sites)

    return (active_sites)
    #__|
def get_bare_o_from_oh(
    compenv=None,
    slab_id=None,
    active_site=None,
    att_num=None,
    atoms=None,
    ):
    """Construct bare and *O slabs from an *OH slab by deleting adsorbate atoms.

    Locates the single *H atom via the post-DFT coordination dataframe,
    finds the *O it is bonded to, then returns two copies of `atoms`:
    one with both the H and its O removed (bare) and one with only the H
    removed (*O covered).

    Args:
        compenv, slab_id, active_site, att_num: components of the
            (compenv, slab_id, "oh", active_site, att_num) df_coord key.
        atoms: slab atoms object (presumably ASE Atoms — deletion uses a
            boolean mask via `del atoms[mask]`).

    Returns:
        dict with keys "atoms_bare" and "atoms_O".
    """
    #| - get_bare_o_from_oh
    # #####################################################
    compenv_i = compenv
    slab_id_i = slab_id
    active_site_i = active_site
    att_num_i = att_num
    # #####################################################

    name_i = (compenv_i, slab_id_i, "oh", active_site_i, att_num_i, )

    def _h_site_info(porous_adjustment):
        # Fetch df_coord and return (index, nn_info) of the single *H atom.
        df_coord_i = get_df_coord(
            slab_id=None,
            bulk_id=None,
            mode="post-dft",  # 'bulk', 'slab', 'post-dft'
            slab=None,
            post_dft_name_tuple=name_i,
            porous_adjustment=porous_adjustment,
            )

        row_coord_i = df_coord_i[df_coord_i.element == "H"]
        assert row_coord_i.shape[0] == 1, \
            "Expected exactly one *H atom in the *OH slab"
        row_coord_i = row_coord_i.iloc[0]

        return (row_coord_i.structure_index, row_coord_i.nn_info)

    h_index_i, nn_info_i = _h_site_info(True)

    # If H appears bonded to more than one atom, retry with the porous
    # adjustment turned off, which should yield a single O-H bond.
    if len(nn_info_i) != 1:
        h_index_i, nn_info_i = _h_site_info(False)
        assert len(nn_info_i) == 1, \
            "Should only be 1 *O atom attached to *H here"

    nn_info_j = nn_info_i[0]
    site_j = nn_info_j["site"]
    elem_j = site_j.specie.as_dict()["element"]

    assert elem_j == "O", "Must be an *O atom that *H is attached to"

    site_index_j = nn_info_j["site_index"]

    # #########################################################
    def _remove_atoms(indices_to_remove):
        # Return a deep copy of `atoms` with the given indices deleted.
        atoms_new = copy.deepcopy(atoms)
        mask = [atom.index in indices_to_remove for atom in atoms_new]
        del atoms_new[mask]
        return atoms_new

    atoms_bare = _remove_atoms([site_index_j, h_index_i])
    atoms_O = _remove_atoms([h_index_i])

    # #####################################################
    out_dict = dict()
    # #####################################################
    out_dict["atoms_bare"] = atoms_bare
    out_dict["atoms_O"] = atoms_O
    # #####################################################
    return (out_dict)
    #__|
# ##################################################### name_i = (compenv_i, slab_id_i, ads_i, active_site_i, att_num_i, ) # ##################################################### # ##################################################### for site_i in active_sites_unique_i: # print("site_i:", site_i) # ################################################# # ################################################# df_coord_i = get_df_coord( mode="post-dft", # 'bulk', 'slab', 'post-dft' post_dft_name_tuple=name_i, ) oh_slabs_list = get_ads_pos_oh( atoms=atoms, site_i=site_i, df_coord_i=df_coord_i, # ######################### include_colinear=True, verbose=False, num_side_ads=3, ) for att_num_oh_j, slab_oh_j in enumerate(oh_slabs_list): # ############################################# data_dict_i = dict()
metal_atom_symbol=metal_atom_symbol # + jupyter={"outputs_hidden": true} # def get_unique_active_sites( # slab=None, # active_sites=None, # bulk_id=None, # facet=None, # slab_id=None, # metal_atom_symbol=None, # ): """ """ #| - get_unique_active_sites df_coord_slab_i = get_df_coord(slab_id=slab_id, mode="slab") df_coord_bulk_i = get_df_coord(bulk_id=bulk_id, mode="bulk") # ######################################################### custom_name_pre = bulk_id + "__" + facet + "__" + slab_id df_rdf_dict = dict() for i in active_sites: print("active_site:", i) df_rdf_i = process_rdf( atoms=slab, active_site_i=i, df_coord_slab_i=df_coord_slab_i,
def get_effective_ox_state(
    name=None,
    active_site=None,
    df_coord_i=None,
    metal_atom_symbol="Ir",
    active_site_original=None,
    verbose=True,
    ):
    """Compute the effective oxidation state of the metal bound to an active *O.

    For the metal atom bonded to `active_site`, sums 2 / (metal coordination)
    over its 6 oxygen neighbors (a bond-valence-style count). When the
    relaxed structure is missing metal-O bonds, the unrelaxed (init-slab)
    df_coord is consulted to recover the missing oxygens.

    Args:
        name: (compenv, slab_id, ads, active_site, att_num) key tuple.
        active_site: structure index of the active *O atom.
        df_coord_i: coordination dataframe of the relaxed slab.
        metal_atom_symbol: symbol of the active metal (default "Ir").
        active_site_original: site id used to build the init-slab key.
        verbose: print diagnostics when the site cannot be processed.

    Returns:
        dict with keys: effective_ox_state, used_unrelaxed_df_coord,
        num_missing_Os, orig_slab_good, found_active_Ir.
    """
    #| - get_effective_ox_state
    # #########################################################
    name_i = name
    active_site_j = active_site
    # #########################################################
    compenv_i = name_i[0]
    slab_id_i = name_i[1]
    ads_i = name_i[2]
    active_site_i = name_i[3]
    att_num_i = name_i[4]
    # #########################################################

    #| - Processing central Ir atom nn_info
    df_coord_i = df_coord_i.set_index("structure_index", drop=False)

    row_coord_i = df_coord_i.loc[active_site_j]

    nn_info_i = row_coord_i.nn_info
    neighbor_count_i = row_coord_i.neighbor_count

    num_Ir_neigh = neighbor_count_i.get("Ir", 0)

    if num_Ir_neigh != 1:
        # Only active sites with exactly one Ir neighbor are handled.
        if verbose:
            mess_i = "TEMP | For now only deal with active sites that have 1 Ir neighbor"
            print(mess_i)

        # #################################################
        out_dict = dict()
        # #################################################
        out_dict["effective_ox_state"] = None
        out_dict["used_unrelaxed_df_coord"] = None
        out_dict["num_missing_Os"] = None
        out_dict["orig_slab_good"] = None
        out_dict["found_active_Ir"] = False
        # #################################################
        return (out_dict)

    # Locate the (single, guaranteed by the check above) metal neighbor.
    for j_cnt, nn_j in enumerate(nn_info_i):
        site_j = nn_j["site"]
        elem_j = site_j.as_dict()["species"][0]["element"]
        if elem_j == metal_atom_symbol:
            corr_j_cnt = j_cnt

    site_j = nn_info_i[corr_j_cnt]
    metal_index = site_j["site_index"]
    #__|

    # #########################################################
    row_coord_i = df_coord_i.loc[metal_index]

    neighbor_count_i = row_coord_i["neighbor_count"]
    nn_info_i = row_coord_i.nn_info
    num_neighbors_i = row_coord_i.num_neighbors

    num_O_neigh = neighbor_count_i.get("O", 0)

    # There should be exactly 6 oxygens (and only oxygens) about the Ir atom.
    six_O_neigh = num_O_neigh == 6
    six_neigh = num_neighbors_i == 6

    # BUG FIX: initialize so the healthy path (6 O neighbors, 6 total)
    # does not hit a NameError when building out_dict below.
    orig_slab_good_i = None

    skip_this_sys = False
    if not six_O_neigh or not six_neigh:
        skip_this_sys = True

        #| - If missing some O's then go back to slab before DFT and get missing O
        from methods import get_df_coord

        init_slab_name_tuple_i = (
            compenv_i, slab_id_i, ads_i,
            active_site_original, att_num_i,
            )
        df_coord_orig_slab = get_df_coord(
            mode="init-slab",
            init_slab_name_tuple=init_slab_name_tuple_i,
            )

        orig_slab_good_i = original_slab_is_good(
            nn_info=nn_info_i,
            metal_index=metal_index,
            df_coord_orig_slab=df_coord_orig_slab,
            )
        #__|

    num_missing_Os = 0
    used_unrelaxed_df_coord = False
    if not six_O_neigh:
        used_unrelaxed_df_coord = True

        from methods import get_df_coord

        init_slab_name_tuple_i = (
            compenv_i, slab_id_i, ads_i,
            active_site_original, att_num_i,
            )
        df_coord_orig_slab = get_df_coord(
            mode="init-slab",
            init_slab_name_tuple=init_slab_name_tuple_i,
            )

        out_dict_0 = find_missing_O_neigh_with_init_df_coord(
            nn_info=nn_info_i,
            metal_index=metal_index,
            df_coord_orig_slab=df_coord_orig_slab,
            )
        new_nn_info_i = out_dict_0["nn_info"]
        num_missing_Os = out_dict_0["num_missing_Os"]
        orig_slab_good_i = out_dict_0["orig_slab_good"]

        nn_info_i = new_nn_info_i

        # Recovery succeeded only if an nn_info list came back.
        if new_nn_info_i is not None:
            skip_this_sys = False
        else:
            skip_this_sys = True

    # #####################################################
    effective_ox_state = None
    if not skip_this_sys:
        #| - Iterating through 6 oxygens
        orig_df_coord_was_used = False
        second_shell_coord_list = []
        tmp_list = []
        for nn_j in nn_info_i:
            site_index = nn_j["site_index"]

            #| - Fixing bond number of missing *O
            # If Ir was missing an *O bond, the neighbor count for that O
            # may be undercounted. Compensate (+1) only when the O's own
            # nn list does not already contain the active Ir.
            # Happened for this system: ('slac', 'ralutiwa_59', 'o', 31.0, 1)
            from_orig_df_coord = nn_j.get("from_orig_df_coord", False)

            active_metal_in_nn_list = False
            if from_orig_df_coord:
                orig_df_coord_was_used = True
                Ir_neigh_adjustment = 1
                for i in df_coord_i.loc[site_index].nn_info:
                    if i["site_index"] == metal_index:
                        active_metal_in_nn_list = True
                if active_metal_in_nn_list:
                    Ir_neigh_adjustment = 0
            else:
                Ir_neigh_adjustment = 0
            #__|

            oxy_ind = site_index
            num_metal_neigh_2 = get_num_metal_neigh_manually(
                oxy_ind,
                df_coord=df_coord_i,
                metal_atom_symbol=metal_atom_symbol)

            num_metal_neigh_2 += Ir_neigh_adjustment

            tmp_list.append(2 / num_metal_neigh_2)
            second_shell_coord_list.append(num_metal_neigh_2)

        effective_ox_state = np.sum(tmp_list)
        #__|

    # #####################################################
    out_dict = dict()
    # #####################################################
    out_dict["effective_ox_state"] = effective_ox_state
    out_dict["used_unrelaxed_df_coord"] = used_unrelaxed_df_coord
    out_dict["num_missing_Os"] = num_missing_Os
    out_dict["orig_slab_good"] = orig_slab_good_i
    out_dict["found_active_Ir"] = True
    # #####################################################
    return (out_dict)
    #__|
# ##################################################### row_atoms_i = df_atoms_sorted_ind.loc[name_new_i] # ##################################################### magmoms_i = row_atoms_i.magmoms_sorted_good atoms_i = row_atoms_i.atoms_sorted_good # ##################################################### if magmoms_i is None: magmoms_i = atoms_i.get_magnetic_moments() magmom_active_site_i = magmoms_i[int(active_site_i)] init_name_i = (compenv_i, slab_id_i, "o", "NaN", 1) df_coord_i = get_df_coord( mode='init-slab', init_slab_name_tuple=init_name_i, ) row_coord_i = df_coord_i.loc[active_site_i] Ir_nn_found = False nn_Ir = None for nn_i in row_coord_i["nn_info"]: symbol_i = nn_i["site"].specie.symbol if symbol_i == "Ir": nn_Ir = nn_i Ir_nn_found = True Ir_bader_charge_i = None if Ir_nn_found: Ir_index = nn_Ir["site_index"]
# ##################################################### # name_dict_i = dict(zip(list(df_jobs_anal_i.index.names), name_i)) name_dict_i = dict(zip(list(df_jobs_anal_i_2.index.names), name_i)) # ##################################################### # ##################################################### row_atoms_sorted_i = df_atoms_sorted_ind.loc[name_i] # ##################################################### atoms_sorted_good_i = row_atoms_sorted_i.atoms_sorted_good failed_to_sort_i = row_atoms_sorted_i.failed_to_sort # ##################################################### if not failed_to_sort_i: df_coord_i = get_df_coord( mode="post-dft", # 'bulk', 'slab', 'post-dft' post_dft_name_tuple=name_i, ) if df_coord_i is None: if verbose: print("No df_coord found, running") # ################################################# # Get df_coord for post-dft, sorted slab df_coord_i = get_structure_coord_df( atoms_sorted_good_i, porous_adjustment=True, ) # Pickling data ################################### file_name_i = "_".join([str(i) for i in list(name_new_i)]) + ".pickle" file_path_i = os.path.join(directory, file_name_i)
# sep="") # ##################################################### # Get df_coord to find nearest neighbors init_slab_name_tuple_i = ( name_i[0], name_i[1], "o", # name_i[2], active_site_o_i, att_num_i ) df_coord_i = get_df_coord( mode="init-slab", # 'bulk', 'slab', 'post-dft', 'init-slab' init_slab_name_tuple=init_slab_name_tuple_i, verbose=False, ) # ##################################################### row_coord_i = df_coord_i.loc[active_site_i] # ##################################################### nn_info_i = row_coord_i.nn_info # ##################################################### # assert len(nn_info_i) == 1, "Only one bound Ir" ir_nn_present = False for j_cnt, nn_j in enumerate(nn_info_i): if nn_j["site"].specie.symbol == "Ir":
def get_unique_active_sites_temp(
    slab=None,
    active_sites=None,
    bulk_id=None,
    facet=None,
    slab_id=None,
    metal_atom_symbol=None,
    df_coord_slab_i=None,
    create_heatmap_plot=False,
    ):
    """Deduplicate active sites by comparing their radial distribution functions.

    Computes an RDF for every active site, builds a pairwise difference
    matrix, and drops sites whose RDF difference to an earlier site is below
    `diff_threshold` (0.2). Intermediate results are pickled under
    $PROJ_irox_oer/workflow/enumerate_adsorption.

    Args:
        slab: slab atoms object passed through to `process_rdf`.
        active_sites: iterable (list-like; `.remove` is used) of site indices.
        bulk_id, facet, slab_id: identifiers; combined into output filenames.
        metal_atom_symbol: metal symbol passed through to `process_rdf`.
        df_coord_slab_i: optional precomputed slab df_coord.
        create_heatmap_plot: if True, write a plotly heatmap of the matrix.

    Returns:
        list of active site indices with near-duplicates removed.
    """
    #| - get_unique_active_sites
    import os
    import pickle

    if df_coord_slab_i is None:
        df_coord_slab_i = get_df_coord(slab_id=slab_id, mode="slab")
    df_coord_bulk_i = get_df_coord(bulk_id=bulk_id, mode="bulk")

    # #########################################################
    custom_name_pre = bulk_id + "__" + facet + "__" + slab_id

    df_rdf_dict = dict()
    for i in active_sites:
        df_rdf_i = process_rdf(
            atoms=slab,
            active_site_i=i,
            df_coord_slab_i=df_coord_slab_i,
            metal_atom_symbol=metal_atom_symbol,
            custom_name=custom_name_pre,
            )
        df_rdf_dict[i] = df_rdf_i

    # Saving df_rdf_dict
    # Pickling data ###########################################
    directory = os.path.join(
        os.environ["PROJ_irox_oer"],
        "workflow/enumerate_adsorption",
        "out_data/df_rdf_dict",
        )
    # exist_ok avoids the prior racy exists()/makedirs() pattern.
    os.makedirs(directory, exist_ok=True)
    with open(os.path.join(directory, custom_name_pre + ".pickle"), "wb") as fle:
        pickle.dump(df_rdf_dict, fle)

    # #########################################################
    # Pairwise RDF difference matrix over all active sites.
    diff_rdf_matrix = np.empty((
        len(active_sites), len(active_sites),
        ))
    diff_rdf_matrix[:] = np.nan
    for i_cnt, active_site_i in enumerate(active_sites):
        df_rdf_i = df_rdf_dict[active_site_i]
        for j_cnt, active_site_j in enumerate(active_sites):
            df_rdf_j = df_rdf_dict[active_site_j]
            diff_i = compare_rdf_ij(
                df_rdf_i=df_rdf_i,
                df_rdf_j=df_rdf_j,
                )
            diff_rdf_matrix[i_cnt, j_cnt] = diff_i

    # #########################################################
    df_rdf_ij = pd.DataFrame(diff_rdf_matrix, columns=active_sites)
    df_rdf_ij.index = active_sites

    # Pickling data ###########################################
    directory = os.path.join(
        os.environ["PROJ_irox_oer"],
        "workflow/enumerate_adsorption",
        "out_data/df_rdf_ij",
        )
    os.makedirs(directory, exist_ok=True)
    with open(os.path.join(directory, custom_name_pre + ".pickle"), "wb") as fle:
        pickle.dump(df_rdf_ij, fle)

    # #########################################################
    # Greedy dedup: keep the first site of each near-identical pair.
    active_sites_cpy = copy.deepcopy(active_sites)

    diff_threshold = 0.2
    duplicate_active_sites = []
    for active_site_i in active_sites:
        if active_site_i in duplicate_active_sites:
            continue
        for active_site_j in active_sites:
            if active_site_i == active_site_j:
                continue
            diff_ij = df_rdf_ij.loc[active_site_i, active_site_j]
            if diff_ij < diff_threshold:
                try:
                    active_sites_cpy.remove(active_site_j)
                    duplicate_active_sites.append(active_site_j)
                except ValueError:
                    # Site was already removed in an earlier pass; the bare
                    # `except: pass` here previously hid unrelated errors.
                    pass

    active_sites_unique = active_sites_cpy

    # #########################################################
    #| - Creating Figure
    if create_heatmap_plot:
        import plotly.graph_objects as go

        active_sites_str = [str(i) for i in active_sites]
        fig = go.Figure(data=go.Heatmap(
            z=df_rdf_ij.to_numpy(),
            x=active_sites_str,
            y=active_sites_str,
            xgap=3,
            ygap=3,
            ))
        fig["layout"]["xaxis"]["type"] = "category"
        fig["layout"]["yaxis"]["type"] = "category"

        directory = os.path.join(
            os.environ["PROJ_irox_oer"],
            "workflow/enumerate_adsorption",
            "out_data/rdf_heat_maps_1",
            )
        os.makedirs(directory, exist_ok=True)

        # Save path is the directory minus its leading component.
        save_dir = os.path.join(
            "/".join(directory.split("/")[1:]),
            )
        file_name = custom_name_pre + "_rdf_diff_heat_map"
        print(file_name)

        my_plotly_plot(
            figure=fig,
            save_dir=save_dir,
            place_in_out_plot=False,
            plot_name=file_name,
            write_html=True,
            write_png=False,
            png_scale=6.0,
            write_pdf=False,
            write_svg=False,
            try_orca_write=False,
            )
    #__|

    return (active_sites_unique)
    #__|
def get_unique_active_sites(
    slab=None,
    active_sites=None,
    bulk_id=None,
    facet=None,
    slab_id=None,
    metal_atom_symbol=None,
    ):
    """Deduplicate active sites by comparing their radial distribution functions.

    Computes an RDF per active site, builds the pairwise difference matrix,
    removes sites within `diff_threshold` (0.3) of an earlier site, and
    writes a heatmap of the matrix.

    Args:
        slab: slab atoms object passed through to `process_rdf`.
        active_sites: iterable (list-like; `.remove` is used) of site indices.
        bulk_id, facet, slab_id: identifiers; combined into plot filenames.
        metal_atom_symbol: metal symbol passed through to `process_rdf`.

    Returns:
        list of active site indices with near-duplicates removed.
    """
    #| - get_unique_active_sites
    df_coord_slab_i = get_df_coord(slab_id=slab_id, mode="slab")
    df_coord_bulk_i = get_df_coord(bulk_id=bulk_id, mode="bulk")

    # #########################################################
    custom_name_pre = bulk_id + "__" + facet + "__" + slab_id

    df_rdf_dict = dict()
    for i in active_sites:
        df_rdf_i = process_rdf(
            atoms=slab,
            active_site_i=i,
            df_coord_slab_i=df_coord_slab_i,
            metal_atom_symbol=metal_atom_symbol,
            custom_name=custom_name_pre,
            )
        df_rdf_dict[i] = df_rdf_i

    # #########################################################
    # Pairwise RDF difference matrix over all active sites.
    diff_rdf_matrix = np.empty((
        len(active_sites), len(active_sites),
        ))
    diff_rdf_matrix[:] = np.nan
    for i_cnt, active_site_i in enumerate(active_sites):
        df_rdf_i = df_rdf_dict[active_site_i]
        for j_cnt, active_site_j in enumerate(active_sites):
            df_rdf_j = df_rdf_dict[active_site_j]
            diff_i = compare_rdf_ij(
                df_rdf_i=df_rdf_i,
                df_rdf_j=df_rdf_j,
                )
            diff_rdf_matrix[i_cnt, j_cnt] = diff_i

    # #########################################################
    df_rdf_ij = pd.DataFrame(diff_rdf_matrix, columns=active_sites)
    df_rdf_ij.index = active_sites

    # #########################################################
    # Greedy dedup: keep the first site of each near-identical pair.
    active_sites_cpy = copy.deepcopy(active_sites)

    diff_threshold = 0.3
    duplicate_active_sites = []
    for active_site_i in active_sites:
        if active_site_i in duplicate_active_sites:
            continue
        for active_site_j in active_sites:
            if active_site_i == active_site_j:
                continue
            diff_ij = df_rdf_ij.loc[active_site_i, active_site_j]
            if diff_ij < diff_threshold:
                try:
                    active_sites_cpy.remove(active_site_j)
                    duplicate_active_sites.append(active_site_j)
                except ValueError:
                    # Site was already removed in an earlier pass; the bare
                    # `except: pass` here previously hid unrelated errors.
                    pass

    active_sites_unique = active_sites_cpy

    # #########################################################
    #| - Plotting heat map
    active_sites_str = [str(i) for i in active_sites]
    fig = go.Figure(data=go.Heatmap(
        z=df_rdf_ij.to_numpy(),
        x=active_sites_str,
        y=active_sites_str,
        ))
    fig["layout"]["xaxis"]["type"] = "category"
    fig["layout"]["yaxis"]["type"] = "category"

    directory = "out_plot/rdf_heat_maps_1"
    # FIX: the previous `assert False, "Fix os.makedirs"` sentinel made
    # everything past this point unreachable; exist_ok resolves the TODO.
    os.makedirs(directory, exist_ok=True)

    file_name = "rdf_heat_maps/" + custom_name_pre + "_rdf_diff_heat_map"
    my_plotly_plot(
        figure=fig,
        plot_name=file_name,
        write_html=True,
        write_png=False,
        png_scale=6.0,
        write_pdf=False,
        write_svg=False,
        try_orca_write=False,
        )
    #__|

    return (active_sites_unique)
    #__|
def get_data_for_Bader_methods(
    path=None,
    df_atoms_sorted_ind=None,
    compenv=None,
    slab_id=None,
    ads=None,
    active_site=None,
    att_num_bader=None,
    verbose=None,
    ):
    """Resolve remapped atom indices needed by the Bader-charge methods.

    This is pre-work for get_active_Bader_charges_1 and
    get_active_Bader_charges_2: atoms objects get shuffled, so the active
    site and its Ir neighbor must be translated through the stored
    atom_index_mapping.

    Args:
        path: unused here (kept for interface compatibility with callers).
        df_atoms_sorted_ind: dataframe indexed by
            ("dos_bader", compenv, slab_id, ads, active_site, att_num_bader)
            holding `atom_index_mapping`.
        compenv, slab_id, ads, active_site, att_num_bader: row key parts.
        verbose: forwarded to get_df_coord.

    Returns:
        dict with keys: active_Ir_index, active_Ir_index_mapped,
        new_active_site. The Ir entries are None when no Ir neighbor of the
        active site is found.
    """
    #| - get_active_Bader_charges_1
    # Get the new active site number to use (atoms objects get shuffled around)
    # #############################################
    row_atoms_i = df_atoms_sorted_ind.loc[(
        "dos_bader", compenv, slab_id, ads, active_site, att_num_bader,
        )]
    # #############################################
    atom_index_mapping_i = row_atoms_i.atom_index_mapping
    # #############################################

    # Invert the mapping: sorted-index -> original-index becomes
    # original-index -> sorted-index (assumed; confirm against writer).
    atom_index_mapping_i = {v: k for k, v in atom_index_mapping_i.items()}
    new_active_site = atom_index_mapping_i[active_site]

    init_slab_tuple = (compenv, slab_id, ads, "NaN", 1, )
    df_coord_i = get_df_coord(
        mode="init-slab",  # 'bulk', 'slab', 'post-dft', 'init-slab'
        init_slab_name_tuple=init_slab_tuple,
        verbose=verbose,
        )

    row_coord_i = df_coord_i.loc[active_site]

    # Find the Ir atom bonded to the active site (last match wins, as before).
    Ir_nn_found = False
    nn_Ir = None
    for nn_i in row_coord_i["nn_info"]:
        symbol_i = nn_i["site"].specie.symbol
        if symbol_i == "Ir":
            nn_Ir = nn_i
            Ir_nn_found = True

    if Ir_nn_found:
        Ir_index = nn_Ir["site_index"]
        Ir_index_mapped = atom_index_mapping_i[Ir_index]
    else:
        # BUG FIX: previously Ir_index was left unbound here, so building
        # out_dict below raised NameError right after this print.
        print("Ir not found")
        Ir_index = None
        Ir_index_mapped = None

    # #####################################################
    out_dict = dict()
    # #####################################################
    out_dict["active_Ir_index"] = Ir_index
    out_dict["active_Ir_index_mapped"] = Ir_index_mapped
    out_dict["new_active_site"] = new_active_site
    # #####################################################
    return (out_dict)
    #__|
atoms_init_i = row_data_i.init_atoms # ######################################################### from_oh_i = row_data_i.rerun_from_oh if np.isnan(from_oh_i): from_oh_i = False active_site_orig_i = "NaN" df_coord_i = get_df_coord( mode="init-slab", # 'bulk', 'slab', 'post-dft', 'init-slab' init_slab_name_tuple=( compenv_i, slab_id_i, ads_i, active_site_orig_i, att_num_i, ), verbose=True, ) for active_site_i in group["active_site"].tolist(): # for active_site_i in [95.0, ]: name_tmp_i = ( compenv_i, slab_id_i, ads_i, active_site_i, att_num_i, from_oh_i, ) row_octa_info_i = df_octa_info.loc[name_tmp_i] octahedra_atoms_i = row_octa_info_i.octahedra_atoms metal_active_site_i = row_octa_info_i.metal_active_site
def get_effective_ox_state__test(
    name=None,
    active_site=None,
    df_coord_i=None,
    metal_atom_symbol="Ir",
    active_site_original=None,
    ):
    """Debug variant of get_effective_ox_state that also reports per-O neighbor counts.

    Same bond-valence-style counting as get_effective_ox_state, but asserts
    (instead of returning early) on the 1-Ir-neighbor requirement, prints
    each oxygen's Ir-neighbor count, and returns them in `neigh_dict`.

    Args:
        name: (compenv, slab_id, ads, active_site, att_num) key tuple.
        active_site: structure index of the active *O atom.
        df_coord_i: coordination dataframe of the relaxed slab.
        metal_atom_symbol: symbol of the active metal (default "Ir").
        active_site_original: site id used to build the init-slab key.

    Returns:
        dict with keys: effective_ox_state, used_unrelaxed_df_coord,
        num_missing_Os, orig_slab_good, neigh_dict.
    """
    #| - get_effective_ox_state
    # #########################################################
    name_i = name
    active_site_j = active_site
    # #########################################################
    compenv_i = name_i[0]
    slab_id_i = name_i[1]
    ads_i = name_i[2]
    active_site_i = name_i[3]
    att_num_i = name_i[4]
    # #########################################################

    #| - Processing central Ir atom nn_info
    df_coord_i = df_coord_i.set_index("structure_index", drop=False)

    row_coord_i = df_coord_i.loc[active_site_j]

    nn_info_i = row_coord_i.nn_info
    neighbor_count_i = row_coord_i.neighbor_count

    num_Ir_neigh = neighbor_count_i.get("Ir", 0)
    mess_i = "For now only deal with active sites that have 1 Ir neighbor"
    assert num_Ir_neigh == 1, mess_i

    # Locate the (single, guaranteed by the assert) metal neighbor.
    for j_cnt, nn_j in enumerate(nn_info_i):
        site_j = nn_j["site"]
        elem_j = site_j.as_dict()["species"][0]["element"]
        if elem_j == metal_atom_symbol:
            corr_j_cnt = j_cnt

    site_j = nn_info_i[corr_j_cnt]
    metal_index = site_j["site_index"]
    #__|

    # #########################################################
    row_coord_i = df_coord_i.loc[metal_index]

    neighbor_count_i = row_coord_i["neighbor_count"]
    nn_info_i = row_coord_i.nn_info
    num_neighbors_i = row_coord_i.num_neighbors

    num_O_neigh = neighbor_count_i.get("O", 0)

    # There should be exactly 6 oxygens (and only oxygens) about the Ir atom.
    six_O_neigh = num_O_neigh == 6
    six_neigh = num_neighbors_i == 6

    # BUG FIX: initialize so the healthy path (6 O neighbors, 6 total)
    # does not hit a NameError when building out_dict below.
    orig_slab_good_i = None

    skip_this_sys = False
    if not six_O_neigh or not six_neigh:
        skip_this_sys = True

        from methods import get_df_coord

        init_slab_name_tuple_i = (
            compenv_i, slab_id_i, ads_i,
            active_site_original, att_num_i,
            )
        # print("init_slab_name_tuple_i:", init_slab_name_tuple_i)
        df_coord_orig_slab = get_df_coord(
            mode="init-slab",
            init_slab_name_tuple=init_slab_name_tuple_i,
            )

        orig_slab_good_i = original_slab_is_good(
            nn_info=nn_info_i,
            metal_index=metal_index,
            df_coord_orig_slab=df_coord_orig_slab,
            )

    num_missing_Os = 0
    used_unrelaxed_df_coord = False
    if not six_O_neigh:
        used_unrelaxed_df_coord = True

        from methods import get_df_coord

        init_slab_name_tuple_i = (
            compenv_i, slab_id_i, ads_i,
            active_site_original, att_num_i,
            )
        df_coord_orig_slab = get_df_coord(
            mode="init-slab",
            init_slab_name_tuple=init_slab_name_tuple_i,
            )

        out_dict_0 = find_missing_O_neigh_with_init_df_coord(
            nn_info=nn_info_i,
            slab_id=slab_id_i,
            metal_index=metal_index,
            df_coord_orig_slab=df_coord_orig_slab,
            )
        new_nn_info_i = out_dict_0["nn_info"]
        num_missing_Os = out_dict_0["num_missing_Os"]
        orig_slab_good_i = out_dict_0["orig_slab_good"]

        nn_info_i = new_nn_info_i

        # Recovery succeeded only if an nn_info list came back.
        if new_nn_info_i is not None:
            skip_this_sys = False
        else:
            skip_this_sys = True

    # #####################################################
    effective_ox_state = None
    # BUG FIX: neigh_dict must exist even when the system is skipped,
    # since it is read unconditionally below and returned in out_dict.
    neigh_dict = dict()
    if not skip_this_sys:
        #| - Iterating through 6 oxygens
        second_shell_coord_list = []
        tmp_list = []
        for nn_j in nn_info_i:
            # BUG FIX: site_index was previously assigned *after* the
            # from_orig_df_coord branch that reads it, raising NameError
            # (the production get_effective_ox_state assigns it first).
            site_index = nn_j["site_index"]

            from_orig_df_coord = nn_j.get("from_orig_df_coord", False)
            if from_orig_df_coord:
                Ir_neigh_adjustment = 1
                active_metal_in_nn_list = False
                for i in df_coord_i.loc[site_index].nn_info:
                    if i["site_index"] == metal_index:
                        active_metal_in_nn_list = True
                if active_metal_in_nn_list:
                    Ir_neigh_adjustment = 0
            else:
                Ir_neigh_adjustment = 0

            row_coord_j = df_coord_i.loc[site_index]
            neighbor_count_j = row_coord_j.neighbor_count

            num_Ir_neigh_j = neighbor_count_j.get("Ir", 0)
            neigh_dict[site_index] = num_Ir_neigh_j

            num_Ir_neigh_j += Ir_neigh_adjustment

            second_shell_coord_list.append(num_Ir_neigh_j)
            tmp_list.append(2 / num_Ir_neigh_j)

        effective_ox_state = np.sum(tmp_list)
        #__|

    # Debug output: per-oxygen Ir-neighbor counts, sorted by site index.
    neigh_keys = list(neigh_dict.keys())
    for i in np.sort(neigh_keys):
        print(i, "|", neigh_dict[i])

    # #####################################################
    out_dict = dict()
    # #####################################################
    out_dict["effective_ox_state"] = effective_ox_state
    out_dict["used_unrelaxed_df_coord"] = used_unrelaxed_df_coord
    out_dict["num_missing_Os"] = num_missing_Os
    out_dict["orig_slab_good"] = orig_slab_good_i
    out_dict["neigh_dict"] = neigh_dict
    # #####################################################
    return (out_dict)
    #__|
# ##################################################### data_dict_i = dict() # ##################################################### row_i = df_slab.loc[slab_id] # ##################################################### slab = row_i.slab_final slab_id = row_i.name bulk_id = row_i.bulk_id facet = row_i.facet num_atoms = row_i.num_atoms # ##################################################### # ################################################# df_coord_slab_i = get_df_coord( slab_id=slab_id, mode="slab", slab=slab, ) # ################################################# active_sites = get_all_active_sites( slab=slab, slab_id=slab_id, bulk_id=bulk_id, df_coord_slab_i=df_coord_slab_i, ) # ################################################# # active_sites_unique = get_unique_active_sites( active_sites_unique = get_unique_active_sites_temp( slab=slab,