def get_unique_active_sites(
    slab=None,
    active_sites=None,
    bulk_id=None,
    facet=None,
    slab_id=None,
    metal_atom_symbol=None,
):
    """Return the subset of `active_sites` that are structurally unique.

    An RDF (radial distribution function) is computed around each active
    site of `slab` with `process_rdf`; sites whose pairwise RDF difference
    (from `compare_rdf_ij`) falls below `diff_threshold` are treated as
    duplicates and dropped.  A heat map of the pairwise difference matrix
    is written under out_plot/.

    Args:
        slab: atoms object of the slab the sites belong to.
        active_sites: sequence of active-site indices to de-duplicate.
        bulk_id: bulk structure id, used in output file names.
        facet: facet string, used in output file names.
        slab_id: slab id, used for df_coord lookup and file names.
        metal_atom_symbol: chemical symbol of the metal atom, forwarded
            to process_rdf.

    Returns:
        Sequence of active-site indices with duplicates removed.
    """
    #| - get_unique_active_sites
    df_coord_slab_i = get_df_coord(slab_id=slab_id, mode="slab")
    df_coord_bulk_i = get_df_coord(bulk_id=bulk_id, mode="bulk")

    custom_name_pre = bulk_id + "__" + facet + "__" + slab_id

    # Compute one RDF per active site
    df_rdf_dict = dict()
    for i in active_sites:
        df_rdf_i = process_rdf(
            atoms=slab,
            active_site_i=i,
            df_coord_slab_i=df_coord_slab_i,
            metal_atom_symbol=metal_atom_symbol,
            custom_name=custom_name_pre,
            )
        df_rdf_dict[i] = df_rdf_i

    # Pairwise RDF difference matrix (NaN-initialized, then filled)
    diff_rdf_matrix = np.empty((
        len(active_sites),
        len(active_sites),
        ))
    diff_rdf_matrix[:] = np.nan
    for i_cnt, active_site_i in enumerate(active_sites):
        df_rdf_i = df_rdf_dict[active_site_i]
        for j_cnt, active_site_j in enumerate(active_sites):
            df_rdf_j = df_rdf_dict[active_site_j]
            diff_i = compare_rdf_ij(
                df_rdf_i=df_rdf_i,
                df_rdf_j=df_rdf_j,
                )
            diff_rdf_matrix[i_cnt, j_cnt] = diff_i

    df_rdf_ij = pd.DataFrame(diff_rdf_matrix, columns=active_sites)
    df_rdf_ij.index = active_sites

    # Drop sites whose RDF difference to another site is below threshold
    active_sites_cpy = copy.deepcopy(active_sites)
    diff_threshold = 0.3
    duplicate_active_sites = []
    for active_site_i in active_sites:
        if active_site_i in duplicate_active_sites:
            continue
        for active_site_j in active_sites:
            if active_site_i == active_site_j:
                continue
            diff_ij = df_rdf_ij.loc[active_site_i, active_site_j]
            if diff_ij < diff_threshold:
                # FIX(review): was a bare `except: pass`; narrowed to the
                # two failure modes seen here (already removed, or
                # active_sites is an array type without .remove)
                try:
                    active_sites_cpy.remove(active_site_j)
                    duplicate_active_sites.append(active_site_j)
                except (ValueError, AttributeError):
                    pass

    active_sites_unique = active_sites_cpy

    #| - Plotting heat map
    active_sites_str = [str(i) for i in active_sites]
    fig = go.Figure(data=go.Heatmap(
        z=df_rdf_ij.to_numpy(),
        x=active_sites_str,
        y=active_sites_str,
        ))
    fig["layout"]["xaxis"]["type"] = "category"
    fig["layout"]["yaxis"]["type"] = "category"

    # FIX(review): removed the unconditional `assert False, "Fix
    # os.makedirs"` that made this function always raise here; the
    # directory is now created race-free with exist_ok=True.
    directory = "out_plot/rdf_heat_maps_1"
    os.makedirs(directory, exist_ok=True)

    file_name = "rdf_heat_maps/" + custom_name_pre + "_rdf_diff_heat_map"
    my_plotly_plot(
        figure=fig,
        plot_name=file_name,
        write_html=True,
        write_png=False,
        png_scale=6.0,
        write_pdf=False,
        write_svg=False,
        try_orca_write=False,
        )
    #__|

    return (active_sites_unique)
    #__|
def return_modified_rdf(
    df_rdf=None,
    chunks_to_edit=None,
    dx=None,
):
    """Shift selected chunks of an RDF curve along the r axis.

    The curve is split into contiguous "chunks" of non-negligible g(r)
    (separated by gaps larger than 3 grid spacings).  Chunks whose index
    appears in `chunks_to_edit` are shifted by `dx` in r; the others are
    kept in place.  A diagnostic plot of the non-zero region is written
    via my_plotly_plot.

    Args:
        df_rdf: DataFrame with columns `r` and `g` (the RDF curve).
        chunks_to_edit: int or list of ints, chunk indices to shift.
        dx: float, or list of floats indexed by chunk index.
            NOTE(review): when a list, it is indexed by the chunk index
            i_cnt, not by position within chunks_to_edit — confirm this
            is the intended convention.

    Returns:
        DataFrame (concatenation of all chunks) with shifted r values in
        the edited chunks.  The input df_rdf is not mutated.
    """
    #| - return_modified_rdf
    import pandas as pd  # hoisted from mid-function

    df_rdf_j = df_rdf

    if not isinstance(chunks_to_edit, list):
        chunks_to_edit = [chunks_to_edit]

    # x-axis spacing of data
    dr = df_rdf_j.r.tolist()[1] - df_rdf_j.r.tolist()[0]

    # Rows with non-negligible g(r) define the chunks
    df_i = df_rdf_j[df_rdf_j.g > 1e-5]

    # Diagnostic plot of the non-zero region
    trace = go.Scatter(x=df_i.r, y=df_i.g, mode="markers")
    data = [trace]
    fig = go.Figure(data=data)
    my_plotly_plot(figure=fig, plot_name="temp_rds_distr", write_html=True)

    # Detect chunk boundaries: a gap larger than 3*dr starts a new chunk
    chunk_start_coords = []
    chunk_end_coords = []

    row_i = df_i.iloc[0]
    chunk_start_coords.append(row_i.r)
    for i in range(1, df_i.shape[0] - 1):
        # #####################################################
        row_i = df_i.iloc[i]
        row_ip1 = df_i.iloc[i + 1]
        row_im1 = df_i.iloc[i - 1]
        # #####################################################
        r_i = row_i.r
        r_ip1 = row_ip1.r
        r_im1 = row_im1.r
        # #####################################################
        if r_i - r_im1 > 3 * dr:
            chunk_start_coords.append(r_i)
        if r_ip1 - r_i > 3 * dr:
            chunk_end_coords.append(r_i)

    # Last non-zero row always closes the final chunk
    row_i = df_i.iloc[-1]
    chunk_end_coords.append(row_i.r)

    chunk_coord_list = []
    for i in range(len(chunk_end_coords)):
        start_i = chunk_start_coords[i]
        end_i = chunk_end_coords[i]
        chunk_coord_list.append([start_i, end_i])

    # Shift the selected chunks, leave the rest untouched
    df_chunks_list = []
    for i_cnt, chunk_i in enumerate(chunk_coord_list):
        if i_cnt in chunks_to_edit:
            if isinstance(dx, list):
                dx_tmp = dx[i_cnt]
            else:
                dx_tmp = dx
        else:
            dx_tmp = 0

        # FIX(review): explicit .copy() so the in-place `+=` below never
        # operates on a view of df_rdf (SettingWithCopy hazard) and the
        # caller's frame is guaranteed untouched
        df_j = df_rdf_j[
            (df_rdf_j.r >= chunk_i[0]) & (df_rdf_j.r <= chunk_i[1])].copy()
        df_j.r += dx_tmp
        df_chunks_list.append(df_j)

    df_i = pd.concat(df_chunks_list)

    return (df_i)
    #__|
# NOTE(review): the following are Jupytext notebook-cell fragments taken
# from several different analysis notebooks; names such as `fig`, `data`,
# `df_m`, `layout`, `show_plot`, `root_dir`, `ti` are defined in cells
# outside this chunk.  Some statements are truncated at fragment
# boundaries (marked below).

# -

# # 2x2 Surface Energy Pourbaix Plot

# + attributes={"classes": [], "id": "", "n": "10"}
# fig = make_surf_e_pourb_plot(df_m=df_m, num_cols=2, layout=layout)
fig = make_surf_e_pourb_plot(df_m=df_m, num_cols=1, layout=layout)

plot_name_i = "surf_e_pourbaix_irox__large"
my_plotly_plot(
    figure=fig,
    plot_name=plot_name_i,
    write_html=False,
    write_pdf=False,
    try_orca_write=False)
# fig.show()
# -

# # 1x4 Surface Energy Pourbaix Plot

# + attributes={"classes": [], "id": "", "n": "11"}
fig = make_surf_e_pourb_plot(df_m=df_m, num_cols=1, layout=layout)
fig.show()
# -

# NOTE(review): new fragment (temporary RDF plotting cell)
data.append(trace)

# +
fig = go.Figure(data=data)
# fig.show()

from plotting.my_plotly import my_plotly_plot

plot_dir = "."
out_plot_file = "./temp_plot"
my_plotly_plot(
    figure=fig,
    plot_name=out_plot_file,
    write_html=True,
    write_png=False,
    png_scale=6.0,
    write_pdf=False,
    write_svg=False,
    try_orca_write=False,
    )

# + active=""
#
#

# NOTE(review): new fragment (oer_scaling notebook)
# ### Saving plot to json
fig.write_json(
    os.path.join(
        os.environ["PROJ_irox_oer"],
        "workflow/oer_analysis/oer_scaling",
        "out_plot/oer_scaling__O_OH_histogram.json"))

my_plotly_plot(
    figure=fig,
    save_dir=root_dir,
    place_in_out_plot=True,
    plot_name="oer_histogram_gO_gOH",
    write_html=True,
    write_png=False,
    png_scale=6.0,
    write_pdf=False,
    write_svg=False,
    try_orca_write=False,
    verbose=False,
    )

if show_plot:
    fig.show()

# ### Creating combined scaling and histogram plot

# + (pd.concat statement truncated at the chunk boundary)
df_concat = pd.concat([
    df_features_targets[("targets", "g_o", "")],

# NOTE(review): new fragment — continues a go.Scatter(...) call truncated
# at the chunk boundary
    name="df_rdf_j_new",
    )
data.append(trace)

trace = go.Scatter(
    x=df_rdf_j.r, y=df_rdf_j.g,
    name="df_rdf_j",
    )
data.append(trace)

fig = go.Figure(data=data)

from plotting.my_plotly import my_plotly_plot

file_name = "__temp__/modified_and_opt_rdf_plots"
my_plotly_plot(
    figure=fig,
    plot_name=file_name,
    write_html=True)
fig.show()

# + jupyter={"outputs_hidden": true}
df_rdf_i = df_rdf_i
df_rdf_j = df_rdf_j_new

# Combine the r grids of both RDFs (deduplicated, sorted) and
# interpolate each curve onto the shared grid
r_combined = np.sort((df_rdf_j.r.tolist() + df_rdf_i.r.tolist()))
r_combined = np.sort(list(set(r_combined)))

df_interp_i = create_interp_df(df_rdf_i, r_combined)
df_interp_j = create_interp_df(df_rdf_j, r_combined)

# NOTE(review): new fragment (PCA model-selection notebook)
fig = go.Figure(
    data=data,
    layout=layout_shared_i,
    )
if show_plot:
    fig.show()

# +
from plotting.my_plotly import my_plotly_plot

my_plotly_plot(
    figure=fig,
    plot_name="MAE_vs_PCA_comp",
    save_dir=root_dir,
    write_html=True,
    write_pdf=True,
    try_orca_write=True,
    )
# -

# ## Plotting the best model (optimal num PCA components)

# +
num_pca_best = 3
# num_pca_best = 1
# num_pca_best = 11

# +
data_dict_i = data_dict[num_pca_best]

# NOTE(review): new fragment (scatter_plot_matrix notebook)
# +
import plotly.express as px
fig = px.scatter_matrix(df_i)

if show_plot:
    fig.show()

# +
from plotting.my_plotly import my_plotly_plot

my_plotly_plot(
    figure=fig,
    save_dir=os.path.join(
        os.environ["PROJ_irox_oer"],
        "workflow/feature_engineering/scatter_plot_matrix",
        ),
    plot_name="scatter_matrix_plot",
    write_html=True,
    )
# -

# #########################################################
print(20 * "# # ")
print("All done!")
print("Run time:", np.round((time.time() - ti) / 60, 3), "min")
print("scatter_plot_matrix.ipynb")
print(20 * "# # ")
# #########################################################

# + active=""

# NOTE(review): new fragment — continues a layout/update_layout call
# truncated at the chunk boundary
    title="Processing speed vs structure size (num atoms)",
    xaxis=dict(title=dict(text="Number of atoms")),
    yaxis=dict(
        title=dict(text="Processing time (min)", ),
        range=[-1, 40],
        ),
    )
fig.show()
# -

my_plotly_plot(
    figure=fig,
    plot_name="iter_speed_vs_num_atoms",
    write_html=True,
    write_png=False,
    png_scale=6.0,
    write_pdf=False,
    write_svg=False,
    try_orca_write=False,
    )

# # Writing the structures that are unique octahedras

# +
# #######################################################################
data_path = os.path.join(os.environ["PROJ_irox_oer"],
    "workflow/creating_slabs/selecting_bulks",
    "out_data/data.json")
with open(data_path, "r") as fle:
    data = json.load(fle)
# #######################################################################

# NOTE(review): new fragment (another scatter_plot_matrix variant)
print("show_plot:", show_plot)
if show_plot:
    fig.show()

# +
from plotting.my_plotly import my_plotly_plot

my_plotly_plot(
    figure=fig,
    save_dir=os.path.join(
        os.environ["PROJ_irox_oer"],
        "workflow/oer_vs_features",
        ),
    plot_name="scatter_matrix_plot",
    write_html=True,
    )
# -

# #########################################################
print(20 * "# # ")
print("All done!")
print("Run time:", np.round((time.time() - ti) / 60, 3), "min")
print("scatter_plot_matrix.ipynb")
print(20 * "# # ")

# NOTE(review): new fragment — continues an add_duplicate_axes(...) call
# truncated at the chunk boundary
        ],
    **shared_meth_props)

add_duplicate_axes(
    fig, axis_type='y',
    axis_data=shared_yaxis_data,
    axis_num_list=[i, ],
    **shared_meth_props)

# +
from plotting.my_plotly import my_plotly_plot

my_plotly_plot(
    figure=fig,
    plot_name="disc_vs_dft",
    write_html=True,
    write_png=False,
    png_scale=6.0,
    write_pdf=True,
    write_svg=False,
    try_orca_write=True,
    )
# -

print(20 * "# # ")
print("All done!")

# NOTE(review): deliberate notebook stop — cells below never run
assert False
fig.show()

# NOTE(review): new fragment (active-learning series figures)
fig.update_yaxes(linecolor="red", row=1, col=3)

# Update first subplot to have tick props
fig.update_yaxes(
    showticklabels=True,
    ticks="outside",
    dtick=0.5,
    row=1, col=1)

# #########################################################
fig_al_series = copy.deepcopy(fig)

my_plotly_plot(
    figure=fig,
    plot_name=stoich_i + "_" + "al_5_gens_in_row",
    write_html=True,
    )

fig.layout.update(paper_bgcolor="white")
# fig.show()

# ######################################################### (dict literal
# truncated at the chunk boundary)
figs_dict = {
    "fig_al_series": fig_al_series,
    "traces_tracking": traces_list_tracking,
    "num_dft_list": num_dft_list,

# NOTE(review): new fragment (2D volcano) — continues a call truncated at
# the chunk boundary
    }})

for trace_i in fig_cpy.data:
    try:
        trace_i.update(scatter_shared_props_cpy)
    except:
        pass

# #########################################################
my_plotly_plot(
    figure=fig_cpy,
    save_dir=root_dir,
    place_in_out_plot=True,
    plot_name="00_volcano_plot__v",
    write_html=False,
    write_png=False,
    png_scale=6.0,
    write_pdf=True,
    write_svg=False,
    try_orca_write=True,
    verbose=True,
    )
# -

fig.write_json(
    os.path.join(os.environ["PROJ_irox_oer"],
        "workflow/oer_analysis/volcano_2d",
        "out_plot/volcano_2d.json"))

# + active=""
#
def get_unique_active_sites_temp(
    slab=None,
    active_sites=None,
    bulk_id=None,
    facet=None,
    slab_id=None,
    metal_atom_symbol=None,
    df_coord_slab_i=None,
    create_heatmap_plot=False,
):
    """Return the subset of `active_sites` that are structurally unique.

    Variant of get_unique_active_sites that additionally pickles the
    per-site RDFs and the pairwise difference matrix under
    $PROJ_irox_oer/workflow/enumerate_adsorption/out_data, accepts a
    precomputed df_coord_slab_i, and only draws the heat map on request.

    Args:
        slab: atoms object of the slab the sites belong to.
        active_sites: sequence of active-site indices to de-duplicate.
        bulk_id: bulk structure id, used in output file names.
        facet: facet string, used in output file names.
        slab_id: slab id, used for df_coord lookup and file names.
        metal_atom_symbol: chemical symbol of the metal atom.
        df_coord_slab_i: optional precomputed slab coordination frame;
            looked up via get_df_coord when None.
        create_heatmap_plot: when True, also write the pairwise-difference
            heat map figure.

    Returns:
        Sequence of active-site indices with duplicates removed.
    """
    #| - get_unique_active_sites
    import os
    import pickle

    if df_coord_slab_i is None:
        df_coord_slab_i = get_df_coord(slab_id=slab_id, mode="slab")
    df_coord_bulk_i = get_df_coord(bulk_id=bulk_id, mode="bulk")

    custom_name_pre = bulk_id + "__" + facet + "__" + slab_id

    # Compute one RDF per active site
    df_rdf_dict = dict()
    for i in active_sites:
        df_rdf_i = process_rdf(
            atoms=slab,
            active_site_i=i,
            df_coord_slab_i=df_coord_slab_i,
            metal_atom_symbol=metal_atom_symbol,
            custom_name=custom_name_pre,
            )
        df_rdf_dict[i] = df_rdf_i

    # Pickling df_rdf_dict #####################################
    directory = os.path.join(
        os.environ["PROJ_irox_oer"],
        "workflow/enumerate_adsorption",
        "out_data/df_rdf_dict",
        )
    # FIX(review): exist_ok=True replaces the race-prone exists()/makedirs
    os.makedirs(directory, exist_ok=True)
    with open(os.path.join(directory, custom_name_pre + ".pickle"), "wb") as fle:
        pickle.dump(df_rdf_dict, fle)

    # Pairwise RDF difference matrix (NaN-initialized, then filled)
    diff_rdf_matrix = np.empty((
        len(active_sites),
        len(active_sites),
        ))
    diff_rdf_matrix[:] = np.nan
    for i_cnt, active_site_i in enumerate(active_sites):
        df_rdf_i = df_rdf_dict[active_site_i]
        for j_cnt, active_site_j in enumerate(active_sites):
            df_rdf_j = df_rdf_dict[active_site_j]
            diff_i = compare_rdf_ij(
                df_rdf_i=df_rdf_i,
                df_rdf_j=df_rdf_j,
                )
            diff_rdf_matrix[i_cnt, j_cnt] = diff_i

    df_rdf_ij = pd.DataFrame(diff_rdf_matrix, columns=active_sites)
    df_rdf_ij.index = active_sites

    # Pickling df_rdf_ij #######################################
    directory = os.path.join(
        os.environ["PROJ_irox_oer"],
        "workflow/enumerate_adsorption",
        "out_data/df_rdf_ij",
        )
    os.makedirs(directory, exist_ok=True)
    with open(os.path.join(directory, custom_name_pre + ".pickle"), "wb") as fle:
        pickle.dump(df_rdf_ij, fle)

    # Drop sites whose RDF difference to another site is below threshold
    active_sites_cpy = copy.deepcopy(active_sites)
    diff_threshold = 0.2
    duplicate_active_sites = []
    for active_site_i in active_sites:
        if active_site_i in duplicate_active_sites:
            continue
        for active_site_j in active_sites:
            if active_site_i == active_site_j:
                continue
            diff_ij = df_rdf_ij.loc[active_site_i, active_site_j]
            if diff_ij < diff_threshold:
                # FIX(review): was a bare `except: pass`; narrowed to the
                # two failure modes seen here (already removed, or
                # active_sites is an array type without .remove)
                try:
                    active_sites_cpy.remove(active_site_j)
                    duplicate_active_sites.append(active_site_j)
                except (ValueError, AttributeError):
                    pass

    active_sites_unique = active_sites_cpy

    #| - Creating Figure
    if create_heatmap_plot:
        import plotly.graph_objects as go

        active_sites_str = [str(i) for i in active_sites]
        fig = go.Figure(data=go.Heatmap(
            z=df_rdf_ij.to_numpy(),
            x=active_sites_str,
            y=active_sites_str,
            xgap=3,
            ygap=3,
            ))
        fig["layout"]["xaxis"]["type"] = "category"
        fig["layout"]["yaxis"]["type"] = "category"

        directory = os.path.join(
            os.environ["PROJ_irox_oer"],
            "workflow/enumerate_adsorption",
            "out_data/rdf_heat_maps_1",
            )
        os.makedirs(directory, exist_ok=True)

        # save_dir is the directory path with its first component stripped
        # (my_plotly_plot prepends its own root)
        save_dir = os.path.join(
            "/".join(directory.split("/")[1:]),
            )
        file_name = custom_name_pre + "_rdf_diff_heat_map"
        print(file_name)
        my_plotly_plot(
            figure=fig,
            save_dir=save_dir,
            place_in_out_plot=False,
            plot_name=file_name,
            write_html=True,
            write_png=False,
            png_scale=6.0,
            write_pdf=False,
            write_svg=False,
            try_orca_write=False,
            )
    #__|

    return (active_sites_unique)
    #__|
# NOTE(review): notebook fragment (volcano plot) — continues a trace
# definition truncated at the chunk boundary; `volcano_legs_data*`, `VP`,
# `plot_exp_traces`, `trace_iro2/3`, `layout` come from earlier cells.
    ],
    line_color="grey"
    )

# data = volcano_legs_data + volcano_legs_data_tmp + VP.data_points
data = volcano_legs_data_tmp + volcano_legs_data + VP.data_points

if plot_exp_traces:
    data.insert(0, trace_iro3)
    data.insert(0, trace_iro2)

# +
fig = go.Figure(data=data, layout=layout)
my_plotly_plot(
    figure=fig,
    plot_name="out_plot_02_large")
# -

# # TEMP | Changing line type of volcano

# +
shared_axis_props = dict(ticklen=3)

ticks_props_new_x = dict(
    dtick=0.1,
    **shared_axis_props)
ticks_props_new_y = dict(
    dtick=0.05,
    **shared_axis_props)

# NOTE(review): new fragment (SOAP matrix heat maps) — continues a
# go.Scatter(...) call truncated at the chunk boundary
    y=y_array,
    )
data = [trace]
fig = go.Figure(data=data)
# fig.show()

# +
fig = px.imshow(
    df_SOAP_AS.to_numpy(),
    aspect='auto',  # 'equal', 'auto', or None
    )
my_plotly_plot(
    figure=fig,
    plot_name="df_SOAP_AS",
    write_html=True,
    )
fig.show()

# + (my_plotly_plot call truncated at the chunk boundary)
fig = px.imshow(
    df_SOAP_MS.to_numpy(),
    aspect='auto',  # 'equal', 'auto', or None
    )
my_plotly_plot(
    figure=fig,
    plot_name="df_SOAP_MS",
    write_html=True,
def create_linear_model_plot(
    df=None,
    feature_columns=None,
    ads=None,
    feature_ads=None,
    format_dict=None,
    layout=None,
    verbose=True,
    save_plot_to_file=False,
):
    """Fit a linear model on standardized features and build a parity plot.

    Fits sklearn LinearRegression of the first `targets` column on the
    selected `features_stan[feature_ads]` columns of `df`, then builds a
    Plotly parity plot (simulated vs predicted) with a black y=x line.

    Args:
        df: multi-index-column DataFrame with `features_stan`, `targets`,
            `format` and `data` column groups.
        feature_columns: iterable of feature names to keep, or None/"all"
            for every `features_stan[feature_ads]` column.
        ads: adsorbate key.  NOTE(review): only feature_ads is used for
            fitting; confirm callers always pass ads == feature_ads.
        feature_ads: adsorbate key selecting the feature column group.
        format_dict: dict with a "color" key selecting the per-point color
            column from df["format"]["color"].
        layout: Plotly layout object, mutated in place (titles, fonts,
            axis ranges).
        verbose: print model score and fitted coefficients.
        save_plot_to_file: write the figure via my_plotly_plot.

    Returns:
        dict with key "fig" holding the Plotly figure.
    """
    #| - create_linear_model_plot
    # #####################################################
    df_i = df
    features_cols_to_include = feature_columns
    # #####################################################

    #| - Dropping feature columns
    if features_cols_to_include is None or features_cols_to_include == "all":
        features_cols_to_include = df_i["features_stan"][feature_ads].columns

    cols_to_drop = []
    for col_i in df_i["features_stan"][feature_ads].columns:
        if col_i not in features_cols_to_include:
            cols_to_drop.append(col_i)

    # FIX(review): removed unused `df_tmp = copy.deepcopy(df_i)`
    for col_i in cols_to_drop:
        df_i = df_i.drop(columns=[("features_stan", feature_ads, col_i)])

    feature_cols = list(df_i["features_stan"][feature_ads].columns)

    plot_title = " | ".join(feature_cols)
    plot_title = "Features: " + plot_title
    #__|

    #| - Creating linear model
    X = df_i["features_stan"][feature_ads].to_numpy()
    X = X.reshape(-1, len(df_i["features_stan"][feature_ads].columns))

    y = df_i.targets[df_i.targets.columns[0]]

    model = LinearRegression()
    model.fit(X, y)
    y_predict = model.predict(X)

    if verbose:
        print(20 * "-")
        print("model.score(X, y):", model.score(X, y))
        print("")
        # FIX(review): label coefficients with the feature_ads columns the
        # model was actually fit on (was indexed with `ads`, which
        # mismatches when ads != feature_ads)
        for i, j in zip(
                list(df_i["features_stan"][feature_ads].columns),
                model.coef_):
            print(i, ": ", j, sep="")
        print(20 * "-")
    #__|

    #| - Plotting
    data = []

    from methods import get_df_slab
    # NOTE(review): df_slab was only used by a since-deleted color-mapping
    # section; the call is kept in case get_df_slab has load-time side
    # effects — confirm and remove.
    df_slab = get_df_slab()

    #| - Creating parity line
    x_parity = y_parity = np.linspace(
        -2., 8.,
        num=100,
        )

    trace_i = go.Scatter(
        x=x_parity, y=y_parity,
        line=go.scatter.Line(color="black", width=2.),
        mode="lines")
    data.append(trace_i)
    #__|

    #| - Main Data Trace
    color_list_i = df_i["format"]["color"][format_dict["color"]]

    trace_i = go.Scatter(
        y=y,
        x=y_predict,
        mode="markers",
        marker=go.scatter.Marker(
            size=12,
            color=color_list_i,
            colorscale='Viridis',
            colorbar=dict(thickness=20),
            opacity=0.8,
            ),
        text=df_i.data.name_str,
        textposition="bottom center",
        )
    data.append(trace_i)
    #__|

    #| - Layout
    # First level of the first targets column, e.g. "g_o" / "g_oh"
    y_axis_target_col = df_i.targets.columns[0]
    y_axis_target_col = y_axis_target_col[0]

    if y_axis_target_col == "g_o":
        layout.xaxis.title.text = "Predicted ΔG<sub>*O</sub>"
        layout.yaxis.title.text = "Simulated ΔG<sub>*O</sub>"
    elif y_axis_target_col == "g_oh":
        layout.xaxis.title.text = "Predicted ΔG<sub>*OH</sub>"
        layout.yaxis.title.text = "Simulated ΔG<sub>*OH</sub>"
    else:
        print("Woops isdfsdf8osdfio")

    layout.xaxis.title.font.size = 25
    layout.yaxis.title.font.size = 25
    layout.yaxis.tickfont.size = 20
    layout.xaxis.tickfont.size = 20
    layout.xaxis.range = [2.5, 5.5]
    layout.showlegend = False

    # Tight data-driven axis ranges (overrides the fixed range above)
    dd = 0.2
    layout.xaxis.range = [
        np.min(y_predict) - dd,
        np.max(y_predict) + dd,
        ]
    layout.yaxis.range = [
        np.min(y) - dd,
        np.max(y) + dd,
        ]

    layout.title = plot_title
    #__|

    fig = go.Figure(data=data, layout=layout)

    if save_plot_to_file:
        my_plotly_plot(
            figure=fig,
            save_dir=os.path.join(
                os.environ["PROJ_irox_oer"],
                "workflow/oer_vs_features",
                ),
            plot_name="parity_plot",
            write_html=True)
    #__|

    # #####################################################
    out_dict = dict()
    out_dict["fig"] = fig
    # #####################################################
    return (out_dict)
# NOTE(review): notebook fragment (SCF convergence notebook); `fig` and
# `compenv` come from earlier cells.
if compenv == "wsl":
    fig.show()

# +
from plotting.my_plotly import my_plotly_plot

# Only render PNG outside WSL (no image backend there)
if compenv != "wsl":
    write_png = True
else:
    write_png = False

my_plotly_plot(
    figure=fig,
    plot_name="scf_convergence",
    write_html=True,
    write_png=write_png,
    try_orca_write=True,
    )
# -

# # Copy figure html file to Dropbox with rclone

# +
rclone_comm = "rclone copy out_plot/scf_convergence.html " + os.environ[
    "rclone_dropbox"] + ":__temp__/"

import subprocess
result = subprocess.run(rclone_comm.split(" "), stdout=subprocess.PIPE)
# -

# NOTE(review): new fragment — body of a surface-energy class method,
# truncated at the chunk boundary (note the `self` reference)
SEC_data.append({
    "name": name_i,
    "bulk_system": name[0],
    "facet": name[1],
    "SEC": SEC
    })

data_i = self.plot_surface_energy(name_i=name_i, color_i=color_i)
data += data_i

# ## Plot
my_plotly_plot(plot_name="TEMP_PLOT", save_dir=None, data=data, upload_plot=False)

# + active=""
#
#
# -

# # -------------------------------------

# + [markdown] toc-hr-collapsed=true
# # Averaging the fitted bulk energies across different facets
# -

# NOTE(review): new fragment (bulk Pourbaix TOC figure sizing)
# +
fig.layout.xaxis.title.font.size = 8 * (4 / 3)
fig.layout.yaxis.title.font.size = 8 * (4 / 3)

# +
fig.layout.width = 110
fig.layout.height = 110
# -

fig.show()

my_plotly_plot(
    figure=fig,
    plot_name="bulk_pourb_small_toc",
    write_html=True,
    write_png=False,
    png_scale=6.0,
    write_pdf=True,
    write_svg=False,
    try_orca_write=False,
    )

# NOTE(review): new fragment (octahedral bulk atom-count histogram)
# +
import plotly.express as px
fig = px.histogram(df_dft_i, x="num_atoms", nbins=20)
fig.update_layout(
    title="Number of atoms for unique octahedral IrOx bulk structures")
fig.show()

# +
from plotting.my_plotly import my_plotly_plot

my_plotly_plot(
    figure=fig,
    plot_name="atom_count_histogram_octahedral",
    write_html=True,
    write_png=False,
    png_scale=6.0,
    write_pdf=False,
    write_svg=False,
    try_orca_write=False,
    )
# -

# # Saving data

# #######################################################################
import json
data_path = os.path.join("out_data/data.json")
with open(data_path, "w") as fle:
    json.dump(out_dict, fle, indent=2)
# #######################################################################

# NOTE(review): new fragment (OER analysis volcano) — continues a call
# truncated at the chunk boundary
    trace_priority="bottom",  # 'top' or 'bottom'
    )

data = volcano_legs_data + VP.data_points
layout = VP.get_plotly_layout()

fig = go.Figure(
    data=data,
    layout=layout,
    )

my_plotly_plot(
    figure=fig,
    save_dir=os.path.join(
        os.environ["PROJ_irox_oer"],
        "workflow/oer_analysis"),
    plot_name="out_plot_02_large")
# -

if show_plot:
    fig.show()

# #########################################################
print(20 * "# # ")
print("All done!")
print("Run time:", np.round((time.time() - ti) / 60, 3), "min")
print("oer_analysis.ipynb")
print(20 * "# # ")
# #########################################################
def process_rdf(
    atoms=None,
    active_site_i=None,
    df_coord_slab_i=None,
    metal_atom_symbol=None,
    custom_name=None,
    TEST_MODE=False,
):
    """Compute the RDF around one active site of a slab.

    Converts `atoms` to a pymatgen structure, collects the neighbor shells
    of the active site via get_indices_of_neigh, computes a smoothed RDF
    (RadialDistributionFunction), writes it to out_data/rdf_data/*.csv,
    plots it, and returns it as a DataFrame.

    Args:
        atoms: ase.Atoms slab object.
        active_site_i: index of the active (oxygen) site.
        df_coord_slab_i: slab coordination DataFrame for neighbor lookup.
        metal_atom_symbol: chemical symbol of the metal atom.
        custom_name: prefix for output file names ("" when None).
        TEST_MODE: when True, write the plot under __temp__/ instead of
            rdf_figures/.

    Returns:
        DataFrame with columns `r` and `g` (the RDF curve).
    """
    #| - process_rdf
    import os
    import pickle

    if custom_name is None:
        custom_name = ""

    #| - Create out folders
    # FIX(review): exist_ok=True replaces the race-prone exists()/makedirs
    directory = os.path.join(os.environ["PROJ_irox_oer"],
        "workflow/enumerate_adsorption",
        "out_data")
    os.makedirs(directory, exist_ok=True)

    directory = "out_plot/rdf_figures"
    os.makedirs(directory, exist_ok=True)

    directory = "out_data/rdf_data"
    os.makedirs(directory, exist_ok=True)
    #__|

    AAA = AseAtomsAdaptor()
    slab_structure = AAA.get_structure(atoms)

    # NOTE(review): debug dump of the call inputs to ~/__temp__/temp.pickle
    # (overwritten on every call); kept for parity with original behavior —
    # confirm it is still needed.
    out_dict = dict()
    out_dict["active_site_i"] = active_site_i
    out_dict["df_coord_slab_i"] = df_coord_slab_i
    out_dict["metal_atom_symbol"] = metal_atom_symbol
    path_i = os.path.join(os.environ["HOME"], "__temp__", "temp.pickle")
    with open(path_i, "wb") as fle:
        pickle.dump(out_dict, fle)

    # First/second shell neighbors of the active site
    neigh_dict = get_indices_of_neigh(
        active_oxy_ind=active_site_i,
        df_coord_slab_i=df_coord_slab_i,
        metal_atom_symbol=metal_atom_symbol)

    neighbor_oxy_indices = neigh_dict["neighbor_oxy_indices"]
    neighbor_metal_indices = neigh_dict["neighbor_metal_indices"]
    shell_2_metal_atoms = neigh_dict["shell_2_metal_atoms"]

    neighbor_indices = \
        neighbor_oxy_indices + neighbor_metal_indices + shell_2_metal_atoms

    #| - Get RDF
    RDF = RadialDistributionFunction(
        [
            slab_structure,
            ],
        [
            active_site_i,
            ],
        neighbor_indices,
        ngrid=4801,
        rmax=8.0,
        cell_range=2,
        # Narrow Gaussian smearing; earlier values tried: 0.2, 0.08, 0.008
        sigma=0.015,
        )

    data_file = os.path.join(
        "out_data/rdf_data",
        custom_name + "_" + str(active_site_i).zfill(4) + "_" + "rdf_out.csv")

    RDF.export_rdf(data_file)

    df_rdf = pd.read_csv(data_file)
    # Exported header is " g(r)" (leading space); normalize to "g"
    df_rdf = df_rdf.rename(columns={" g(r)": "g"})
    #__|

    #| - Plotting
    import plotly.graph_objs as go

    x_array = df_rdf.r
    y_array = df_rdf["g"]
    trace = go.Scatter(
        x=x_array,
        y=y_array,
        )
    data = [trace]
    fig = go.Figure(data=data)

    from plotting.my_plotly import my_plotly_plot

    if TEST_MODE:
        plot_dir = "__temp__"
    else:
        plot_dir = "rdf_figures"

    out_plot_file = os.path.join(
        plot_dir,
        custom_name + "_" + str(active_site_i).zfill(4) + "_rdf")

    my_plotly_plot(
        figure=fig,
        plot_name=out_plot_file,
        write_html=True,
        write_png=False,
        png_scale=6.0,
        write_pdf=False,
        write_svg=False,
        try_orca_write=False,
        )
    #__|

    return (df_rdf)
# NOTE(review): notebook fragment (active-learning performance figure);
# `fig`, `x_range`, `y_range`, `stoich_i` come from earlier cells.
shapes = None
fig.layout.update(
    shapes=shapes,
    xaxis=dict(range=x_range),
    yaxis=dict(range=y_range),
    )

my_plotly_plot(
    figure=fig,
    plot_name=stoich_i + "_" + "al_performance",
    write_html=True,
    write_png=False,
    png_scale=6.0,
    write_pdf=True,
    write_svg=False,
    try_orca_write=True,
    )

fig.layout.update(paper_bgcolor="white")
# fig.show()

tmp = 42

# + Collapsed="false" jupyter={"outputs_hidden": false}
# Pickling data ######################################################
import os; import pickle
directory = "out_data"

# NOTE(review): new fragment (effective oxidation state notebook)
# +
df_j_ab3
# df_j_ab2

# +
from plotting.my_plotly import my_plotly_plot

my_plotly_plot(
    figure=fig,
    plot_name="G_O__vs__eff_ox",
    write_html=True,
    write_pdf=True,
    try_orca_write=True,
    )
# -

# ## Creating box plots for Eff ox state

# + (commented-out statement truncated at the chunk boundary)
# # df_j_ab3[("features", "oh", "effective_ox_state", )] = np.round(