def make_basic_text_annotation_from_dict(dikt, x=0.50, y=0.1,
                                          fontpath='./fzpicalc/FiraMono-Medium.otf'):
    """Build a basic plotly text annotation dict from a parameter dictionary."""
    names = dikt.keys()
    values = dikt.values()
    text = ''
    for val, name in zip(values, names):
        if isinstance(val, str):
            text += str(name) + '=' + val + '<br>'
        else:
            text += str(name) + '=' + str(basic_func.tidy(val, 3)) + '<br>'
    an = dict(x=x,
              y=y,
              showarrow=False,
              text=text,
              bgcolor='rgb(255,255,255)',
              opacity=0.8,
              bordercolor='#000000',
              borderwidth=1,
              align='center',
              xref='paper',
              yref='paper',
              font=dict(family=fontpath, size=15, color='#000000'))
    return an
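# Example usage (a minimal sketch, not called anywhere in this module): the
# returned annotation dict can be handed to plot_pressure() via its
# optional_annotation argument; the parameter values below are hypothetical.
#
#   params = {'k_fault': 1e-10, 'rate': 20, 'comment': 'base case'}
#   an = make_basic_text_annotation_from_dict(params, x=0.5, y=0.05)
#   plot_pressure(..., optional_annotation=[an], ...)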
def check_input(row_dict, row_index):
    """Validate one parameter set against the required parameter names and value ranges."""
    check = True
    for each_req in required_params:
        if each_req not in row_dict.keys():
            print('\n')
            print('INPUT ERROR: ' + each_req + ' is missing in parameter set '
                  + str(row_index) + ' (row no. starting with 0)')
            print('\n')
            check = False
        else:
            if each_req == 'fz_thickness':
                if row_dict[each_req] not in [15, 20, 35, 50, 75, 100, 200, 300]:
                    print('\n')
                    print('INPUT ERROR: ' + each_req + ' in line ' + str(row_index)
                          + ' invalid, must be one of the following values: '
                          + '15, 20, 35, 50, 75, 100, 200, 300 (row no. starting with 0)')
                    print('\n')
                    check = False
            else:
                if (basic_func.tidy(row_dict[each_req], 10) > param_range_max[each_req]
                        or basic_func.tidy(row_dict[each_req], 10) < param_range_min[each_req]
                        or np.isnan(row_dict[each_req])):
                    print('\n')
                    print('INPUT ERROR: ' + each_req + ' in line ' + str(row_index)
                          + ' invalid, must be in valid range: '
                          + str(param_range_min[each_req]) + ' - '
                          + str(param_range_max[each_req]) + ' (row no. starting with 0)')
                    print('\n')
                    check = False
    return check
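# Example usage (a minimal sketch): check_input() relies on the module-level
# required_params, param_range_min and param_range_max definitions; the loop
# below assumes the parameter sets live in the input_data_set DataFrame.
#
#   for row_index, row in input_data_set.iterrows():
#       if not check_input(row.to_dict(), row_index):
#           continue  # invalid rows are reported and skipped
#       # ... run the fault zone and matrix simulations for this row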
        input_data_set.loc[ei, 'P_ref pick time [h]'] = Ref_Pi_matrix_picktime
        input_data_set.loc[ei, 'Rel. fault zone PI influence [-]'] = Pi_change_rel
        input_data_set.loc[ei, 'Pi change FZ [l/s/MPa]'] = Pi_change_si
        input_data_set.loc[ei, 'dP change FZ [MPa]'] = delta_p_m_fz
        input_data_set.loc[ei, 'Reference Matrix P[MPa]'] = P_refMatrix
        input_data_set.loc[ei, 'Pi change pick time [h]'] = Pi_picktime
    else:
        # if the input check fails, the row is not evaluated
        input_data_set.loc[ei, 'Rel. fault zone PI influence [-]'] = 'input not valid, skipped'

    # update progress bar:
    progressbar.print_progress(ei + 1, l, prefix='Progress:', suffix='Complete', bar_length=50)

t_consumed = time() - t
av_time_per_param = basic_func.tidy((t_consumed / l), 3)
print('\n')
print('calculations finished with an average calculation time per parameter combination '
      '(= 2 simulations) of ' + str(av_time_per_param) + ' seconds')
print('\n')

# saving results:
stor_name = 'calculated_' + input_filename
input_data_set.to_csv(os.path.join(dt_string, stor_name), index=False)
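# The saved results can be re-loaded for inspection later on, e.g. (a minimal
# sketch, assuming dt_string and stor_name are still in scope):
#
#   results = pd.read_csv(os.path.join(dt_string, stor_name))
#   print(results['Rel. fault zone PI influence [-]'].describe())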
def plot_pressure(ipath_arr, SIparams_dict, fthickness, compare_annotation=True,
                  itype=['golem'], additionaltrace=[], optional_annotation=[],
                  DERplot=[False], indv_plotname='semilog_plot',
                  newfoldername='error_plots', plotdir=os.getcwd(),
                  xtyp='log', xtitle='time [h]', ytyp='log', ytitle='dp [MPa]',
                  auto_open=True, pdf=True, png=False, html=False,
                  fontpath='./fzpicalc/FiraMono-Medium.otf',
                  identifier='identifier_str'):
    '''
    input documentation:
    ipath_arr       = list of paths to csv files or np.arrays
    SIparams_dict   = list of dictionaries with the parameter settings in SI units, e.g.:
                      {'S_fault': 2e-12, 'S_matrix': 2e-12, 'k_fault': 1e-10,
                       'k_matrix': 1e-13, 'rate': 20, 'visocisty': 0.00028974}
    fthickness      = list of strings containing the fault zone thickness in meters, e.g. ['50']
    itype           = list of data sources:
                      Dwarfelephant online stage output array = 'rb'
                      Dwarfelephant single FE csv             = 'rb_fe'
                      GOLEM                                   = 'golem'
    additionaltrace = any traces that should be plotted in addition to the input csv
    DERplot         = list of booleans; if True, the DER curve is also plotted
    plotdir         = directory in which a plotting folder with the plots is created

    example usage:
    pls.plot_pressure([RB_outputs,
                       '/home/hombre/Desktop/VGL/1_base_FZ_out.csv',
                       '/home/hombre/Desktop/VGL/1_base_FZ_nolengthscale_out.csv'],
                      [parm_dict, parm_dict, parm_dict],
                      ['100', '50', '50'],
                      itype=['rb', 'golem', 'golem'],
                      additionaltrace=[],
                      optional_annotation=[],
                      DERplot=[False, False, False],
                      indv_plotname='comparison_plot',
                      xtyp='log', ytyp='linear',
                      plotdir=plotdir)
    '''
    # TODO: integrate an input check for correct types here

    # generating the plot label and checking for differences:
    differences = {}
    if compare_annotation:
        if len(SIparams_dict) > 1:
            # check if the dicts are the same:
            SIparams_dict_same = True
            for eachdict in SIparams_dict:
                if SIparams_dict[0] != eachdict:
                    SIparams_dict_same = False
                    # calculate the differences between the dictionaries
                    diff = set(SIparams_dict[0].items()) ^ set(eachdict.items())
                    differences.update(diff)
            # check if the fault thicknesses are the same:
            SIparams_fz_same = True
            for eachthickness in fthickness:
                if fthickness[0] != eachthickness:
                    SIparams_fz_same = False
        if len(SIparams_dict) == 1:
            parametertext1 = '<b>' + str(fthickness[0]) + 'm fault thickness</b>'
            p_names = SIparams_dict[0].keys()
            parms_SI = SIparams_dict[0].values()
            parms_scaled = scale_array(parms_SI, p_names, direction='normal')
            parametertext2 = '<br><b>name=SI/scaled</b><br><br>'
            for val, valscaled, name in zip(parms_SI, parms_scaled, p_names):
                if isinstance(val, str):
                    parametertext2 += str(name) + '=' + val + '<br>'
                else:
                    parametertext2 += (str(name) + '=' + str(basic_func.tidy(val, 7))
                                       + '/' + str(basic_func.tidy(valscaled, 7)) + '<br>')
        elif len(SIparams_dict) > 1:
            parametertext1 = '<b>fault thicknesses [m]: </b><br>'
            if not SIparams_fz_same:
                for each in fthickness:
                    parametertext1 += str(each) + ', '
                parametertext1 = parametertext1[:-2]
            else:
                parametertext1 += str(fthickness[0])
            if SIparams_dict_same:
                p_names = SIparams_dict[0].keys()
                parms_SI = SIparams_dict[0].values()
                parms_scaled = scale_array(parms_SI, p_names, direction='normal')
                parametertext2 = '<br><br><b>name=SI/scaled</b><br>'
                for val, valscaled, name in zip(parms_SI, parms_scaled, p_names):
                    if isinstance(val, str):
                        parametertext2 += str(name) + '=' + val + '<br>'
                    else:
                        parametertext2 += (str(name) + '=' + str(basic_func.tidy(val, 7))
                                           + '/' + str(basic_func.tidy(valscaled, 7)) + '<br>')
            else:
                parametertext2 = '<br><br>curve differences:<br>'
                p_names = differences.keys()
                parms_SI = differences.values()
                parms_scaled = scale_array(parms_SI, p_names, direction='normal')
                for val, valscaled, name in zip(parms_SI, parms_scaled, p_names):
                    parametertext2 += str(name) + ', '
                parametertext2 = parametertext2[:-2]
        if len(differences) > 0:
            diff_keys = differences.keys()
    else:
        parametertext1 = '<b>' + str(fthickness[0]) + 'm fault thickness</b>'
        p_names = SIparams_dict[0].keys()
        parms_SI = SIparams_dict[0].values()
        parametertext2 = '<br><b>parameter=SI units</b><br>'
        for val, name in zip(parms_SI, p_names):
            if isinstance(val, str):
                parametertext2 += str(name) + '=' + val + '<br>'
            else:
                parametertext2 += str(name) + '=' + str(basic_func.tidy(val, 7)) + '<br>'

    # initialize the empty list of traces that will be plotted
    traces = []
    # add optional traces here already
    for eachtrace in additionaltrace:
        traces.append(eachtrace)
    # print('loading data:')
    filenames = []
    ymaxvals = []
    yminvals = []
    # setting up categorical colors:
    if len(ipath_arr) > 1:
        n_min = 0
        n_max = len(ipath_arr) - 1
        cma = cm.rainbow
    for index, eachinput in enumerate(ipath_arr):
        if len(ipath_arr) > 1:
            cn = basic_func.normalize(index, n_min, n_max)
            c = cma(cn, 1)
            rgb = c[:3]
            rgb = "rgb(%s, %s, %s)" % (int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255))
        else:
            rgb = '#2B4369'
        # checking and preparation for further well test analysis:
        if itype[index] == 'rb_fe':
            pta = True
            # setting the filename:
            filename = ntpath.basename(eachinput)
            df = pd.read_csv(eachinput)
            if 'rb_dp' not in df.columns:
                print('wrong input csv type set or input file broken')
                return
            # print('RB_FE input csv detected')
            df.loc[:, 'FoerderDruck'] = result_sclaing(df.loc[:, 'rb_dp'])
        elif itype[index] == 'golem':
            pta = True
            # setting the filename:
            filename = ntpath.basename(eachinput)
            df = pd.read_csv(eachinput)
            if 'FoerderDruck' not in df.columns:
                print('wrong input csv type set or input file broken')
                return
            # print('GOLEM input csv detected')
        elif itype[index] == 'rb':
            pta = True
            if isinstance(eachinput, np.ndarray) and len(eachinput[0]) == 128:
                # setting the filename:
                filename = 'rb online stage output'
                # print('RB online input array detected')
                df = pd.DataFrame()
                df['time'] = t_list
                df['t_hour'] = df['time'] / 3600
                df['RB_out'] = abs(pd.melt(pd.DataFrame(eachinput)).drop(['variable'], axis=1))
                RB_backscaled = result_sclaing(eachinput)
                df['FoerderDruck'] = abs(pd.melt(pd.DataFrame(RB_backscaled)).drop(['variable'], axis=1))
                # first row not needed (duplicate specific to the RB online stage output), remove it:
                df.drop(df.index[0], inplace=True)
                df.drop(df.tail(1).index, inplace=True)  # drop the last row
                df.reset_index(drop=True, inplace=True)
            else:
                print('input array does not meet the criteria needed, please check length=128 and type=np.ndarray')
                return
        elif itype[index] == 'preped_df':
            df = eachinput
            filename = 'preped df'
            pta = False
        else:
            print('input type not recognised... aborting!')
            return
        # print('data loaded to df')
        filenames.append(filename)
        if pta:
            # well testing data calculation and flow type evaluation:
            if len(SIparams_dict) > 1:
                df_calc, df_mft, faultzone_bd_detected = determine_main_flow_type_light(
                    df, SIparams_dict[index], identifier)
            else:
                df_calc, df_mft, faultzone_bd_detected = determine_main_flow_type_light(
                    df, SIparams_dict[0], identifier)
        # set data for the traces:
        x = df.loc[1:, 't_hour']
        y = df.loc[1:, 'DruckAenderung']
        y_extreme1 = abs(y).max()
        ymaxvals.append(y_extreme1)
        y_extreme2 = abs(y).min()
        if np.allclose(y_extreme2, 0.0):
            y_quantile = y.quantile(0.20)
            yminvals.append(y_quantile)
        else:
            yminvals.append(y_extreme2)
        # if differences exist in the input parameters, put them into the hover info:
        if len(SIparams_dict) > 1:
            current_dict = SIparams_dict[index]
        else:
            current_dict = SIparams_dict[0]
        diff_string = '<br>' + 'FZ_th=' + str(fthickness[index]) + 'm'
        difftext = 'FZ_th=' + str(fthickness[index]) + 'm'
        if len(differences) > 0:
            for eachkey in diff_keys:
                if eachkey in current_dict.keys():
                    diff_string += '<br>' + str(eachkey) + '=' + str(current_dict[eachkey])
                    difftext += '<br>' + str(eachkey) + '=' + str(current_dict[eachkey])
        # generate the trace and add it to traces
        p_trace = go.Scatter(showlegend=True,
                             x=x,
                             y=y,
                             name='dP ' + diff_string,
                             hoverinfo='y+name+text',
                             text=difftext,
                             mode='lines+markers',
                             line=dict(color=rgb, width=2, dash='solid'),
                             marker=dict(size=1, opacity=0, color=rgb),
                             )
        traces.append(p_trace)
        if DERplot[index]:
            x = df.loc[2:124, 't_hour']
            y = df.loc[2:124, 'DER']
            flowtype_list = df.loc[2:124, 'flowtype']
            flowtype_list = [letters[:3] for letters in flowtype_list]
            flowtype_label = [None] * len(flowtype_list)
            flowtype_label[::3] = flowtype_list[::3]
            # generate the DER trace and add it to traces
            p_trace = go.Scatter(showlegend=False,
                                 x=x,
                                 y=y,
                                 name='DER ' + filename + diff_string,
                                 hoverinfo='y+name+text',
                                 text=flowtype_label,
                                 textposition='top center',
                                 mode='markers+text',
                                 textfont=dict(size=12, color=rgb),
                                 line=dict(color=rgb, width=2, dash='dot'),
                                 marker=dict(size=3, opacity=1.0, color=rgb),
                                 )
            traces.append(p_trace)
            # add the minimum of the DER to adjust the scale of the y-axis
            y_extreme3 = abs(y).min()
            if np.allclose(y_extreme3, 0.0):
                y_quantile = y.quantile(0.20)
                yminvals.append(y_quantile)
            else:
                yminvals.append(y_extreme3)

    # determine the longest filename:
    lname = max(filenames, key=len) + ' DER'
    # setting the x-axis:
    if xtyp == 'log':
        xmax = math.ceil(np.log10(df['t_hour'].max()))
        xmin = basic_func.tidy(math.floor(np.log10(df['t_hour'][1])), 1)
        xaxis1 = gen_xaxis_dict(title=xtitle, typ=xtyp, rang=[xmin, xmax])
    elif xtyp == 'linear':
        xmax = math.ceil(df['t_hour'].max())
        xmin = math.floor(df['t_hour'].min())
        dx = basic_func.tidy(xmax - xmin, 1) / 10
        xaxis1 = gen_xaxis_dict(title=xtitle, typ=xtyp, rang=[xmin, xmax], dtick=dx)
    # setting the y-axis:
    if ytyp == 'linear':
        # calculate y-axis limits and steps
        ymax = max(ymaxvals) * 1.1
        dy = basic_func.tidy(ymax, 1) / 10
        if True in DERplot:
            yaxis1 = gen_yaxis_dict(title=ytitle + ' / DER', typ=ytyp, rang=[0, ymax],
                                    dtick=dy, tickformat="1.3f")
        else:
            yaxis1 = gen_yaxis_dict(title=ytitle, typ=ytyp, rang=[0, ymax],
                                    dtick=dy, tickformat="1.3f")
    elif ytyp == 'log':
        logmax = np.log10(max(ymaxvals))
        # drop values close to zero before taking the log of the minimum
        yminvals = [eachy for eachy in yminvals if not np.allclose(eachy, 0.0)]
        logmin = np.log10(min(yminvals))
        ymax = math.ceil(logmax)
        # if it is a double log plot, show the same number of log cycles on each axis
        if xtyp == 'log':
            ymin = ymax - abs(xmin - xmax)
        else:
            ymin = math.floor(logmin)
        if True in DERplot:
            yaxis1 = gen_yaxis_dict(title=ytitle + ' / DER', typ=ytyp, rang=[ymin, ymax],
                                    tickformat=".1r")
        else:
            yaxis1 = gen_yaxis_dict(title=ytitle, typ=ytyp, rang=[ymin, ymax],
                                    tickformat="1.3f")

    annotations = []
    an = dict(x=0.50, y=0.95, showarrow=False,
              text='Derivative Plot<br>Fault Zone vs Matrix',
              bgcolor='rgb(255,255,255)', opacity=0.8,
              bordercolor='#000000', borderwidth=1, align='center',
              xref='paper', yref='paper',
              font=dict(family=fontpath, size=30, color='#000000'))
    annotations.append(an)
    an = dict(x=0.50, y=0.1, showarrow=False,
              text=parametertext2,
              bgcolor='rgb(255,255,255)', opacity=0.8,
              bordercolor='#000000', borderwidth=1, align='center',
              xref='paper', yref='paper',
              font=dict(family=fontpath, size=15, color='#000000'))
    annotations.append(an)
    # adding optional annotations here:
    if len(optional_annotation) != 0:
        for eachan in optional_annotation:
            annotations.append(eachan)
    # setting the layout here:
    layout = layout_f_plotly(xaxis1, yaxis1, llegendword=lname, annotations=annotations)
    # plot construction and saving:
    fig = dict(data=traces, layout=layout)
    saveplot_routine(fig, indv_plotname, plotdir, newfoldername, auto_open, pdf, png, html)
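# Minimal call sketch for a single GOLEM output csv (analogous to the docstring
# example above; the path and parameter dict below are hypothetical):
#
#   parm_dict = {'S_fault': 2e-12, 'S_matrix': 2e-12, 'k_fault': 1e-10,
#                'k_matrix': 1e-13, 'rate': 20, 'visocisty': 0.00028974}
#   plot_pressure(['/path/to/golem_out.csv'], [parm_dict], ['50'],
#                 itype=['golem'], DERplot=[True], xtyp='log', ytyp='log',
#                 indv_plotname='single_run_derivative', plotdir=os.getcwd())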
        path, q_a, q_f, q_l, q_m, n_outputs, n_timesteps, dt, euler_theta,
        varying_timesteps=varying_timesteps, growth_rate=growth_rate,
        time_dependent_parameters=time_dependent_parameters, ID_param=ID_param,
        start_time=start_time, end_time=end_time)
    t_model_gen = basic_func.tidy(time() - t, 3)
    print(str(t_model_gen) + 's elapsed to add the ' + eachdir
          + 'm fault zone thickness model!')
    models.append(model)

# add matrix model:
modelnames.append('matrix')
t = time()
matrix_model = rbm.model_from_offline_data(
    os.path.join(RB_MODEL_FOLDER_NAME, 'matrix', 'offline_data'),
    2, 2, q_l, 2, n_outputs,