def test_plot_allmodelruns(self):
    """Run a short DREAM sampling and verify plot_allmodelruns writes a non-empty PNG.

    Only the size of the written figure is checked, not its structure.
    """
    from spotpy.examples.spot_setup_hymod_python import spot_setup as sp

    setup = sp()
    sampler = spotpy.algorithms.dream(
        setup,
        parallel="seq",
        dbname='test_plot_allmodelruns',
        dbformat="ram",
        sim_timeout=5)
    sampler.sample(50)

    modelruns = []
    for run in sampler.getdata():
        # Drop the trailing 9 non-simulation (meta) fields of each record.
        trimmed = np.array([value for value in run])[:-9]
        print(trimmed)
        modelruns.append(trimmed.tolist())

    _fig = spotpy.analyser.plot_allmodelruns(
        modelruns, setup.evaluation(),
        dates=range(1, len(setup.evaluation()) + 1))

    fig_name = "bestmodel.png"
    # An empty matplotlib plot is roughly 8855 bytes; a file at least that
    # large indicates the figure actually carries plotted content.
    self.assertGreaterEqual(os.path.getsize(fig_name), 8855)
    os.remove(fig_name)
# Plot the 2.5%-97.5% band of the last 100 simulation runs together with the
# observed data (parameter-uncertainty plot).
fig = plt.figure(figsize=(16, 9))
ax = plt.subplot(1, 1, 1)
# Per-timestep lower/upper percentile of the final 100 runs.
# (The original also created q25/q75 here, but they were never used.)
q5, q95 = [], []
for field in fields:
    q5.append(np.percentile(results[field][-100:-1], 2.5))
    q95.append(np.percentile(results[field][-100:-1], 97.5))
ax.plot(q5, color='dimgrey', linestyle='solid')
ax.plot(q95, color='dimgrey', linestyle='solid')
ax.fill_between(np.arange(0, len(q5), 1), list(q5), list(q95),
                facecolor='dimgrey', zorder=0, linewidth=0,
                label='parameter uncertainty')
ax.plot(spot_setup.evaluation(), 'r.', label='data')
ax.set_ylim(-50, 450)
ax.set_xlim(0, 729)
ax.legend()
fig.savefig('python_hymod.png', dpi=300)

#########################################################
# Example plot to show the convergence #################
fig = plt.figure(figsize=(12, 16))
plt.subplot(2, 1, 1)
# One likelihood trace per Markov chain.
for i in range(int(max(results['chain'])) + 1):
    index = np.where(results['chain'] == i)
    plt.plot(results['like1'][index], label='Chain ' + str(i + 1))
plt.ylabel('Likelihood value')
plt.legend()
# Get fields with simulation data
fields = [word for word in results.dtype.names if word.startswith('sim')]

# Example plot to show remaining parameter uncertainty #
fig = plt.figure(figsize=(9, 6))
ax = plt.subplot(1, 1, 1)
# Per-timestep 2.5th/97.5th percentile over the 100 runs after convergence.
# (The original also created q25/q75 here, but they were never used.)
q5, q95 = [], []
for field in fields:
    q5.append(np.percentile(results[field][-100:-1], 2.5))
    q95.append(np.percentile(results[field][-100:-1], 97.5))
ax.plot(q5, color='dimgrey', linestyle='solid')
ax.plot(q95, color='dimgrey', linestyle='solid')
ax.fill_between(np.arange(0, len(q5), 1), list(q5), list(q95),
                facecolor='dimgrey', zorder=0, linewidth=0,
                label='simulation uncertainty')
ax.plot(spot_setup.evaluation(), color='red', markersize=2, label='data')
ax.set_ylim(-50, 450)
ax.set_xlim(0, 729)
ax.set_ylabel('Discharge [l s-1]')
ax.set_xlabel('Days')
ax.legend()
fig.savefig('DREAM_simulation_uncertainty_Hymod.png', dpi=150)
#########################################################

# Example plot to show the convergence #################
spotpy.analyser.plot_gelman_rubin(results, r_hat, fig_name='DREAM_r_hat.png')
########################################################
plt.xlabel('Iteration')
fig.savefig('SCEUA_objectivefunctiontrace.png', dpi=150)

# Plot the best model run
# Locate the run with the smallest objective-function value
bestindex, bestobjf = spotpy.analyser.get_minlikeindex(results)
best_model_run = results[bestindex]

# Keep only the simulation columns of that run
fields = [
    name for name in best_model_run.dtype.names if name.startswith('sim')
]
best_simulation = list(best_model_run[fields])

fig = plt.figure(figsize=(9, 6))
ax = plt.subplot(1, 1, 1)
ax.plot(best_simulation,
        color='black',
        linestyle='solid',
        label='Best objf.=' + str(bestobjf))
ax.plot(spot_setup.evaluation(), 'r.', markersize=3, label='Observation data')
plt.xlabel('Number of Observation Points')
plt.ylabel('Discharge [l s-1]')
plt.legend(loc='upper right')
fig.savefig('SCEUA_best_modelrun.png', dpi=150)