def test_test_gages(self):
    """Evaluate each trained sub-model on the GAGES test split and plot results.

    Loads the serialized test-time data model from the "Temp" directory,
    runs inference (``master_test``) for every sub-model, converts the
    normalized predictions/observations back to physical streamflow units
    via basin area and mean precipitation, then passes the stacked arrays
    to the plotting helper.
    """
    data_model = GagesModel.load_datamodel(
        self.config_data.data_path["Temp"],
        data_source_file_name='test_data_source.txt',
        stat_file_name='test_Statistics.json',
        flow_file_name='test_flow.npy',
        forcing_file_name='test_forcing.npy',
        attr_file_name='test_attr.npy',
        f_dict_file_name='test_dictFactorize.json',
        var_dict_file_name='test_dictAttribute.json',
        t_s_dict_file_name='test_dictTimeSpace.json')
    # NOTE(review): GPU index 1 is hard-coded — confirm the host has a second GPU.
    with torch.cuda.device(1):
        data_models = GagesModel.every_model(data_model)
        obs_lst = []
        pred_lst = []
        for i, data_model_i in enumerate(data_models):
            print("\n", "Testing model", str(i + 1), ":\n")
            pred, obs = master_test(data_model_i)
            # Attributes required to undo the basin normalization applied
            # during training (to_norm=False reverses it).
            basin_area = data_model_i.data_source.read_attr(
                data_model_i.t_s_dict["sites_id"], ['area_gages2'],
                is_return_dict=False)
            mean_prep = data_model_i.data_source.read_attr(
                data_model_i.t_s_dict["sites_id"], ['p_mean'],
                is_return_dict=False)
            pred = _basin_norm(pred, basin_area, mean_prep, to_norm=False)
            obs = _basin_norm(obs, basin_area, mean_prep, to_norm=False)
            obs_lst.append(obs.flatten())
            pred_lst.append(pred.flatten())
        # Stack per-model flattened series into 2-D arrays for plotting.
        preds = np.array(pred_lst)
        obss = np.array(obs_lst)
        plot_we_need(data_model, obss, preds, id_col="id", lon_col="lon",
                     lat_col="lat")
def test_train_camels(self):
    """Train every sub-model and save each run's early-stopping loss curve.

    Loads the serialized training data model from the "Temp" directory,
    trains each sub-model with a 20% validation split (``master_train``),
    and writes one loss-curve figure per model into the "Out" directory.
    """
    data_model = GagesModel.load_datamodel(
        self.config_data.data_path["Temp"],
        data_source_file_name='data_source.txt',
        stat_file_name='Statistics.json',
        flow_file_name='flow.npy',
        forcing_file_name='forcing.npy',
        attr_file_name='attr.npy',
        f_dict_file_name='dictFactorize.json',
        var_dict_file_name='dictAttribute.json',
        t_s_dict_file_name='dictTimeSpace.json')
    # NOTE(review): GPU index 1 is hard-coded — confirm the host has a second GPU.
    with torch.cuda.device(1):
        data_models = GagesModel.every_model(data_model)
        # Loop-invariant: resolve the output directory once.
        out_dir = self.config_data.data_path["Out"]
        for i, data_model_i in enumerate(data_models):
            print("\n", "Training model", str(i + 1), ":\n")
            model, train_loss, valid_loss = master_train(data_model_i,
                                                         valid_size=0.2)
            fig = plot_loss_early_stop(train_loss, valid_loss)
            # Bug fix: the original saved every figure to the same
            # 'loss_plot.png', so each iteration overwrote the previous
            # model's plot. Index the filename per model instead.
            fig.savefig(os.path.join(out_dir, 'loss_plot_{}.png'.format(i + 1)),
                        bbox_inches='tight')